diff --git a/workspace/virtuallab/frozen_inference_lab.pb b/workspace/virtuallab/frozen_inference_lab.pb new file mode 100644 index 0000000000000000000000000000000000000000..ed3d6512b093a04e4330abe2408ecc3773760337 Binary files /dev/null and b/workspace/virtuallab/frozen_inference_lab.pb differ diff --git a/workspace/virtuallab/label_map_lab.pbtxt b/workspace/virtuallab/label_map_lab.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..388ee0b8884b0b8cbd92f75bd6fdbb112294223e --- /dev/null +++ b/workspace/virtuallab/label_map_lab.pbtxt @@ -0,0 +1,9 @@ +item { + id: 1 + name: 'robot' +} + +item { + id: 2 + name: 'corobot' +} diff --git a/workspace/virtuallab/src/image_process/scripts/image_process.py b/workspace/virtuallab/src/image_process/scripts/image_process.py index 1e32f7e043093c080c299743cda745509dcfb085..e59dee29eb1ca63b48c8621d291b413fe24c295f 100755 --- a/workspace/virtuallab/src/image_process/scripts/image_process.py +++ b/workspace/virtuallab/src/image_process/scripts/image_process.py @@ -11,7 +11,7 @@ import os import numpy as np import tensorflow as tf -experiment = 'simulation' # simulation or real +experiment = 'lab' # simulation or real class Nodo(object): def __init__(self): @@ -25,7 +25,7 @@ class Nodo(object): self.br = CvBridge() # Node cycle rate (in Hz). - self.loop_rate = rospy.Rate(20) + self.loop_rate = rospy.Rate(30) # Publishers self.pub1 = rospy.Publisher( @@ -284,7 +284,7 @@ class Nodo(object): warped_image1 = cv2.warpPerspective(self.image1.copy( ), M, (self.dst_width_r, self.dst_height_r), flags=cv2.INTER_LINEAR) - self.pub3.publish(self.br.cv2_to_imgmsg(warped_image1, 'bgr8')) + self.pub4.publish(self.br.cv2_to_imgmsg(warped_image1, 'bgr8')) if self.image2 is not None: @@ -307,7 +307,7 @@ class Nodo(object): warped_image3 = cv2.warpPerspective(self.image3.copy( ), M, (self.dst_width_r, self.dst_height_r), flags=cv2.INTER_LINEAR) - self.pub4.publish(self.br.cv2_to_imgmsg(warped_image3 , 'bgr8')) + self.pub3.publish(self.br.cv2_to_imgmsg(warped_image3 , 'bgr8')) if self.image4 is not None: diff --git a/workspace/virtuallab/src/laser_scan_convert/src/laser_scan_convert.cpp b/workspace/virtuallab/src/laser_scan_convert/src/laser_scan_convert.cpp index 10cfaa6f77548fd0dcb2fec68fbc3726be296054..081742d8595df7b211d06fc83281b6e4f465749a 100644 --- a/workspace/virtuallab/src/laser_scan_convert/src/laser_scan_convert.cpp +++ b/workspace/virtuallab/src/laser_scan_convert/src/laser_scan_convert.cpp @@ -20,8 +20,8 @@ public: LaserScanToPointCloud(ros::NodeHandle n) : n_(n), - laser_sub_(n_, "scan", 10), - laser_notifier_(laser_sub_,listener_, "hokuyo", 10) //world for lab hokuyo for simulation + laser_sub_(n_, "scan", 50), + laser_notifier_(laser_sub_,listener_, "world", 50) //laser for lab hokuyo for simulation { laser_notifier_.registerCallback( boost::bind(&LaserScanToPointCloud::scanCallback, this, _1)); diff --git a/workspace/virtuallab/src/multiple-object-tracking-lidar/src/main.cpp b/workspace/virtuallab/src/multiple-object-tracking-lidar/src/main.cpp index 0b02e06423fe6667eff08c5e5046d788be98c502..6553d244155c89640dfdd1eef159458033b9f5f6 100644 --- a/workspace/virtuallab/src/multiple-object-tracking-lidar/src/main.cpp +++ b/workspace/virtuallab/src/multiple-object-tracking-lidar/src/main.cpp @@ -547,8 +547,8 @@ else std::vector cluster_indices; pcl::EuclideanClusterExtraction ec; ec.setClusterTolerance (0.3); - ec.setMinClusterSize (10); - ec.setMaxClusterSize (600); + ec.setMinClusterSize (5); + ec.setMaxClusterSize (15); ec.setSearchMethod 
(tree); ec.setInputCloud (input_cloud); //cout<<"PCL init successfull\n"; diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/CONTRIBUTING.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..8073982f4ad3830dc31f32381c7dff3fa7f62b4f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to the TensorFlow Object Detection API + +Patches to TensorFlow Object Detection API are welcome! + +We require contributors to fill out either the individual or corporate +Contributor License Agreement (CLA). + + * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html). + * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html). + +Please follow the +[TensorFlow contributing guidelines](https://github.com/tensorflow/tensorflow/blob/master/CONTRIBUTING.md) +when submitting pull requests. diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/README.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/README.md new file mode 100644 index 0000000000000000000000000000000000000000..932705539bcc1c9b876cf4e797961b736abcac05 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/README.md @@ -0,0 +1,191 @@ +# TensorFlow Object Detection API +[![TensorFlow 2.2](https://img.shields.io/badge/TensorFlow-2.2-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v2.2.0) +[![TensorFlow 1.15](https://img.shields.io/badge/TensorFlow-1.15-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0) +[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB)](https://www.python.org/downloads/release/python-360/) + +Creating accurate machine learning models capable of localizing and identifying +multiple objects in a single image remains a core challenge in computer vision. +The TensorFlow Object Detection API is an open source framework built on top of +TensorFlow that makes it easy to construct, train and deploy object detection +models. At Google we’ve certainly found this codebase to be useful for our +computer vision needs, and we hope that you will as well.
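The lab-specific artifacts added at the top of this patch (`frozen_inference_lab.pb` and `label_map_lab.pbtxt`, with the two classes `robot` and `corobot`) are intended to be consumed through this API. As a minimal illustrative sketch only — it assumes the graph was exported with the API's standard TF1 `export_inference_graph` tooling and is not the actual `robot_classifier` node — loading the frozen graph and label map and running one detection pass looks roughly like this:

```python
# Illustrative sketch, not part of this patch. Tensor names assume the
# standard TF1 export; adjust if the graph was exported differently.
import numpy as np
import tensorflow as tf
from object_detection.utils import label_map_util

FROZEN_GRAPH = 'workspace/virtuallab/frozen_inference_lab.pb'
LABEL_MAP = 'workspace/virtuallab/label_map_lab.pbtxt'

# Maps class ids to display names: 1 -> 'robot', 2 -> 'corobot'.
category_index = label_map_util.create_category_index_from_labelmap(
    LABEL_MAP, use_display_name=True)

# Load the frozen GraphDef into its own graph.
detection_graph = tf.Graph()
with detection_graph.as_default():
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(FROZEN_GRAPH, 'rb') as fid:
        graph_def.ParseFromString(fid.read())
    tf.compat.v1.import_graph_def(graph_def, name='')

def detect(image_rgb):
    """Runs the detector on a single HxWx3 uint8 image array."""
    with tf.compat.v1.Session(graph=detection_graph) as sess:
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        outputs = [detection_graph.get_tensor_by_name(name + ':0')
                   for name in ('detection_boxes', 'detection_scores',
                                'detection_classes', 'num_detections')]
        # Returns [boxes, scores, classes, num_detections] for the batch of 1.
        return sess.run(outputs,
                        feed_dict={image_tensor: np.expand_dims(image_rgb, 0)})
```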
+
+Contributions to the codebase are welcome and we would love to hear back from +you if you find this API useful. Finally if you use the TensorFlow Object +Detection API for a research publication, please consider citing: + +``` +"Speed/accuracy trade-offs for modern convolutional object detectors." +Huang J, Rathod V, Sun C, Zhu M, Korattikara A, Fathi A, Fischer I, Wojna Z, +Song Y, Guadarrama S, Murphy K, CVPR 2017 +``` + +\[[link](https://arxiv.org/abs/1611.10012)\]\[[bibtex](https://scholar.googleusercontent.com/scholar.bib?q=info:l291WsrB-hQJ:scholar.google.com/&output=citation&scisig=AAGBfm0AAAAAWUIIlnPZ_L9jxvPwcC49kDlELtaeIyU-&scisf=4&ct=citation&cd=-1&hl=en&scfhb=1)\] + +
+
+ +## Support for TensorFlow 2 and 1 +The TensorFlow Object Detection API supports both TensorFlow 2 (TF2) and +TensorFlow 1 (TF1). A majority of the modules in the library are both TF1 and +TF2 compatible. In cases where they are not, we provide two versions. + +Although we will continue to maintain the TF1 models and provide support, we +encourage users to try the Object Detection API with TF2 for the following +reasons: + +* We provide new architectures supported in TF2 only and we will continue to + develop in TF2 going forward. + +* The popular models we ported from TF1 to TF2 achieve the same performance. + +* A single training and evaluation binary now supports both GPU and TPU + distribution strategies making it possible to train models with synchronous + SGD by default. + +* Eager execution with new binaries makes debugging easy! + +Finally, if are an existing user of the Object Detection API we have retained +the same config language you are familiar with and ensured that the +TF2 training/eval binary takes the same arguments as our TF1 binaries. + +Note: The models we provide in [TF2 Zoo](g3doc/tf2_detection_zoo.md) and +[TF1 Zoo](g3doc/tf1_detection_zoo.md) are specific to the TensorFlow major +version and are not interoperable. + +Please select one of the links below for TensorFlow version-specific +documentation of the Object Detection API: + + +### Tensorflow 2.x + * + Object Detection API TensorFlow 2
+  * [TensorFlow 2 Model Zoo](g3doc/tf2_detection_zoo.md)
+
+### Tensorflow 1.x
+  * Object Detection API TensorFlow 1
+  * [TensorFlow 1 Model Zoo](g3doc/tf1_detection_zoo.md)
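As the note above states, TF1-zoo and TF2-zoo models are not interoperable: TF1 models ship as frozen `GraphDef` protos, while TF2 models ship as SavedModels with object-based checkpoints. A rough sketch of the two loading paths (the paths are placeholders, not files from this repository):

```python
import tensorflow as tf

# TF1 zoo model: a frozen GraphDef (frozen_inference_graph.pb).
tf1_graph = tf.Graph()
with tf1_graph.as_default():
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile('some_tf1_model/frozen_inference_graph.pb', 'rb') as f:
        graph_def.ParseFromString(f.read())
    tf.compat.v1.import_graph_def(graph_def, name='')

# TF2 zoo model: a SavedModel directory; loading returns a callable detector.
detect_fn = tf.saved_model.load('some_tf2_model/saved_model')
# Depending on the export, detect_fn.signatures['serving_default'] may be needed.
detections = detect_fn(tf.zeros([1, 320, 320, 3], dtype=tf.uint8))
```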
+ + +## Whats New + +### Mobile Inference for TF2 models + +TF2 OD API models can now be converted to TensorFlow Lite! Only SSD models +currently supported. See documentation. + +**Thanks to contributors**: Sachin Joglekar + +### TensorFlow 2 Support + +We are happy to announce that the TF OD API officially supports TF2! Our release +includes: + +* New binaries for train/eval/export that are designed to run in eager mode. +* A suite of TF2 compatible (Keras-based) models; this includes migrations of + our most popular TF1.x models (e.g., SSD with MobileNet, RetinaNet, + Faster R-CNN, Mask R-CNN), as well as a few new architectures for which we + will only maintain TF2 implementations: + + 1. CenterNet - a simple and effective anchor-free architecture based on + the recent [Objects as Points](https://arxiv.org/abs/1904.07850) paper by + Zhou et al. + 2. [EfficientDet](https://arxiv.org/abs/1911.09070) - a recent family of + SOTA models discovered with the help of Neural Architecture Search. + +* COCO pre-trained weights for all of the models provided as TF2 style + object-based checkpoints. +* Access to [Distribution Strategies](https://www.tensorflow.org/guide/distributed_training) + for distributed training --- our model are designed to be trainable using sync + multi-GPU and TPU platforms. +* Colabs demo’ing eager mode training and inference. + +See our release blogpost [here](https://blog.tensorflow.org/2020/07/tensorflow-2-meets-object-detection-api.html). +If you are an existing user of the TF OD API using TF 1.x, don’t worry, we’ve +got you covered. + +**Thanks to contributors**: Akhil Chinnakotla, Allen Lavoie, Anirudh Vegesana, +Anjali Sridhar, Austin Myers, Dan Kondratyuk, David Ross, Derek Chow, Jaeyoun +Kim, Jing Li, Jonathan Huang, Jordi Pont-Tuset, Karmel Allison, Kathy Ruan, +Kaushik Shivakumar, Lu He, Mingxing Tan, Pengchong Jin, Ronny Votel, Sara Beery, +Sergi Caelles Prat, Shan Yang, Sudheendra Vijayanarasimhan, Tina Tian, Tomer +Kaftan, Vighnesh Birodkar, Vishnu Banna, Vivek Rathod, Yanhui Liang, Yiming Shi, +Yixin Shi, Yu-hui Chen, Zhichao Lu. + +### MobileDet GPU + +We have released SSDLite with MobileDet GPU backbone, which achieves 17% mAP +higher than the MobileNetV2 SSDLite (27.5 mAP vs 23.5 mAP) on a NVIDIA Jetson +Xavier at comparable latency (3.2ms vs 3.3ms). + +Along with the model definition, we are also releasing model checkpoints trained +on the COCO dataset. + +Thanks to contributors: Yongzhe Wang, Bo Chen, Hanxiao Liu, Le An +(NVIDIA), Yu-Te Cheng (NVIDIA), Oliver Knieps (NVIDIA), and Josh Park (NVIDIA). + +### Context R-CNN + +We have released [Context R-CNN](https://arxiv.org/abs/1912.03538), a model that +uses attention to incorporate contextual information images (e.g. from +temporally nearby frames taken by a static camera) in order to improve accuracy. +Importantly, these contextual images need not be labeled. + +* When applied to a challenging wildlife detection dataset + ([Snapshot Serengeti](http://lila.science/datasets/snapshot-serengeti)), + Context R-CNN with context from up to a month of images outperforms a + single-frame baseline by 17.9% mAP, and outperforms S3D (a 3d convolution + based baseline) by 11.2% mAP. +* Context R-CNN leverages temporal context from the unlabeled frames of a + novel camera deployment to improve performance at that camera, boosting + model generalizeability. + +Read about Context R-CNN on the Google AI blog +[here](https://ai.googleblog.com/2020/06/leveraging-temporal-context-for-object.html). 
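Regarding the TensorFlow Lite note above: the usual workflow (an assumption about the standard tooling, not something this patch exercises) is to re-export the trained SSD model with the API's `export_tflite_graph_tf2.py` tool and then run the stock TFLite converter on the resulting SavedModel:

```python
import tensorflow as tf

# 'tflite_export/saved_model' is a placeholder for the directory produced by
# object_detection/export_tflite_graph_tf2.py (SSD models only).
converter = tf.lite.TFLiteConverter.from_saved_model('tflite_export/saved_model')
tflite_model = converter.convert()

with open('detector.tflite', 'wb') as f:
    f.write(tflite_model)
```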
+ +We have provided code for generating data with associated context +[here](g3doc/context_rcnn.md), and a sample config for a Context R-CNN model +[here](samples/configs/context_rcnn_resnet101_snapshot_serengeti_sync.config). + +Snapshot Serengeti-trained Faster R-CNN and Context R-CNN models can be found in +the +[model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md#snapshot-serengeti-camera-trap-trained-models). + +A colab demonstrating Context R-CNN is provided +[here](colab_tutorials/context_rcnn_tutorial.ipynb). + +Thanks to contributors: Sara Beery, Jonathan Huang, Guanhang Wu, Vivek +Rathod, Ronny Votel, Zhichao Lu, David Ross, Pietro Perona, Tanya Birch, and the +Wildlife Insights AI Team. + +## Release Notes +See [notes](g3doc/release_notes.md) for all past releases. + +## Getting Help + +To get help with issues you may encounter using the TensorFlow Object Detection +API, create a new question on [StackOverflow](https://stackoverflow.com/) with +the tags "tensorflow" and "object-detection". + +Please report bugs (actually broken code, not usage questions) to the +tensorflow/models GitHub +[issue tracker](https://github.com/tensorflow/models/issues), prefixing the +issue name with "object_detection". + +Please check the [FAQ](g3doc/faq.md) for frequently asked questions before +reporting an issue. + +## Maintainers + +* Jonathan Huang ([@GitHub jch1](https://github.com/jch1)) +* Vivek Rathod ([@GitHub tombstone](https://github.com/tombstone)) +* Vighnesh Birodkar ([@GitHub vighneshbirodkar](https://github.com/vighneshbirodkar)) +* Austin Myers ([@GitHub austin-myers](https://github.com/austin-myers)) +* Zhichao Lu ([@GitHub pkulzc](https://github.com/pkulzc)) +* Ronny Votel ([@GitHub ronnyvotel](https://github.com/ronnyvotel)) +* Yu-hui Chen ([@GitHub yuhuichen1015](https://github.com/yuhuichen1015)) +* Derek Chow ([@GitHub derekjchow](https://github.com/derekjchow)) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a02cc3675ecd7b9f1cd586ff00fee96c136bd378 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98f48923319d4b5f5278347ba10c7dc7a01f1fe2 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__pycache__/eval_util.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__pycache__/eval_util.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7136713f8e96e51d04f08823b2ba86b62a0b02a Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__pycache__/eval_util.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__pycache__/model_lib.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__pycache__/model_lib.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..568e1077ed1aaccf9067e3d72aa7ff7ef69ce83c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/__pycache__/model_lib.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..439548648b0802b4c64954cc2be68c311c6c2a72 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..480a1da301d01f1304548314be9c023db2b08a1d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/flexible_grid_anchor_generator.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/flexible_grid_anchor_generator.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4efc7d9afe638bc3546878dabf52800fa3c34000 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/flexible_grid_anchor_generator.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/grid_anchor_generator.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/grid_anchor_generator.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4601f2bd61a048f8296fb5df18fcaea7ef8a3452 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/grid_anchor_generator.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/multiple_grid_anchor_generator.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/multiple_grid_anchor_generator.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23570daad8e4df46484c7ea0ce280b768f8037bd Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/multiple_grid_anchor_generator.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/multiscale_grid_anchor_generator.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/multiscale_grid_anchor_generator.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7276e24a6467bbceaec4d08aab97e5086f33d4c1 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/__pycache__/multiscale_grid_anchor_generator.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/flexible_grid_anchor_generator.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/flexible_grid_anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..0f340cc945e684e63d5e4d36de113c38b92558a5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/flexible_grid_anchor_generator.py @@ -0,0 +1,134 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Generates grid anchors on the fly corresponding to multiple CNN layers.""" + +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.core import anchor_generator +from object_detection.core import box_list_ops + + +class FlexibleGridAnchorGenerator(anchor_generator.AnchorGenerator): + """Generate a grid of anchors for multiple CNN layers of different scale.""" + + def __init__(self, base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=True): + """Constructs a FlexibleGridAnchorGenerator. + + This generator is more flexible than the multiple_grid_anchor_generator + and multiscale_grid_anchor_generator, and can generate any of the anchors + that they can generate, plus additional anchor configurations. In + particular, it allows the explicit specification of scale and aspect ratios + at each layer without making any assumptions between the relationship + between scales and aspect ratios between layers. + + Args: + base_sizes: list of tuples of anchor base sizes. For example, setting + base_sizes=[(1, 2, 3), (4, 5)] means that we want 3 anchors at each + grid point on the first layer with the base sizes of 1, 2, and 3, and 2 + anchors at each grid point on the second layer with the base sizes of + 4 and 5. + aspect_ratios: list or tuple of aspect ratios. 
For example, setting + aspect_ratios=[(1.0, 2.0, 0.5), (1.0, 2.0)] means that we want 3 anchors + at each grid point on the first layer with aspect ratios of 1.0, 2.0, + and 0.5, and 2 anchors at each grid point on the sercond layer with the + base sizes of 1.0 and 2.0. + anchor_strides: list of pairs of strides in pixels (in y and x directions + respectively). For example, setting anchor_strides=[(25, 25), (50, 50)] + means that we want the anchors corresponding to the first layer to be + strided by 25 pixels and those in the second layer to be strided by 50 + pixels in both y and x directions. + anchor_offsets: list of pairs of offsets in pixels (in y and x directions + respectively). The offset specifies where we want the center of the + (0, 0)-th anchor to lie for each layer. For example, setting + anchor_offsets=[(10, 10), (20, 20)]) means that we want the + (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space + and likewise that we want the (0, 0)-th anchor of the second layer to + lie at (25, 25) in pixel space. + normalize_coordinates: whether to produce anchors in normalized + coordinates. (defaults to True). + """ + self._base_sizes = base_sizes + self._aspect_ratios = aspect_ratios + self._anchor_strides = anchor_strides + self._anchor_offsets = anchor_offsets + self._normalize_coordinates = normalize_coordinates + + def name_scope(self): + return 'FlexibleGridAnchorGenerator' + + def num_anchors_per_location(self): + """Returns the number of anchors per spatial location. + + Returns: + a list of integers, one for each expected feature map to be passed to + the Generate function. + """ + return [len(size) for size in self._base_sizes] + + def _generate(self, feature_map_shape_list, im_height=1, im_width=1): + """Generates a collection of bounding boxes to be used as anchors. + + Currently we require the input image shape to be statically defined. That + is, im_height and im_width should be integers rather than tensors. + + Args: + feature_map_shape_list: list of pairs of convnet layer resolutions in the + format [(height_0, width_0), (height_1, width_1), ...]. For example, + setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that + correspond to an 8x8 layer followed by a 7x7 layer. + im_height: the height of the image to generate the grid for. If both + im_height and im_width are 1, anchors can only be generated in + absolute coordinates. + im_width: the width of the image to generate the grid for. If both + im_height and im_width are 1, anchors can only be generated in + absolute coordinates. + + Returns: + boxes_list: a list of BoxLists each holding anchor boxes corresponding to + the input feature map shapes. + Raises: + ValueError: if im_height and im_width are 1, but normalized coordinates + were requested. 
+ """ + anchor_grid_list = [] + for (feat_shape, base_sizes, aspect_ratios, anchor_stride, anchor_offset + ) in zip(feature_map_shape_list, self._base_sizes, self._aspect_ratios, + self._anchor_strides, self._anchor_offsets): + anchor_grid = grid_anchor_generator.tile_anchors( + feat_shape[0], + feat_shape[1], + tf.cast(tf.convert_to_tensor(base_sizes), dtype=tf.float32), + tf.cast(tf.convert_to_tensor(aspect_ratios), dtype=tf.float32), + tf.constant([1.0, 1.0]), + tf.cast(tf.convert_to_tensor(anchor_stride), dtype=tf.float32), + tf.cast(tf.convert_to_tensor(anchor_offset), dtype=tf.float32)) + num_anchors = anchor_grid.num_boxes_static() + if num_anchors is None: + num_anchors = anchor_grid.num_boxes() + anchor_indices = tf.zeros([num_anchors]) + anchor_grid.add_field('feature_map_index', anchor_indices) + if self._normalize_coordinates: + if im_height == 1 or im_width == 1: + raise ValueError( + 'Normalized coordinates were requested upon construction of the ' + 'FlexibleGridAnchorGenerator, but a subsequent call to ' + 'generate did not supply dimension information.') + anchor_grid = box_list_ops.to_normalized_coordinates( + anchor_grid, im_height, im_width, check_range=False) + anchor_grid_list.append(anchor_grid) + + return anchor_grid_list diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/flexible_grid_anchor_generator.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/flexible_grid_anchor_generator.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4462f094cf23451e15a5eb4c3be4b1c05fc7b3e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/flexible_grid_anchor_generator.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/flexible_grid_anchor_generator_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/flexible_grid_anchor_generator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..bab34b750180081ee03d5d6681f2e449c283dfd4 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/flexible_grid_anchor_generator_test.py @@ -0,0 +1,292 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for anchor_generators.flexible_grid_anchor_generator_test.py.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import flexible_grid_anchor_generator as fg +from object_detection.utils import test_case + + +class FlexibleGridAnchorGeneratorTest(test_case.TestCase): + + def test_construct_single_anchor(self): + def graph_fn(): + anchor_strides = [(32, 32),] + anchor_offsets = [(16, 16),] + base_sizes = [(128.0,)] + aspect_ratios = [(1.0,)] + im_height = 64 + im_width = 64 + feature_map_shape_list = [(2, 2)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + anchor_corners_out = self.execute(graph_fn, []) + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_unit_dimensions(self): + def graph_fn(): + anchor_strides = [(32, 32),] + anchor_offsets = [(16, 16),] + base_sizes = [(32.0,)] + aspect_ratios = [(1.0,)] + im_height = 1 + im_width = 1 + feature_map_shape_list = [(2, 2)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + # Positive offsets are produced. 
+ exp_anchor_corners = [[0, 0, 32, 32], + [0, 32, 32, 64], + [32, 0, 64, 32], + [32, 32, 64, 64]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_normalized_anchors_fails_with_unit_dimensions(self): + anchor_generator = fg.FlexibleGridAnchorGenerator( + [(32.0,)], [(1.0,)], [(32, 32),], [(16, 16),], + normalize_coordinates=True) + with self.assertRaisesRegexp(ValueError, 'Normalized coordinates'): + anchor_generator.generate( + feature_map_shape_list=[(2, 2)], im_height=1, im_width=1) + + def test_construct_single_anchor_in_normalized_coordinates(self): + def graph_fn(): + anchor_strides = [(32, 32),] + anchor_offsets = [(16, 16),] + base_sizes = [(128.0,)] + aspect_ratios = [(1.0,)] + im_height = 64 + im_width = 128 + feature_map_shape_list = [(2, 2)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=True) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + exp_anchor_corners = [[-48./64, -48./128, 80./64, 80./128], + [-48./64, -16./128, 80./64, 112./128], + [-16./64, -48./128, 112./64, 80./128], + [-16./64, -16./128, 112./64, 112./128]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_num_anchors_per_location(self): + anchor_strides = [(32, 32), (64, 64)] + anchor_offsets = [(16, 16), (32, 32)] + base_sizes = [(32.0, 64.0, 96.0, 32.0, 64.0, 96.0), + (64.0, 128.0, 172.0, 64.0, 128.0, 172.0)] + aspect_ratios = [(1.0, 1.0, 1.0, 2.0, 2.0, 2.0), + (1.0, 1.0, 1.0, 2.0, 2.0, 2.0)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + self.assertEqual(anchor_generator.num_anchors_per_location(), [6, 6]) + + def test_construct_single_anchor_dynamic_size(self): + def graph_fn(): + anchor_strides = [(32, 32),] + anchor_offsets = [(0, 0),] + base_sizes = [(128.0,)] + aspect_ratios = [(1.0,)] + im_height = tf.constant(64) + im_width = tf.constant(64) + feature_map_shape_list = [(2, 2)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + # Zero offsets are used. 
+ exp_anchor_corners = [[-64, -64, 64, 64], + [-64, -32, 64, 96], + [-32, -64, 96, 64], + [-32, -32, 96, 96]] + anchor_corners_out = self.execute_cpu(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_odd_input_dimension(self): + + def graph_fn(): + anchor_strides = [(32, 32),] + anchor_offsets = [(0, 0),] + base_sizes = [(128.0,)] + aspect_ratios = [(1.0,)] + im_height = 65 + im_width = 65 + feature_map_shape_list = [(3, 3)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return (anchor_corners,) + anchor_corners_out = self.execute(graph_fn, []) + exp_anchor_corners = [[-64, -64, 64, 64], + [-64, -32, 64, 96], + [-64, 0, 64, 128], + [-32, -64, 96, 64], + [-32, -32, 96, 96], + [-32, 0, 96, 128], + [0, -64, 128, 64], + [0, -32, 128, 96], + [0, 0, 128, 128]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_on_two_feature_maps(self): + + def graph_fn(): + anchor_strides = [(32, 32), (64, 64)] + anchor_offsets = [(16, 16), (32, 32)] + base_sizes = [(128.0,), (256.0,)] + aspect_ratios = [(1.0,), (1.0,)] + im_height = 64 + im_width = 64 + feature_map_shape_list = [(2, 2), (1, 1)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + + anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0) + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112], + [-96, -96, 160, 160]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_two_scales_per_octave(self): + + def graph_fn(): + anchor_strides = [(64, 64),] + anchor_offsets = [(32, 32),] + base_sizes = [(256.0, 362.03867)] + aspect_ratios = [(1.0, 1.0)] + im_height = 64 + im_width = 64 + feature_map_shape_list = [(1, 1)] + + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + # There are 4 set of anchors in this configuration. 
The order is: + # [[2**0.0 intermediate scale + 1.0 aspect], + # [2**0.5 intermediate scale + 1.0 aspect]] + exp_anchor_corners = [[-96., -96., 160., 160.], + [-149.0193, -149.0193, 213.0193, 213.0193]] + + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_two_scales_per_octave_and_aspect(self): + def graph_fn(): + anchor_strides = [(64, 64),] + anchor_offsets = [(32, 32),] + base_sizes = [(256.0, 362.03867, 256.0, 362.03867)] + aspect_ratios = [(1.0, 1.0, 2.0, 2.0)] + im_height = 64 + im_width = 64 + feature_map_shape_list = [(1, 1)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + # There are 4 set of anchors in this configuration. The order is: + # [[2**0.0 intermediate scale + 1.0 aspect], + # [2**0.5 intermediate scale + 1.0 aspect], + # [2**0.0 intermediate scale + 2.0 aspect], + # [2**0.5 intermediate scale + 2.0 aspect]] + + exp_anchor_corners = [[-96., -96., 160., 160.], + [-149.0193, -149.0193, 213.0193, 213.0193], + [-58.50967, -149.0193, 122.50967, 213.0193], + [-96., -224., 160., 288.]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchors_on_feature_maps_with_dynamic_shape(self): + + def graph_fn(feature_map1_height, feature_map1_width, feature_map2_height, + feature_map2_width): + anchor_strides = [(32, 32), (64, 64)] + anchor_offsets = [(16, 16), (32, 32)] + base_sizes = [(128.0,), (256.0,)] + aspect_ratios = [(1.0,), (1.0,)] + im_height = 64 + im_width = 64 + feature_map_shape_list = [(feature_map1_height, feature_map1_width), + (feature_map2_height, feature_map2_width)] + anchor_generator = fg.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, anchor_strides, anchor_offsets, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + + anchor_corners_out = np.concatenate( + self.execute_cpu(graph_fn, [ + np.array(2, dtype=np.int32), + np.array(2, dtype=np.int32), + np.array(1, dtype=np.int32), + np.array(1, dtype=np.int32) + ]), + axis=0) + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112], + [-96, -96, 160, 160]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/grid_anchor_generator.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/grid_anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..a31bc87996d848201ac57e5e7429ee42ab274299 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/grid_anchor_generator.py @@ -0,0 +1,213 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Generates grid anchors on the fly as used in Faster RCNN. + +Generates grid anchors on the fly as described in: +"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks" +Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. +""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import anchor_generator +from object_detection.core import box_list +from object_detection.utils import ops + + +class GridAnchorGenerator(anchor_generator.AnchorGenerator): + """Generates a grid of anchors at given scales and aspect ratios.""" + + def __init__(self, + scales=(0.5, 1.0, 2.0), + aspect_ratios=(0.5, 1.0, 2.0), + base_anchor_size=None, + anchor_stride=None, + anchor_offset=None): + """Constructs a GridAnchorGenerator. + + Args: + scales: a list of (float) scales, default=(0.5, 1.0, 2.0) + aspect_ratios: a list of (float) aspect ratios, default=(0.5, 1.0, 2.0) + base_anchor_size: base anchor size as height, width ( + (length-2 float32 list or tensor, default=[256, 256]) + anchor_stride: difference in centers between base anchors for adjacent + grid positions (length-2 float32 list or tensor, + default=[16, 16]) + anchor_offset: center of the anchor with scale and aspect ratio 1 for the + upper left element of the grid, this should be zero for + feature networks with only VALID padding and even receptive + field size, but may need additional calculation if other + padding is used (length-2 float32 list or tensor, + default=[0, 0]) + """ + # Handle argument defaults + if base_anchor_size is None: + base_anchor_size = [256, 256] + if anchor_stride is None: + anchor_stride = [16, 16] + if anchor_offset is None: + anchor_offset = [0, 0] + + self._scales = scales + self._aspect_ratios = aspect_ratios + self._base_anchor_size = base_anchor_size + self._anchor_stride = anchor_stride + self._anchor_offset = anchor_offset + + def name_scope(self): + return 'GridAnchorGenerator' + + def num_anchors_per_location(self): + """Returns the number of anchors per spatial location. + + Returns: + a list of integers, one for each expected feature map to be passed to + the `generate` function. + """ + return [len(self._scales) * len(self._aspect_ratios)] + + def _generate(self, feature_map_shape_list): + """Generates a collection of bounding boxes to be used as anchors. + + Args: + feature_map_shape_list: list of pairs of convnet layer resolutions in the + format [(height_0, width_0)]. For example, setting + feature_map_shape_list=[(8, 8)] asks for anchors that correspond + to an 8x8 layer. For this anchor generator, only lists of length 1 are + allowed. + + Returns: + boxes_list: a list of BoxLists each holding anchor boxes corresponding to + the input feature map shapes. + + Raises: + ValueError: if feature_map_shape_list, box_specs_list do not have the same + length. 
+ ValueError: if feature_map_shape_list does not consist of pairs of + integers + """ + if not (isinstance(feature_map_shape_list, list) + and len(feature_map_shape_list) == 1): + raise ValueError('feature_map_shape_list must be a list of length 1.') + if not all([isinstance(list_item, tuple) and len(list_item) == 2 + for list_item in feature_map_shape_list]): + raise ValueError('feature_map_shape_list must be a list of pairs.') + + # Create constants in init_scope so they can be created in tf.functions + # and accessed from outside of the function. + with tf.init_scope(): + self._base_anchor_size = tf.cast(tf.convert_to_tensor( + self._base_anchor_size), dtype=tf.float32) + self._anchor_stride = tf.cast(tf.convert_to_tensor( + self._anchor_stride), dtype=tf.float32) + self._anchor_offset = tf.cast(tf.convert_to_tensor( + self._anchor_offset), dtype=tf.float32) + + grid_height, grid_width = feature_map_shape_list[0] + scales_grid, aspect_ratios_grid = ops.meshgrid(self._scales, + self._aspect_ratios) + scales_grid = tf.reshape(scales_grid, [-1]) + aspect_ratios_grid = tf.reshape(aspect_ratios_grid, [-1]) + anchors = tile_anchors(grid_height, + grid_width, + scales_grid, + aspect_ratios_grid, + self._base_anchor_size, + self._anchor_stride, + self._anchor_offset) + + num_anchors = anchors.num_boxes_static() + if num_anchors is None: + num_anchors = anchors.num_boxes() + anchor_indices = tf.zeros([num_anchors]) + anchors.add_field('feature_map_index', anchor_indices) + return [anchors] + + +def tile_anchors(grid_height, + grid_width, + scales, + aspect_ratios, + base_anchor_size, + anchor_stride, + anchor_offset): + """Create a tiled set of anchors strided along a grid in image space. + + This op creates a set of anchor boxes by placing a "basis" collection of + boxes with user-specified scales and aspect ratios centered at evenly + distributed points along a grid. The basis collection is specified via the + scale and aspect_ratios arguments. For example, setting scales=[.1, .2, .2] + and aspect ratios = [2,2,1/2] means that we create three boxes: one with scale + .1, aspect ratio 2, one with scale .2, aspect ratio 2, and one with scale .2 + and aspect ratio 1/2. Each box is multiplied by "base_anchor_size" before + placing it over its respective center. + + Grid points are specified via grid_height, grid_width parameters as well as + the anchor_stride and anchor_offset parameters. + + Args: + grid_height: size of the grid in the y direction (int or int scalar tensor) + grid_width: size of the grid in the x direction (int or int scalar tensor) + scales: a 1-d (float) tensor representing the scale of each box in the + basis set. + aspect_ratios: a 1-d (float) tensor representing the aspect ratio of each + box in the basis set. The length of the scales and aspect_ratios tensors + must be equal. 
+ base_anchor_size: base anchor size as [height, width] + (float tensor of shape [2]) + anchor_stride: difference in centers between base anchors for adjacent grid + positions (float tensor of shape [2]) + anchor_offset: center of the anchor with scale and aspect ratio 1 for the + upper left element of the grid, this should be zero for + feature networks with only VALID padding and even receptive + field size, but may need some additional calculation if other + padding is used (float tensor of shape [2]) + Returns: + a BoxList holding a collection of N anchor boxes + """ + ratio_sqrts = tf.sqrt(aspect_ratios) + heights = scales / ratio_sqrts * base_anchor_size[0] + widths = scales * ratio_sqrts * base_anchor_size[1] + + # Get a grid of box centers + y_centers = tf.cast(tf.range(grid_height), dtype=tf.float32) + y_centers = y_centers * anchor_stride[0] + anchor_offset[0] + x_centers = tf.cast(tf.range(grid_width), dtype=tf.float32) + x_centers = x_centers * anchor_stride[1] + anchor_offset[1] + x_centers, y_centers = ops.meshgrid(x_centers, y_centers) + + widths_grid, x_centers_grid = ops.meshgrid(widths, x_centers) + heights_grid, y_centers_grid = ops.meshgrid(heights, y_centers) + bbox_centers = tf.stack([y_centers_grid, x_centers_grid], axis=3) + bbox_sizes = tf.stack([heights_grid, widths_grid], axis=3) + bbox_centers = tf.reshape(bbox_centers, [-1, 2]) + bbox_sizes = tf.reshape(bbox_sizes, [-1, 2]) + bbox_corners = _center_size_bbox_to_corners_bbox(bbox_centers, bbox_sizes) + return box_list.BoxList(bbox_corners) + + +def _center_size_bbox_to_corners_bbox(centers, sizes): + """Converts bbox center-size representation to corners representation. + + Args: + centers: a tensor with shape [N, 2] representing bounding box centers + sizes: a tensor with shape [N, 2] representing bounding boxes + + Returns: + corners: tensor with shape [N, 4] representing bounding boxes in corners + representation + """ + return tf.concat([centers - .5 * sizes, centers + .5 * sizes], 1) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/grid_anchor_generator.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/grid_anchor_generator.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cc1d568630e15d3de2418f313a0bf920079a4f9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/grid_anchor_generator.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/grid_anchor_generator_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/grid_anchor_generator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..292076ea1e918607e8114b8ed317452f40062afd --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/grid_anchor_generator_test.py @@ -0,0 +1,104 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.grid_anchor_generator.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.utils import test_case + + +class GridAnchorGeneratorTest(test_case.TestCase): + + def test_construct_single_anchor(self): + """Builds a 1x1 anchor grid to test the size of the output boxes.""" + def graph_fn(): + scales = [0.5, 1.0, 2.0] + aspect_ratios = [0.25, 1.0, 4.0] + anchor_offset = [7, -3] + anchor_generator = grid_anchor_generator.GridAnchorGenerator( + scales, aspect_ratios, anchor_offset=anchor_offset) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(1, 1)]) + anchor_corners = anchors_list[0].get() + return (anchor_corners,) + exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61], + [-505, -131, 519, 125], [-57, -67, 71, 61], + [-121, -131, 135, 125], [-249, -259, 263, 253], + [-25, -131, 39, 125], [-57, -259, 71, 253], + [-121, -515, 135, 509]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_anchor_grid(self): + def graph_fn(): + base_anchor_size = [10, 10] + anchor_stride = [19, 19] + anchor_offset = [0, 0] + scales = [0.5, 1.0, 2.0] + aspect_ratios = [1.0] + + anchor_generator = grid_anchor_generator.GridAnchorGenerator( + scales, + aspect_ratios, + base_anchor_size=base_anchor_size, + anchor_stride=anchor_stride, + anchor_offset=anchor_offset) + + anchors_list = anchor_generator.generate(feature_map_shape_list=[(2, 2)]) + anchor_corners = anchors_list[0].get() + return (anchor_corners,) + exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.], + [-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5], + [-5., 14., 5, 24], [-10., 9., 10, 29], + [16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5], + [9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5], + [14., 14., 24, 24], [9., 9., 29, 29]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_anchor_grid_with_dynamic_feature_map_shapes(self): + def graph_fn(feature_map_height, feature_map_width): + base_anchor_size = [10, 10] + anchor_stride = [19, 19] + anchor_offset = [0, 0] + scales = [0.5, 1.0, 2.0] + aspect_ratios = [1.0] + anchor_generator = grid_anchor_generator.GridAnchorGenerator( + scales, + aspect_ratios, + base_anchor_size=base_anchor_size, + anchor_stride=anchor_stride, + anchor_offset=anchor_offset) + + anchors_list = anchor_generator.generate( + feature_map_shape_list=[(feature_map_height, feature_map_width)]) + anchor_corners = anchors_list[0].get() + return (anchor_corners,) + + exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.], + [-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5], + [-5., 14., 5, 24], [-10., 9., 10, 29], + [16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5], + [9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5], + [14., 14., 24, 24], [9., 9., 29, 29]] + anchor_corners_out = self.execute_cpu(graph_fn, + [np.array(2, dtype=np.int32), + np.array(2, dtype=np.int32)]) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiple_grid_anchor_generator.py 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiple_grid_anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..5da24d4192c93a0e05a7dd48cce1ae823ae6b60d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiple_grid_anchor_generator.py @@ -0,0 +1,342 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Generates grid anchors on the fly corresponding to multiple CNN layers. + +Generates grid anchors on the fly corresponding to multiple CNN layers as +described in: +"SSD: Single Shot MultiBox Detector" +Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, +Cheng-Yang Fu, Alexander C. Berg +(see Section 2.2: Choosing scales and aspect ratios for default boxes) +""" + +import numpy as np + +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.core import anchor_generator +from object_detection.core import box_list_ops + + +class MultipleGridAnchorGenerator(anchor_generator.AnchorGenerator): + """Generate a grid of anchors for multiple CNN layers.""" + + def __init__(self, + box_specs_list, + base_anchor_size=None, + anchor_strides=None, + anchor_offsets=None, + clip_window=None): + """Constructs a MultipleGridAnchorGenerator. + + To construct anchors, at multiple grid resolutions, one must provide a + list of feature_map_shape_list (e.g., [(8, 8), (4, 4)]), and for each grid + size, a corresponding list of (scale, aspect ratio) box specifications. + + For example: + box_specs_list = [[(.1, 1.0), (.1, 2.0)], # for 8x8 grid + [(.2, 1.0), (.3, 1.0), (.2, 2.0)]] # for 4x4 grid + + To support the fully convolutional setting, we pass grid sizes in at + generation time, while scale and aspect ratios are fixed at construction + time. + + Args: + box_specs_list: list of list of (scale, aspect ratio) pairs with the + outside list having the same number of entries as feature_map_shape_list + (which is passed in at generation time). + base_anchor_size: base anchor size as [height, width] + (length-2 float numpy or Tensor, default=[1.0, 1.0]). + The height and width values are normalized to the + minimum dimension of the input height and width, so that + when the base anchor height equals the base anchor + width, the resulting anchor is square even if the input + image is not square. + anchor_strides: list of pairs of strides in pixels (in y and x directions + respectively). For example, setting anchor_strides=[(25, 25), (50, 50)] + means that we want the anchors corresponding to the first layer to be + strided by 25 pixels and those in the second layer to be strided by 50 + pixels in both y and x directions. If anchor_strides=None, they are set + to be the reciprocal of the corresponding feature map shapes. 
+ anchor_offsets: list of pairs of offsets in pixels (in y and x directions + respectively). The offset specifies where we want the center of the + (0, 0)-th anchor to lie for each layer. For example, setting + anchor_offsets=[(10, 10), (20, 20)]) means that we want the + (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space + and likewise that we want the (0, 0)-th anchor of the second layer to + lie at (25, 25) in pixel space. If anchor_offsets=None, then they are + set to be half of the corresponding anchor stride. + clip_window: a tensor of shape [4] specifying a window to which all + anchors should be clipped. If clip_window is None, then no clipping + is performed. + + Raises: + ValueError: if box_specs_list is not a list of list of pairs + ValueError: if clip_window is not either None or a tensor of shape [4] + """ + if isinstance(box_specs_list, list) and all( + [isinstance(list_item, list) for list_item in box_specs_list]): + self._box_specs = box_specs_list + else: + raise ValueError('box_specs_list is expected to be a ' + 'list of lists of pairs') + if base_anchor_size is None: + base_anchor_size = [256, 256] + self._base_anchor_size = base_anchor_size + self._anchor_strides = anchor_strides + self._anchor_offsets = anchor_offsets + if clip_window is not None and clip_window.get_shape().as_list() != [4]: + raise ValueError('clip_window must either be None or a shape [4] tensor') + self._clip_window = clip_window + self._scales = [] + self._aspect_ratios = [] + for box_spec in self._box_specs: + if not all([isinstance(entry, tuple) and len(entry) == 2 + for entry in box_spec]): + raise ValueError('box_specs_list is expected to be a ' + 'list of lists of pairs') + scales, aspect_ratios = zip(*box_spec) + self._scales.append(scales) + self._aspect_ratios.append(aspect_ratios) + + for arg, arg_name in zip([self._anchor_strides, self._anchor_offsets], + ['anchor_strides', 'anchor_offsets']): + if arg and not (isinstance(arg, list) and + len(arg) == len(self._box_specs)): + raise ValueError('%s must be a list with the same length ' + 'as self._box_specs' % arg_name) + if arg and not all([ + isinstance(list_item, tuple) and len(list_item) == 2 + for list_item in arg + ]): + raise ValueError('%s must be a list of pairs.' % arg_name) + + def name_scope(self): + return 'MultipleGridAnchorGenerator' + + def num_anchors_per_location(self): + """Returns the number of anchors per spatial location. + + Returns: + a list of integers, one for each expected feature map to be passed to + the Generate function. + """ + return [len(box_specs) for box_specs in self._box_specs] + + def _generate(self, feature_map_shape_list, im_height=1, im_width=1): + """Generates a collection of bounding boxes to be used as anchors. + + The number of anchors generated for a single grid with shape MxM where we + place k boxes over each grid center is k*M^2 and thus the total number of + anchors is the sum over all grids. In our box_specs_list example + (see the constructor docstring), we would place two boxes over each grid + point on an 8x8 grid and three boxes over each grid point on a 4x4 grid and + thus end up with 2*8^2 + 3*4^2 = 176 anchors in total. The layout of the + output anchors follows the order of how the grid sizes and box_specs are + specified (with box_spec index varying the fastest, followed by width + index, then height index, then grid index). 
+ + Args: + feature_map_shape_list: list of pairs of convnet layer resolutions in the + format [(height_0, width_0), (height_1, width_1), ...]. For example, + setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that + correspond to an 8x8 layer followed by a 7x7 layer. + im_height: the height of the image to generate the grid for. If both + im_height and im_width are 1, the generated anchors default to + absolute coordinates, otherwise normalized coordinates are produced. + im_width: the width of the image to generate the grid for. If both + im_height and im_width are 1, the generated anchors default to + absolute coordinates, otherwise normalized coordinates are produced. + + Returns: + boxes_list: a list of BoxLists each holding anchor boxes corresponding to + the input feature map shapes. + + Raises: + ValueError: if feature_map_shape_list, box_specs_list do not have the same + length. + ValueError: if feature_map_shape_list does not consist of pairs of + integers + """ + if not (isinstance(feature_map_shape_list, list) + and len(feature_map_shape_list) == len(self._box_specs)): + raise ValueError('feature_map_shape_list must be a list with the same ' + 'length as self._box_specs') + if not all([isinstance(list_item, tuple) and len(list_item) == 2 + for list_item in feature_map_shape_list]): + raise ValueError('feature_map_shape_list must be a list of pairs.') + + im_height = tf.cast(im_height, dtype=tf.float32) + im_width = tf.cast(im_width, dtype=tf.float32) + + if not self._anchor_strides: + anchor_strides = [(1.0 / tf.cast(pair[0], dtype=tf.float32), + 1.0 / tf.cast(pair[1], dtype=tf.float32)) + for pair in feature_map_shape_list] + else: + anchor_strides = [(tf.cast(stride[0], dtype=tf.float32) / im_height, + tf.cast(stride[1], dtype=tf.float32) / im_width) + for stride in self._anchor_strides] + if not self._anchor_offsets: + anchor_offsets = [(0.5 * stride[0], 0.5 * stride[1]) + for stride in anchor_strides] + else: + anchor_offsets = [(tf.cast(offset[0], dtype=tf.float32) / im_height, + tf.cast(offset[1], dtype=tf.float32) / im_width) + for offset in self._anchor_offsets] + + for arg, arg_name in zip([anchor_strides, anchor_offsets], + ['anchor_strides', 'anchor_offsets']): + if not (isinstance(arg, list) and len(arg) == len(self._box_specs)): + raise ValueError('%s must be a list with the same length ' + 'as self._box_specs' % arg_name) + if not all([isinstance(list_item, tuple) and len(list_item) == 2 + for list_item in arg]): + raise ValueError('%s must be a list of pairs.' 
% arg_name) + + anchor_grid_list = [] + min_im_shape = tf.minimum(im_height, im_width) + scale_height = min_im_shape / im_height + scale_width = min_im_shape / im_width + if not tf.is_tensor(self._base_anchor_size): + base_anchor_size = [ + scale_height * tf.constant(self._base_anchor_size[0], + dtype=tf.float32), + scale_width * tf.constant(self._base_anchor_size[1], + dtype=tf.float32) + ] + else: + base_anchor_size = [ + scale_height * self._base_anchor_size[0], + scale_width * self._base_anchor_size[1] + ] + for feature_map_index, (grid_size, scales, aspect_ratios, stride, + offset) in enumerate( + zip(feature_map_shape_list, self._scales, + self._aspect_ratios, anchor_strides, + anchor_offsets)): + tiled_anchors = grid_anchor_generator.tile_anchors( + grid_height=grid_size[0], + grid_width=grid_size[1], + scales=scales, + aspect_ratios=aspect_ratios, + base_anchor_size=base_anchor_size, + anchor_stride=stride, + anchor_offset=offset) + if self._clip_window is not None: + tiled_anchors = box_list_ops.clip_to_window( + tiled_anchors, self._clip_window, filter_nonoverlapping=False) + num_anchors_in_layer = tiled_anchors.num_boxes_static() + if num_anchors_in_layer is None: + num_anchors_in_layer = tiled_anchors.num_boxes() + anchor_indices = feature_map_index * tf.ones([num_anchors_in_layer]) + tiled_anchors.add_field('feature_map_index', anchor_indices) + anchor_grid_list.append(tiled_anchors) + + return anchor_grid_list + + +def create_ssd_anchors(num_layers=6, + min_scale=0.2, + max_scale=0.95, + scales=None, + aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3), + interpolated_scale_aspect_ratio=1.0, + base_anchor_size=None, + anchor_strides=None, + anchor_offsets=None, + reduce_boxes_in_lowest_layer=True): + """Creates MultipleGridAnchorGenerator for SSD anchors. + + This function instantiates a MultipleGridAnchorGenerator that reproduces + ``default box`` construction proposed by Liu et al in the SSD paper. + See Section 2.2 for details. Grid sizes are assumed to be passed in + at generation time from finest resolution to coarsest resolution --- this is + used to (linearly) interpolate scales of anchor boxes corresponding to the + intermediate grid sizes. + + Anchors that are returned by calling the `generate` method on the returned + MultipleGridAnchorGenerator object are always in normalized coordinates + and clipped to the unit square: (i.e. all coordinates lie in [0, 1]x[0, 1]). + + Args: + num_layers: integer number of grid layers to create anchors for (actual + grid sizes passed in at generation time) + min_scale: scale of anchors corresponding to finest resolution (float) + max_scale: scale of anchors corresponding to coarsest resolution (float) + scales: As list of anchor scales to use. When not None and not empty, + min_scale and max_scale are not used. + aspect_ratios: list or tuple of (float) aspect ratios to place on each + grid point. + interpolated_scale_aspect_ratio: An additional anchor is added with this + aspect ratio and a scale interpolated between the scale for a layer + and the scale for the next layer (1.0 for the last layer). + This anchor is not included if this value is 0. + base_anchor_size: base anchor size as [height, width]. + The height and width values are normalized to the minimum dimension of the + input height and width, so that when the base anchor height equals the + base anchor width, the resulting anchor is square even if the input image + is not square. + anchor_strides: list of pairs of strides in pixels (in y and x directions + respectively). 
For example, setting anchor_strides=[(25, 25), (50, 50)] + means that we want the anchors corresponding to the first layer to be + strided by 25 pixels and those in the second layer to be strided by 50 + pixels in both y and x directions. If anchor_strides=None, they are set to + be the reciprocal of the corresponding feature map shapes. + anchor_offsets: list of pairs of offsets in pixels (in y and x directions + respectively). The offset specifies where we want the center of the + (0, 0)-th anchor to lie for each layer. For example, setting + anchor_offsets=[(10, 10), (20, 20)]) means that we want the + (0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space + and likewise that we want the (0, 0)-th anchor of the second layer to lie + at (25, 25) in pixel space. If anchor_offsets=None, then they are set to + be half of the corresponding anchor stride. + reduce_boxes_in_lowest_layer: a boolean to indicate whether the fixed 3 + boxes per location is used in the lowest layer. + + Returns: + a MultipleGridAnchorGenerator + """ + if base_anchor_size is None: + base_anchor_size = [1.0, 1.0] + box_specs_list = [] + if scales is None or not scales: + scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1) + for i in range(num_layers)] + [1.0] + else: + # Add 1.0 to the end, which will only be used in scale_next below and used + # for computing an interpolated scale for the largest scale in the list. + scales += [1.0] + + for layer, scale, scale_next in zip( + range(num_layers), scales[:-1], scales[1:]): + layer_box_specs = [] + if layer == 0 and reduce_boxes_in_lowest_layer: + layer_box_specs = [(0.1, 1.0), (scale, 2.0), (scale, 0.5)] + else: + for aspect_ratio in aspect_ratios: + layer_box_specs.append((scale, aspect_ratio)) + # Add one more anchor, with a scale between the current scale, and the + # scale for the next layer, with a specified aspect ratio (1.0 by + # default). + if interpolated_scale_aspect_ratio > 0.0: + layer_box_specs.append((np.sqrt(scale*scale_next), + interpolated_scale_aspect_ratio)) + box_specs_list.append(layer_box_specs) + + return MultipleGridAnchorGenerator(box_specs_list, base_anchor_size, + anchor_strides, anchor_offsets) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiple_grid_anchor_generator.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiple_grid_anchor_generator.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8abcf1d558411e1544fdcec292a7d80a00dc68b6 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiple_grid_anchor_generator.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiple_grid_anchor_generator_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiple_grid_anchor_generator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cc507eec6487647cdab939b508cf85b58cdf20 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiple_grid_anchor_generator_test.py @@ -0,0 +1,289 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
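As a worked check of the scale interpolation described above, the sketch below drives `create_ssd_anchors` with the same configuration the accompanying test exercises; it is illustrative only and assumes the vendored `object_detection` package is on the path.

```python
from object_detection.anchor_generators import multiple_grid_anchor_generator as mg

anchor_generator = mg.create_ssd_anchors(
    num_layers=6,
    min_scale=0.2,
    max_scale=0.95,
    aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3),
    reduce_boxes_in_lowest_layer=True)

# Scales are linearly interpolated: s_i = 0.2 + (0.95 - 0.2) * i / 5, i = 0..5.
# Layer 0 keeps the reduced 3-box spec; every other layer gets the 5 aspect
# ratios plus one interpolated-scale box, i.e. 6 boxes per location.
print(anchor_generator.num_anchors_per_location())  # [3, 6, 6, 6, 6, 6]

feature_map_shape_list = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
anchors_list = anchor_generator.generate(
    feature_map_shape_list, im_height=300, im_width=300)
# Total anchors: 3*38**2 + 6*(19**2 + 10**2 + 5**2 + 3**2 + 1**2) = 7308,
# the count asserted by the test file that follows.
```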
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for anchor_generators.multiple_grid_anchor_generator_test.py.""" + +import numpy as np + +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import multiple_grid_anchor_generator as ag +from object_detection.utils import test_case + + +class MultipleGridAnchorGeneratorTest(test_case.TestCase): + + def test_construct_single_anchor_grid(self): + """Builds a 1x1 anchor grid to test the size of the output boxes.""" + def graph_fn(): + + box_specs_list = [[(.5, .25), (1.0, .25), (2.0, .25), + (.5, 1.0), (1.0, 1.0), (2.0, 1.0), + (.5, 4.0), (1.0, 4.0), (2.0, 4.0)]] + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([256, 256], dtype=tf.float32), + anchor_strides=[(16, 16)], + anchor_offsets=[(7, -3)]) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(1, 1)]) + return anchors_list[0].get() + exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61], + [-505, -131, 519, 125], [-57, -67, 71, 61], + [-121, -131, 135, 125], [-249, -259, 263, 253], + [-25, -131, 39, 125], [-57, -259, 71, 253], + [-121, -515, 135, 509]] + + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_anchor_grid(self): + def graph_fn(): + box_specs_list = [[(0.5, 1.0), (1.0, 1.0), (2.0, 1.0)]] + + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([10, 10], dtype=tf.float32), + anchor_strides=[(19, 19)], + anchor_offsets=[(0, 0)]) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(2, 2)]) + return anchors_list[0].get() + exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.], + [-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5], + [-5., 14., 5, 24], [-10., 9., 10, 29], + [16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5], + [9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5], + [14., 14., 24, 24], [9., 9., 29, 29]] + + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_anchor_grid_non_square(self): + + def graph_fn(): + box_specs_list = [[(1.0, 1.0)]] + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, base_anchor_size=tf.constant([1, 1], + dtype=tf.float32)) + anchors_list = anchor_generator.generate(feature_map_shape_list=[( + tf.constant(1, dtype=tf.int32), tf.constant(2, dtype=tf.int32))]) + return anchors_list[0].get() + + exp_anchor_corners = [[0., -0.25, 1., 0.75], [0., 0.25, 1., 1.25]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_dynamic_size_anchor_grid(self): + + def graph_fn(height, width): + box_specs_list = [[(1.0, 1.0)]] + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, base_anchor_size=tf.constant([1, 1], + dtype=tf.float32)) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(height, + width)]) + return anchors_list[0].get() + + exp_anchor_corners = 
[[0., -0.25, 1., 0.75], [0., 0.25, 1., 1.25]] + + anchor_corners_out = self.execute_cpu(graph_fn, + [np.array(1, dtype=np.int32), + np.array(2, dtype=np.int32)]) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_anchor_grid_normalized(self): + def graph_fn(): + box_specs_list = [[(1.0, 1.0)]] + + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, base_anchor_size=tf.constant([1, 1], + dtype=tf.float32)) + anchors_list = anchor_generator.generate( + feature_map_shape_list=[(tf.constant(1, dtype=tf.int32), tf.constant( + 2, dtype=tf.int32))], + im_height=320, + im_width=640) + return anchors_list[0].get() + + exp_anchor_corners = [[0., 0., 1., 0.5], [0., 0.5, 1., 1.]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_multiple_grids(self): + + def graph_fn(): + box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)], + [(1.0, 1.0), (1.0, 0.5)]] + + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25), (.5, .5)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(4, 4), ( + 2, 2)]) + return [anchors.get() for anchors in anchors_list] + # height and width of box with .5 aspect ratio + h = np.sqrt(2) + w = 1.0/np.sqrt(2) + exp_small_grid_corners = [[-.25, -.25, .75, .75], + [.25-.5*h, .25-.5*w, .25+.5*h, .25+.5*w], + [-.25, .25, .75, 1.25], + [.25-.5*h, .75-.5*w, .25+.5*h, .75+.5*w], + [.25, -.25, 1.25, .75], + [.75-.5*h, .25-.5*w, .75+.5*h, .25+.5*w], + [.25, .25, 1.25, 1.25], + [.75-.5*h, .75-.5*w, .75+.5*h, .75+.5*w]] + # only test first entry of larger set of anchors + exp_big_grid_corners = [[.125-.5, .125-.5, .125+.5, .125+.5], + [.125-1.0, .125-1.0, .125+1.0, .125+1.0], + [.125-.5*h, .125-.5*w, .125+.5*h, .125+.5*w],] + + anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0) + self.assertEquals(anchor_corners_out.shape, (56, 4)) + big_grid_corners = anchor_corners_out[0:3, :] + small_grid_corners = anchor_corners_out[48:, :] + self.assertAllClose(small_grid_corners, exp_small_grid_corners) + self.assertAllClose(big_grid_corners, exp_big_grid_corners) + + def test_construct_multiple_grids_with_clipping(self): + + def graph_fn(): + box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)], + [(1.0, 1.0), (1.0, 0.5)]] + + clip_window = tf.constant([0, 0, 1, 1], dtype=tf.float32) + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + clip_window=clip_window) + anchors_list = anchor_generator.generate(feature_map_shape_list=[(4, 4), ( + 2, 2)]) + return [anchors.get() for anchors in anchors_list] + # height and width of box with .5 aspect ratio + h = np.sqrt(2) + w = 1.0/np.sqrt(2) + exp_small_grid_corners = [[0, 0, .75, .75], + [0, 0, .25+.5*h, .25+.5*w], + [0, .25, .75, 1], + [0, .75-.5*w, .25+.5*h, 1], + [.25, 0, 1, .75], + [.75-.5*h, 0, 1, .25+.5*w], + [.25, .25, 1, 1], + [.75-.5*h, .75-.5*w, 1, 1]] + + anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0) + small_grid_corners = anchor_corners_out[48:, :] + self.assertAllClose(small_grid_corners, exp_small_grid_corners) + + def test_invalid_box_specs(self): + # not all box specs are pairs + box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)], + [(1.0, 1.0), (1.0, 0.5, .3)]] + with self.assertRaises(ValueError): + 
ag.MultipleGridAnchorGenerator(box_specs_list) + + # box_specs_list is not a list of lists + box_specs_list = [(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)] + with self.assertRaises(ValueError): + ag.MultipleGridAnchorGenerator(box_specs_list) + + def test_invalid_generate_arguments(self): + box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)], + [(1.0, 1.0), (1.0, 0.5)]] + + # incompatible lengths with box_specs_list + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)]) + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25), (.5, .5)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2), (1, 1)]) + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.5, .5)], + anchor_offsets=[(.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)]) + + # not pairs + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25), (.5, .5)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4, 4, 4), (2, 2)]) + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25, .1), (.5, .5)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)]) + with self.assertRaises(ValueError): + anchor_generator = ag.MultipleGridAnchorGenerator( + box_specs_list, + base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32), + anchor_strides=[(.25, .25), (.5, .5)], + anchor_offsets=[(.125, .125), (.25, .25)]) + anchor_generator.generate(feature_map_shape_list=[(4), (2, 2)]) + + +class CreateSSDAnchorsTest(test_case.TestCase): + + def test_create_ssd_anchors_returns_correct_shape(self): + + def graph_fn1(): + anchor_generator = ag.create_ssd_anchors( + num_layers=6, + min_scale=0.2, + max_scale=0.95, + aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3), + reduce_boxes_in_lowest_layer=True) + + feature_map_shape_list = [(38, 38), (19, 19), (10, 10), + (5, 5), (3, 3), (1, 1)] + anchors_list = anchor_generator.generate( + feature_map_shape_list=feature_map_shape_list) + return [anchors.get() for anchors in anchors_list] + anchor_corners_out = np.concatenate(self.execute(graph_fn1, []), axis=0) + self.assertEquals(anchor_corners_out.shape, (7308, 4)) + + def graph_fn2(): + anchor_generator = ag.create_ssd_anchors( + num_layers=6, min_scale=0.2, max_scale=0.95, + aspect_ratios=(1.0, 2.0, 3.0, 1.0/2, 1.0/3), + reduce_boxes_in_lowest_layer=False) + + feature_map_shape_list = [(38, 38), (19, 19), (10, 10), + (5, 5), (3, 3), (1, 1)] + anchors_list = anchor_generator.generate( + feature_map_shape_list=feature_map_shape_list) + return [anchors.get() for anchors in anchors_list] + anchor_corners_out = np.concatenate(self.execute(graph_fn2, []), axis=0) + 
self.assertEquals(anchor_corners_out.shape, (11640, 4)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiscale_grid_anchor_generator.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiscale_grid_anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..a3244e1b196e58c46713059fa68ac2f22f7962ef --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiscale_grid_anchor_generator.py @@ -0,0 +1,152 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Generates grid anchors on the fly corresponding to multiple CNN layers. + +Generates grid anchors on the fly corresponding to multiple CNN layers as +described in: +"Focal Loss for Dense Object Detection" (https://arxiv.org/abs/1708.02002) +T.-Y. Lin, P. Goyal, R. Girshick, K. He, P. Dollar +""" + +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.core import anchor_generator +from object_detection.core import box_list_ops + + +class MultiscaleGridAnchorGenerator(anchor_generator.AnchorGenerator): + """Generate a grid of anchors for multiple CNN layers of different scale.""" + + def __init__(self, min_level, max_level, anchor_scale, aspect_ratios, + scales_per_octave, normalize_coordinates=True): + """Constructs a MultiscaleGridAnchorGenerator. + + To construct anchors, at multiple scale resolutions, one must provide a + the minimum level and maximum levels on a scale pyramid. To define the size + of anchor, the anchor scale is provided to decide the size relatively to the + stride of the corresponding feature map. The generator allows one pixel + location on feature map maps to multiple anchors, that have different aspect + ratios and intermediate scales. + + Args: + min_level: minimum level in feature pyramid. + max_level: maximum level in feature pyramid. + anchor_scale: anchor scale and feature stride define the size of the base + anchor on an image. For example, given a feature pyramid with strides + [2^3, ..., 2^7] and anchor scale 4. The base anchor size is + 4 * [2^3, ..., 2^7]. + aspect_ratios: list or tuple of (float) aspect ratios to place on each + grid point. + scales_per_octave: integer number of intermediate scales per scale octave. + normalize_coordinates: whether to produce anchors in normalized + coordinates. (defaults to True). 
+ """ + self._anchor_grid_info = [] + self._aspect_ratios = aspect_ratios + self._scales_per_octave = scales_per_octave + self._normalize_coordinates = normalize_coordinates + + scales = [2**(float(scale) / scales_per_octave) + for scale in range(scales_per_octave)] + aspects = list(aspect_ratios) + + for level in range(min_level, max_level + 1): + anchor_stride = [2**level, 2**level] + base_anchor_size = [2**level * anchor_scale, 2**level * anchor_scale] + self._anchor_grid_info.append({ + 'level': level, + 'info': [scales, aspects, base_anchor_size, anchor_stride] + }) + + def name_scope(self): + return 'MultiscaleGridAnchorGenerator' + + def num_anchors_per_location(self): + """Returns the number of anchors per spatial location. + + Returns: + a list of integers, one for each expected feature map to be passed to + the Generate function. + """ + return len(self._anchor_grid_info) * [ + len(self._aspect_ratios) * self._scales_per_octave] + + def _generate(self, feature_map_shape_list, im_height=1, im_width=1): + """Generates a collection of bounding boxes to be used as anchors. + + For training, we require the input image shape to be statically defined. + That is, im_height and im_width should be integers rather than tensors. + For inference, im_height and im_width can be either integers (for fixed + image size), or tensors (for arbitrary image size). + + Args: + feature_map_shape_list: list of pairs of convnet layer resolutions in the + format [(height_0, width_0), (height_1, width_1), ...]. For example, + setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that + correspond to an 8x8 layer followed by a 7x7 layer. + im_height: the height of the image to generate the grid for. If both + im_height and im_width are 1, anchors can only be generated in + absolute coordinates. + im_width: the width of the image to generate the grid for. If both + im_height and im_width are 1, anchors can only be generated in + absolute coordinates. + + Returns: + boxes_list: a list of BoxLists each holding anchor boxes corresponding to + the input feature map shapes. + Raises: + ValueError: if im_height and im_width are not integers. + ValueError: if im_height and im_width are 1, but normalized coordinates + were requested. 
+ """ + anchor_grid_list = [] + for feat_shape, grid_info in zip(feature_map_shape_list, + self._anchor_grid_info): + # TODO(rathodv) check the feature_map_shape_list is consistent with + # self._anchor_grid_info + level = grid_info['level'] + stride = 2**level + scales, aspect_ratios, base_anchor_size, anchor_stride = grid_info['info'] + feat_h = feat_shape[0] + feat_w = feat_shape[1] + anchor_offset = [0, 0] + if isinstance(im_height, int) and isinstance(im_width, int): + if im_height % 2.0**level == 0 or im_height == 1: + anchor_offset[0] = stride / 2.0 + if im_width % 2.0**level == 0 or im_width == 1: + anchor_offset[1] = stride / 2.0 + if tf.is_tensor(im_height) and tf.is_tensor(im_width): + anchor_offset[0] = stride / 2.0 + anchor_offset[1] = stride / 2.0 + ag = grid_anchor_generator.GridAnchorGenerator( + scales, + aspect_ratios, + base_anchor_size=base_anchor_size, + anchor_stride=anchor_stride, + anchor_offset=anchor_offset) + (anchor_grid,) = ag.generate(feature_map_shape_list=[(feat_h, feat_w)]) + + if self._normalize_coordinates: + if im_height == 1 or im_width == 1: + raise ValueError( + 'Normalized coordinates were requested upon construction of the ' + 'MultiscaleGridAnchorGenerator, but a subsequent call to ' + 'generate did not supply dimension information.') + anchor_grid = box_list_ops.to_normalized_coordinates( + anchor_grid, im_height, im_width, check_range=False) + anchor_grid_list.append(anchor_grid) + + return anchor_grid_list diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiscale_grid_anchor_generator.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiscale_grid_anchor_generator.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c76ccfac7235282542a74cf1a6418303cc040ca Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiscale_grid_anchor_generator.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiscale_grid_anchor_generator_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiscale_grid_anchor_generator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..82aa8d1df0b72d517c05d4dda1f6c2a7378d3d00 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/anchor_generators/multiscale_grid_anchor_generator_test.py @@ -0,0 +1,308 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for anchor_generators.multiscale_grid_anchor_generator_test.py.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.anchor_generators import multiscale_grid_anchor_generator as mg +from object_detection.utils import test_case + + +class MultiscaleGridAnchorGeneratorTest(test_case.TestCase): + + def test_construct_single_anchor(self): + def graph_fn(): + min_level = 5 + max_level = 5 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 64 + im_width = 64 + feature_map_shape_list = [(2, 2)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_unit_dimensions(self): + def graph_fn(): + min_level = 5 + max_level = 5 + anchor_scale = 1.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 1 + im_width = 1 + feature_map_shape_list = [(2, 2)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + + # Positive offsets are produced. 
+ exp_anchor_corners = [[0, 0, 32, 32], + [0, 32, 32, 64], + [32, 0, 64, 32], + [32, 32, 64, 64]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_normalized_anchors_fails_with_unit_dimensions(self): + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level=5, max_level=5, anchor_scale=1.0, aspect_ratios=[1.0], + scales_per_octave=1, normalize_coordinates=True) + with self.assertRaisesRegexp(ValueError, 'Normalized coordinates'): + anchor_generator.generate( + feature_map_shape_list=[(2, 2)], im_height=1, im_width=1) + + def test_construct_single_anchor_in_normalized_coordinates(self): + def graph_fn(): + min_level = 5 + max_level = 5 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 64 + im_width = 128 + feature_map_shape_list = [(2, 2)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=True) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + + exp_anchor_corners = [[-48./64, -48./128, 80./64, 80./128], + [-48./64, -16./128, 80./64, 112./128], + [-16./64, -48./128, 112./64, 80./128], + [-16./64, -16./128, 112./64, 112./128]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_num_anchors_per_location(self): + min_level = 5 + max_level = 6 + anchor_scale = 4.0 + aspect_ratios = [1.0, 2.0] + scales_per_octave = 3 + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + self.assertEqual(anchor_generator.num_anchors_per_location(), [6, 6]) + + def test_construct_single_anchor_dynamic_size(self): + def graph_fn(): + min_level = 5 + max_level = 5 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = tf.constant(64) + im_width = tf.constant(64) + feature_map_shape_list = [(2, 2)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return anchor_corners + + exp_anchor_corners = [[-64, -64, 64, 64], + [-64, -32, 64, 96], + [-32, -64, 96, 64], + [-32, -32, 96, 96]] + # Add anchor offset. 
+ anchor_offset = 2.0**5 / 2.0 + exp_anchor_corners = [ + [b + anchor_offset for b in a] for a in exp_anchor_corners + ] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_odd_input_dimension(self): + + def graph_fn(): + min_level = 5 + max_level = 5 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 65 + im_width = 65 + feature_map_shape_list = [(3, 3)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate( + feature_map_shape_list, im_height=im_height, im_width=im_width) + anchor_corners = anchors_list[0].get() + return (anchor_corners,) + anchor_corners_out = self.execute(graph_fn, []) + exp_anchor_corners = [[-64, -64, 64, 64], + [-64, -32, 64, 96], + [-64, 0, 64, 128], + [-32, -64, 96, 64], + [-32, -32, 96, 96], + [-32, 0, 96, 128], + [0, -64, 128, 64], + [0, -32, 128, 96], + [0, 0, 128, 128]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_on_two_feature_maps(self): + + def graph_fn(): + min_level = 5 + max_level = 6 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 64 + im_width = 64 + feature_map_shape_list = [(2, 2), (1, 1)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + + anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0) + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112], + [-96, -96, 160, 160]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_two_scales_per_octave(self): + + def graph_fn(): + min_level = 6 + max_level = 6 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 2 + im_height = 64 + im_width = 64 + feature_map_shape_list = [(1, 1)] + + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + # There are 4 set of anchors in this configuration. 
The order is: + # [[2**0.0 intermediate scale + 1.0 aspect], + # [2**0.5 intermediate scale + 1.0 aspect]] + exp_anchor_corners = [[-96., -96., 160., 160.], + [-149.0193, -149.0193, 213.0193, 213.0193]] + + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchor_with_two_scales_per_octave_and_aspect(self): + def graph_fn(): + min_level = 6 + max_level = 6 + anchor_scale = 4.0 + aspect_ratios = [1.0, 2.0] + scales_per_octave = 2 + im_height = 64 + im_width = 64 + feature_map_shape_list = [(1, 1)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + # There are 4 set of anchors in this configuration. The order is: + # [[2**0.0 intermediate scale + 1.0 aspect], + # [2**0.5 intermediate scale + 1.0 aspect], + # [2**0.0 intermediate scale + 2.0 aspect], + # [2**0.5 intermediate scale + 2.0 aspect]] + + exp_anchor_corners = [[-96., -96., 160., 160.], + [-149.0193, -149.0193, 213.0193, 213.0193], + [-58.50967, -149.0193, 122.50967, 213.0193], + [-96., -224., 160., 288.]] + anchor_corners_out = self.execute(graph_fn, []) + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + def test_construct_single_anchors_on_feature_maps_with_dynamic_shape(self): + + def graph_fn(feature_map1_height, feature_map1_width, feature_map2_height, + feature_map2_width): + min_level = 5 + max_level = 6 + anchor_scale = 4.0 + aspect_ratios = [1.0] + scales_per_octave = 1 + im_height = 64 + im_width = 64 + feature_map_shape_list = [(feature_map1_height, feature_map1_width), + (feature_map2_height, feature_map2_width)] + anchor_generator = mg.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates=False) + anchors_list = anchor_generator.generate(feature_map_shape_list, + im_height=im_height, + im_width=im_width) + anchor_corners = [anchors.get() for anchors in anchors_list] + return anchor_corners + + anchor_corners_out = np.concatenate( + self.execute_cpu(graph_fn, [ + np.array(2, dtype=np.int32), + np.array(2, dtype=np.int32), + np.array(1, dtype=np.int32), + np.array(1, dtype=np.int32) + ]), + axis=0) + exp_anchor_corners = [[-48, -48, 80, 80], + [-48, -16, 80, 112], + [-16, -48, 112, 80], + [-16, -16, 112, 112], + [-96, -96, 160, 160]] + self.assertAllClose(anchor_corners_out, exp_anchor_corners) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cd73f675fc468309bb81d2f53b818b0f3ceb93a Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__init__.pyc differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31ab166a36430ff55308063a07cd020de2f3f787 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/faster_rcnn_box_coder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/faster_rcnn_box_coder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00ba34a59d5bb2fe75000b2d909057ac490fd83c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/faster_rcnn_box_coder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/keypoint_box_coder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/keypoint_box_coder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e2ee2675261e7abb582874ebccc1ec7e9224732 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/keypoint_box_coder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/mean_stddev_box_coder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/mean_stddev_box_coder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdc3561becb2f4a04ee2eb02d16965d5d5c9c6de Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/mean_stddev_box_coder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/square_box_coder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/square_box_coder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a80f45b3ed81bf6a67fa1c3a8fbbf827c7708bc3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/__pycache__/square_box_coder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/faster_rcnn_box_coder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/faster_rcnn_box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..e06c1b12d2ccff5e4e2992554bd244c2f5b1a822 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/faster_rcnn_box_coder.py @@ -0,0 +1,118 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Faster RCNN box coder. + +Faster RCNN box coder follows the coding schema described below: + ty = (y - ya) / ha + tx = (x - xa) / wa + th = log(h / ha) + tw = log(w / wa) + where x, y, w, h denote the box's center coordinates, width and height + respectively. Similarly, xa, ya, wa, ha denote the anchor's center + coordinates, width and height. tx, ty, tw and th denote the anchor-encoded + center, width and height respectively. + + See http://arxiv.org/abs/1506.01497 for details. +""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import box_coder +from object_detection.core import box_list + +EPSILON = 1e-8 + + +class FasterRcnnBoxCoder(box_coder.BoxCoder): + """Faster RCNN box coder.""" + + def __init__(self, scale_factors=None): + """Constructor for FasterRcnnBoxCoder. + + Args: + scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. + If set to None, does not perform scaling. For Faster RCNN, + the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0]. + """ + if scale_factors: + assert len(scale_factors) == 4 + for scalar in scale_factors: + assert scalar > 0 + self._scale_factors = scale_factors + + @property + def code_size(self): + return 4 + + def _encode(self, boxes, anchors): + """Encode a box collection with respect to anchor collection. + + Args: + boxes: BoxList holding N boxes to be encoded. + anchors: BoxList of anchors. + + Returns: + a tensor representing N anchor-encoded boxes of the format + [ty, tx, th, tw]. + """ + # Convert anchors to the center coordinate representation. + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() + # Avoid NaN in division and log below. + ha += EPSILON + wa += EPSILON + h += EPSILON + w += EPSILON + + tx = (xcenter - xcenter_a) / wa + ty = (ycenter - ycenter_a) / ha + tw = tf.log(w / wa) + th = tf.log(h / ha) + # Scales location targets as used in paper for joint training. + if self._scale_factors: + ty *= self._scale_factors[0] + tx *= self._scale_factors[1] + th *= self._scale_factors[2] + tw *= self._scale_factors[3] + return tf.transpose(tf.stack([ty, tx, th, tw])) + + def _decode(self, rel_codes, anchors): + """Decode relative codes to boxes. + + Args: + rel_codes: a tensor representing N anchor-encoded boxes. + anchors: BoxList of anchors. + + Returns: + boxes: BoxList holding N bounding boxes. + """ + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + + ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes)) + if self._scale_factors: + ty /= self._scale_factors[0] + tx /= self._scale_factors[1] + th /= self._scale_factors[2] + tw /= self._scale_factors[3] + w = tf.exp(tw) * wa + h = tf.exp(th) * ha + ycenter = ty * ha + ycenter_a + xcenter = tx * wa + xcenter_a + ymin = ycenter - h / 2. + xmin = xcenter - w / 2. + ymax = ycenter + h / 2. + xmax = xcenter + w / 2. 
+ return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/faster_rcnn_box_coder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/faster_rcnn_box_coder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9def75cf84987d02d25c5f9f51005e994a9b7dad Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/faster_rcnn_box_coder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/faster_rcnn_box_coder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/faster_rcnn_box_coder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1cd48279af94ee67a2e7d2a96a703168965db897 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/faster_rcnn_box_coder_test.py @@ -0,0 +1,113 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.box_coder.faster_rcnn_box_coder.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import faster_rcnn_box_coder +from object_detection.core import box_list +from object_detection.utils import test_case + + +class FasterRcnnBoxCoderTest(test_case.TestCase): + + def test_get_correct_relative_codes_after_encoding(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + expected_rel_codes = [[-0.5, -0.416666, -0.405465, -0.182321], + [-0.083333, -0.222222, -0.693147, -1.098612]] + def graph_fn(boxes, anchors): + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = faster_rcnn_box_coder.FasterRcnnBoxCoder() + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_get_correct_relative_codes_after_encoding_with_scaling(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + expected_rel_codes = [[-1., -1.25, -1.62186, -0.911608], + [-0.166667, -0.666667, -2.772588, -5.493062]] + def graph_fn(boxes, anchors): + scale_factors = [2, 3, 4, 5] + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = faster_rcnn_box_coder.FasterRcnnBoxCoder( + scale_factors=scale_factors) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def 
test_get_correct_boxes_after_decoding(self): + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + rel_codes = np.array([[-0.5, -0.416666, -0.405465, -0.182321], + [-0.083333, -0.222222, -0.693147, -1.098612]], + np.float32) + expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]] + def graph_fn(rel_codes, anchors): + anchors = box_list.BoxList(anchors) + coder = faster_rcnn_box_coder.FasterRcnnBoxCoder() + boxes = coder.decode(rel_codes, anchors) + return boxes.get() + boxes_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, + atol=1e-04) + + def test_get_correct_boxes_after_decoding_with_scaling(self): + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + rel_codes = np.array([[-1., -1.25, -1.62186, -0.911608], + [-0.166667, -0.666667, -2.772588, -5.493062]], + np.float32) + expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]] + def graph_fn(rel_codes, anchors): + scale_factors = [2, 3, 4, 5] + anchors = box_list.BoxList(anchors) + coder = faster_rcnn_box_coder.FasterRcnnBoxCoder( + scale_factors=scale_factors) + boxes = coder.decode(rel_codes, anchors).get() + return boxes + boxes_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(expected_boxes, boxes_out, rtol=1e-04, + atol=1e-04) + + def test_very_small_Width_nan_after_encoding(self): + boxes = np.array([[10.0, 10.0, 10.0000001, 20.0]], np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0]], np.float32) + expected_rel_codes = [[-0.833333, 0., -21.128731, 0.510826]] + def graph_fn(boxes, anchors): + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = faster_rcnn_box_coder.FasterRcnnBoxCoder() + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/keypoint_box_coder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/keypoint_box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb4bf8b1849499f937b6f9f1d77fe2cf96a5eda --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/keypoint_box_coder.py @@ -0,0 +1,173 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Keypoint box coder. 
+ +The keypoint box coder follows the coding schema described below (this is +similar to the FasterRcnnBoxCoder, except that it encodes keypoints in addition +to box coordinates): + ty = (y - ya) / ha + tx = (x - xa) / wa + th = log(h / ha) + tw = log(w / wa) + tky0 = (ky0 - ya) / ha + tkx0 = (kx0 - xa) / wa + tky1 = (ky1 - ya) / ha + tkx1 = (kx1 - xa) / wa + ... + where x, y, w, h denote the box's center coordinates, width and height + respectively. Similarly, xa, ya, wa, ha denote the anchor's center + coordinates, width and height. tx, ty, tw and th denote the anchor-encoded + center, width and height respectively. ky0, kx0, ky1, kx1, ... denote the + keypoints' coordinates, and tky0, tkx0, tky1, tkx1, ... denote the + anchor-encoded keypoint coordinates. +""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import box_coder +from object_detection.core import box_list +from object_detection.core import standard_fields as fields + +EPSILON = 1e-8 + + +class KeypointBoxCoder(box_coder.BoxCoder): + """Keypoint box coder.""" + + def __init__(self, num_keypoints, scale_factors=None): + """Constructor for KeypointBoxCoder. + + Args: + num_keypoints: Number of keypoints to encode/decode. + scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. + In addition to scaling ty and tx, the first 2 scalars are used to scale + the y and x coordinates of the keypoints as well. If set to None, does + not perform scaling. + """ + self._num_keypoints = num_keypoints + + if scale_factors: + assert len(scale_factors) == 4 + for scalar in scale_factors: + assert scalar > 0 + self._scale_factors = scale_factors + self._keypoint_scale_factors = None + if scale_factors is not None: + self._keypoint_scale_factors = tf.expand_dims( + tf.tile([ + tf.cast(scale_factors[0], dtype=tf.float32), + tf.cast(scale_factors[1], dtype=tf.float32) + ], [num_keypoints]), 1) + + @property + def code_size(self): + return 4 + self._num_keypoints * 2 + + def _encode(self, boxes, anchors): + """Encode a box and keypoint collection with respect to anchor collection. + + Args: + boxes: BoxList holding N boxes and keypoints to be encoded. Boxes are + tensors with the shape [N, 4], and keypoints are tensors with the shape + [N, num_keypoints, 2]. + anchors: BoxList of anchors. + + Returns: + a tensor representing N anchor-encoded boxes of the format + [ty, tx, th, tw, tky0, tkx0, tky1, tkx1, ...] where tky0 and tkx0 + represent the y and x coordinates of the first keypoint, tky1 and tkx1 + represent the y and x coordinates of the second keypoint, and so on. + """ + # Convert anchors to the center coordinate representation. + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() + keypoints = boxes.get_field(fields.BoxListFields.keypoints) + keypoints = tf.transpose(tf.reshape(keypoints, + [-1, self._num_keypoints * 2])) + num_boxes = boxes.num_boxes() + + # Avoid NaN in division and log below. + ha += EPSILON + wa += EPSILON + h += EPSILON + w += EPSILON + + tx = (xcenter - xcenter_a) / wa + ty = (ycenter - ycenter_a) / ha + tw = tf.log(w / wa) + th = tf.log(h / ha) + + tiled_anchor_centers = tf.tile( + tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1]) + tiled_anchor_sizes = tf.tile( + tf.stack([ha, wa]), [self._num_keypoints, 1]) + tkeypoints = (keypoints - tiled_anchor_centers) / tiled_anchor_sizes + + # Scales location targets as used in paper for joint training. 
+ if self._scale_factors: + ty *= self._scale_factors[0] + tx *= self._scale_factors[1] + th *= self._scale_factors[2] + tw *= self._scale_factors[3] + tkeypoints *= tf.tile(self._keypoint_scale_factors, [1, num_boxes]) + + tboxes = tf.stack([ty, tx, th, tw]) + return tf.transpose(tf.concat([tboxes, tkeypoints], 0)) + + def _decode(self, rel_codes, anchors): + """Decode relative codes to boxes and keypoints. + + Args: + rel_codes: a tensor with shape [N, 4 + 2 * num_keypoints] representing N + anchor-encoded boxes and keypoints + anchors: BoxList of anchors. + + Returns: + boxes: BoxList holding N bounding boxes and keypoints. + """ + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + + num_codes = tf.shape(rel_codes)[0] + result = tf.unstack(tf.transpose(rel_codes)) + ty, tx, th, tw = result[:4] + tkeypoints = result[4:] + if self._scale_factors: + ty /= self._scale_factors[0] + tx /= self._scale_factors[1] + th /= self._scale_factors[2] + tw /= self._scale_factors[3] + tkeypoints /= tf.tile(self._keypoint_scale_factors, [1, num_codes]) + + w = tf.exp(tw) * wa + h = tf.exp(th) * ha + ycenter = ty * ha + ycenter_a + xcenter = tx * wa + xcenter_a + ymin = ycenter - h / 2. + xmin = xcenter - w / 2. + ymax = ycenter + h / 2. + xmax = xcenter + w / 2. + decoded_boxes_keypoints = box_list.BoxList( + tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))) + + tiled_anchor_centers = tf.tile( + tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1]) + tiled_anchor_sizes = tf.tile( + tf.stack([ha, wa]), [self._num_keypoints, 1]) + keypoints = tkeypoints * tiled_anchor_sizes + tiled_anchor_centers + keypoints = tf.reshape(tf.transpose(keypoints), + [-1, self._num_keypoints, 2]) + decoded_boxes_keypoints.add_field(fields.BoxListFields.keypoints, keypoints) + return decoded_boxes_keypoints diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/keypoint_box_coder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/keypoint_box_coder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf64d09130f6b332c1f2fe05a6d2b98f996307e3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/keypoint_box_coder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/keypoint_box_coder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/keypoint_box_coder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5748255c825cfa46a9db81c082c1341f1c476fbf --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/keypoint_box_coder_test.py @@ -0,0 +1,151 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
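To illustrate the coding schema shared by the two coders above, here is a minimal encode/decode round trip, reusing the box and anchor values from the Faster R-CNN coder's test and the scale factors its docstring recommends; it is a sketch only, assuming the vendored `object_detection` modules are importable.

```python
import tensorflow.compat.v1 as tf

from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.core import box_list

boxes = box_list.BoxList(tf.constant([[10.0, 10.0, 20.0, 15.0]], tf.float32))
anchors = box_list.BoxList(tf.constant([[15.0, 12.0, 30.0, 18.0]], tf.float32))

coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
    scale_factors=[10.0, 10.0, 5.0, 5.0])

# encode() applies ty = (y - ya)/ha, tx = (x - xa)/wa, th = log(h/ha),
# tw = log(w/wa), each multiplied by its scale factor.
rel_codes = coder.encode(boxes, anchors)

# decode() inverts the schema; the corners come back equal to the input
# boxes up to floating-point error.
decoded_corners = coder.decode(rel_codes, anchors).get()
```

The KeypointBoxCoder behaves the same way but appends 2 * num_keypoints additional targets, which is why its code_size is 4 + 2 * num_keypoints.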
+# ============================================================================== + +"""Tests for object_detection.box_coder.keypoint_box_coder.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import keypoint_box_coder +from object_detection.core import box_list +from object_detection.core import standard_fields as fields +from object_detection.utils import test_case + + +class KeypointBoxCoderTest(test_case.TestCase): + + def test_get_correct_relative_codes_after_encoding(self): + boxes = np.array([[10., 10., 20., 15.], + [0.2, 0.1, 0.5, 0.4]], np.float32) + keypoints = np.array([[[15., 12.], [10., 15.]], + [[0.5, 0.3], [0.2, 0.4]]], np.float32) + num_keypoints = len(keypoints[0]) + anchors = np.array([[15., 12., 30., 18.], + [0.1, 0.0, 0.7, 0.9]], np.float32) + expected_rel_codes = [ + [-0.5, -0.416666, -0.405465, -0.182321, + -0.5, -0.5, -0.833333, 0.], + [-0.083333, -0.222222, -0.693147, -1.098612, + 0.166667, -0.166667, -0.333333, -0.055556] + ] + def graph_fn(boxes, keypoints, anchors): + boxes = box_list.BoxList(boxes) + boxes.add_field(fields.BoxListFields.keypoints, keypoints) + anchors = box_list.BoxList(anchors) + coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_get_correct_relative_codes_after_encoding_with_scaling(self): + boxes = np.array([[10., 10., 20., 15.], + [0.2, 0.1, 0.5, 0.4]], np.float32) + keypoints = np.array([[[15., 12.], [10., 15.]], + [[0.5, 0.3], [0.2, 0.4]]], np.float32) + num_keypoints = len(keypoints[0]) + anchors = np.array([[15., 12., 30., 18.], + [0.1, 0.0, 0.7, 0.9]], np.float32) + expected_rel_codes = [ + [-1., -1.25, -1.62186, -0.911608, + -1.0, -1.5, -1.666667, 0.], + [-0.166667, -0.666667, -2.772588, -5.493062, + 0.333333, -0.5, -0.666667, -0.166667] + ] + def graph_fn(boxes, keypoints, anchors): + scale_factors = [2, 3, 4, 5] + boxes = box_list.BoxList(boxes) + boxes.add_field(fields.BoxListFields.keypoints, keypoints) + anchors = box_list.BoxList(anchors) + coder = keypoint_box_coder.KeypointBoxCoder( + num_keypoints, scale_factors=scale_factors) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_get_correct_boxes_after_decoding(self): + anchors = np.array([[15., 12., 30., 18.], + [0.1, 0.0, 0.7, 0.9]], np.float32) + rel_codes = np.array([ + [-0.5, -0.416666, -0.405465, -0.182321, + -0.5, -0.5, -0.833333, 0.], + [-0.083333, -0.222222, -0.693147, -1.098612, + 0.166667, -0.166667, -0.333333, -0.055556] + ], np.float32) + expected_boxes = [[10., 10., 20., 15.], + [0.2, 0.1, 0.5, 0.4]] + expected_keypoints = [[[15., 12.], [10., 15.]], + [[0.5, 0.3], [0.2, 0.4]]] + num_keypoints = len(expected_keypoints[0]) + def graph_fn(rel_codes, anchors): + anchors = box_list.BoxList(anchors) + coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints) + boxes = coder.decode(rel_codes, anchors) + return boxes.get(), boxes.get_field(fields.BoxListFields.keypoints) + boxes_out, keypoints_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(keypoints_out, expected_keypoints, rtol=1e-04, + atol=1e-04) + self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, + atol=1e-04) + + def 
test_get_correct_boxes_after_decoding_with_scaling(self): + anchors = np.array([[15., 12., 30., 18.], + [0.1, 0.0, 0.7, 0.9]], np.float32) + rel_codes = np.array([ + [-1., -1.25, -1.62186, -0.911608, + -1.0, -1.5, -1.666667, 0.], + [-0.166667, -0.666667, -2.772588, -5.493062, + 0.333333, -0.5, -0.666667, -0.166667] + ], np.float32) + expected_boxes = [[10., 10., 20., 15.], + [0.2, 0.1, 0.5, 0.4]] + expected_keypoints = [[[15., 12.], [10., 15.]], + [[0.5, 0.3], [0.2, 0.4]]] + num_keypoints = len(expected_keypoints[0]) + def graph_fn(rel_codes, anchors): + scale_factors = [2, 3, 4, 5] + anchors = box_list.BoxList(anchors) + coder = keypoint_box_coder.KeypointBoxCoder( + num_keypoints, scale_factors=scale_factors) + boxes = coder.decode(rel_codes, anchors) + return boxes.get(), boxes.get_field(fields.BoxListFields.keypoints) + boxes_out, keypoints_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(keypoints_out, expected_keypoints, rtol=1e-04, + atol=1e-04) + self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, + atol=1e-04) + + def test_very_small_width_nan_after_encoding(self): + boxes = np.array([[10., 10., 10.0000001, 20.]], np.float32) + keypoints = np.array([[[10., 10.], [10.0000001, 20.]]], np.float32) + anchors = np.array([[15., 12., 30., 18.]], np.float32) + expected_rel_codes = [[-0.833333, 0., -21.128731, 0.510826, + -0.833333, -0.833333, -0.833333, 0.833333]] + def graph_fn(boxes, keypoints, anchors): + boxes = box_list.BoxList(boxes) + boxes.add_field(fields.BoxListFields.keypoints, keypoints) + anchors = box_list.BoxList(anchors) + coder = keypoint_box_coder.KeypointBoxCoder(2) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, keypoints, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/mean_stddev_box_coder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/mean_stddev_box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..256f53fd036798cd7b3da8fcdd720c7e3c46e2e4 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/mean_stddev_box_coder.py @@ -0,0 +1,79 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Mean stddev box coder. + +This box coder use the following coding schema to encode boxes: +rel_code = (box_corner - anchor_corner_mean) / anchor_corner_stddev. +""" +from object_detection.core import box_coder +from object_detection.core import box_list + + +class MeanStddevBoxCoder(box_coder.BoxCoder): + """Mean stddev box coder.""" + + def __init__(self, stddev=0.01): + """Constructor for MeanStddevBoxCoder. 
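+
+    Encoding divides each corner offset by this value; for example, with the
+    default stddev of 0.01, a corner that sits 0.05 from its anchor corner
+    encodes to 5.0.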
+ + Args: + stddev: The standard deviation used to encode and decode boxes. + """ + self._stddev = stddev + + @property + def code_size(self): + return 4 + + def _encode(self, boxes, anchors): + """Encode a box collection with respect to anchor collection. + + Args: + boxes: BoxList holding N boxes to be encoded. + anchors: BoxList of N anchors. + + Returns: + a tensor representing N anchor-encoded boxes + + Raises: + ValueError: if the anchors still have deprecated stddev field. + """ + box_corners = boxes.get() + if anchors.has_field('stddev'): + raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and " + "should not be specified in the box list.") + means = anchors.get() + return (box_corners - means) / self._stddev + + def _decode(self, rel_codes, anchors): + """Decode. + + Args: + rel_codes: a tensor representing N anchor-encoded boxes. + anchors: BoxList of anchors. + + Returns: + boxes: BoxList holding N bounding boxes + + Raises: + ValueError: if the anchors still have deprecated stddev field and expects + the decode method to use stddev value from that field. + """ + means = anchors.get() + if anchors.has_field('stddev'): + raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and " + "should not be specified in the box list.") + box_corners = rel_codes * self._stddev + means + return box_list.BoxList(box_corners) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/mean_stddev_box_coder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/mean_stddev_box_coder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b1d9534a6eea3e24d778b9c3305c86366756d3c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/mean_stddev_box_coder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/mean_stddev_box_coder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/mean_stddev_box_coder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d94fff1187d3eb1b53e7ca525741f819cd944cc6 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/mean_stddev_box_coder_test.py @@ -0,0 +1,61 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.box_coder.mean_stddev_boxcoder.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.core import box_list +from object_detection.utils import test_case + + +class MeanStddevBoxCoderTest(test_case.TestCase): + + def testGetCorrectRelativeCodesAfterEncoding(self): + boxes = np.array([[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]], np.float32) + anchors = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]], np.float32) + expected_rel_codes = [[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]] + + def graph_fn(boxes, anchors): + anchors = box_list.BoxList(anchors) + boxes = box_list.BoxList(boxes) + coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def testGetCorrectBoxesAfterDecoding(self): + rel_codes = np.array([[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]], + np.float32) + expected_box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]] + anchors = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]], np.float32) + + def graph_fn(rel_codes, anchors): + anchors = box_list.BoxList(anchors) + coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + decoded_boxes = coder.decode(rel_codes, anchors).get() + return decoded_boxes + + decoded_boxes_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(decoded_boxes_out, expected_box_corners, rtol=1e-04, + atol=1e-04) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/square_box_coder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/square_box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..859320fd5024c2762dc935aa23ed437a8cff886b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/square_box_coder.py @@ -0,0 +1,126 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Square box coder. + +Square box coder follows the coding schema described below: +l = sqrt(h * w) +la = sqrt(ha * wa) +ty = (y - ya) / la +tx = (x - xa) / la +tl = log(l / la) +where x, y, w, h denote the box's center coordinates, width, and height, +respectively. Similarly, xa, ya, wa, ha denote the anchor's center +coordinates, width and height. tx, ty, tl denote the anchor-encoded +center, and length, respectively. Because the encoded box is a square, only +one length is encoded. 
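+
+For example, a 10x10 box whose center coincides with that of a 20x20 anchor
+encodes to ty = 0, tx = 0, tl = log(10 / 20), which is about -0.693.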
+ +This has shown to provide performance improvements over the Faster RCNN box +coder when the objects being detected tend to be square (e.g. faces) and when +the input images are not distorted via resizing. +""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import box_coder +from object_detection.core import box_list + +EPSILON = 1e-8 + + +class SquareBoxCoder(box_coder.BoxCoder): + """Encodes a 3-scalar representation of a square box.""" + + def __init__(self, scale_factors=None): + """Constructor for SquareBoxCoder. + + Args: + scale_factors: List of 3 positive scalars to scale ty, tx, and tl. + If set to None, does not perform scaling. For faster RCNN, + the open-source implementation recommends using [10.0, 10.0, 5.0]. + + Raises: + ValueError: If scale_factors is not length 3 or contains values less than + or equal to 0. + """ + if scale_factors: + if len(scale_factors) != 3: + raise ValueError('The argument scale_factors must be a list of length ' + '3.') + if any(scalar <= 0 for scalar in scale_factors): + raise ValueError('The values in scale_factors must all be greater ' + 'than 0.') + self._scale_factors = scale_factors + + @property + def code_size(self): + return 3 + + def _encode(self, boxes, anchors): + """Encodes a box collection with respect to an anchor collection. + + Args: + boxes: BoxList holding N boxes to be encoded. + anchors: BoxList of anchors. + + Returns: + a tensor representing N anchor-encoded boxes of the format + [ty, tx, tl]. + """ + # Convert anchors to the center coordinate representation. + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + la = tf.sqrt(ha * wa) + ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() + l = tf.sqrt(h * w) + # Avoid NaN in division and log below. + la += EPSILON + l += EPSILON + + tx = (xcenter - xcenter_a) / la + ty = (ycenter - ycenter_a) / la + tl = tf.log(l / la) + # Scales location targets for joint training. + if self._scale_factors: + ty *= self._scale_factors[0] + tx *= self._scale_factors[1] + tl *= self._scale_factors[2] + return tf.transpose(tf.stack([ty, tx, tl])) + + def _decode(self, rel_codes, anchors): + """Decodes relative codes to boxes. + + Args: + rel_codes: a tensor representing N anchor-encoded boxes. + anchors: BoxList of anchors. + + Returns: + boxes: BoxList holding N bounding boxes. + """ + ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() + la = tf.sqrt(ha * wa) + + ty, tx, tl = tf.unstack(tf.transpose(rel_codes)) + if self._scale_factors: + ty /= self._scale_factors[0] + tx /= self._scale_factors[1] + tl /= self._scale_factors[2] + l = tf.exp(tl) * la + ycenter = ty * la + ycenter_a + xcenter = tx * la + xcenter_a + ymin = ycenter - l / 2. + xmin = xcenter - l / 2. + ymax = ycenter + l / 2. + xmax = xcenter + l / 2. 
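+    # Pack the recovered corners into a BoxList; each decoded box is a square
+    # of side l centered on (ycenter, xcenter).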
+ return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/square_box_coder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/square_box_coder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9ff5bf31daa6bc484743faad0e4cf0224cb8e1e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/square_box_coder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/square_box_coder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/square_box_coder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e6bdcb245dc783cca6999a71e6b7bfe8b118cb2f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/box_coders/square_box_coder_test.py @@ -0,0 +1,114 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.box_coder.square_box_coder.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import square_box_coder +from object_detection.core import box_list +from object_detection.utils import test_case + + +class SquareBoxCoderTest(test_case.TestCase): + + def test_correct_relative_codes_with_default_scale(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + expected_rel_codes = [[-0.790569, -0.263523, -0.293893], + [-0.068041, -0.272166, -0.89588]] + def graph_fn(boxes, anchors): + scale_factors = None + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_correct_relative_codes_with_non_default_scale(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + expected_rel_codes = [[-1.581139, -0.790569, -1.175573], + [-0.136083, -0.816497, -3.583519]] + def graph_fn(boxes, anchors): + scale_factors = [2, 3, 4] + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-03, + atol=1e-03) + + def test_correct_relative_codes_with_small_width(self): + 
boxes = np.array([[10.0, 10.0, 10.0000001, 20.0]], np.float32) + anchors = np.array([[15.0, 12.0, 30.0, 18.0]], np.float32) + expected_rel_codes = [[-1.317616, 0., -20.670586]] + def graph_fn(boxes, anchors): + scale_factors = None + boxes = box_list.BoxList(boxes) + anchors = box_list.BoxList(anchors) + coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) + rel_codes = coder.encode(boxes, anchors) + return rel_codes + rel_codes_out = self.execute(graph_fn, [boxes, anchors]) + self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04, + atol=1e-04) + + def test_correct_boxes_with_default_scale(self): + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + rel_codes = np.array([[-0.5, -0.416666, -0.405465], + [-0.083333, -0.222222, -0.693147]], np.float32) + expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432], + [0.155051, 0.102989, 0.522474, 0.470412]] + def graph_fn(rel_codes, anchors): + scale_factors = None + anchors = box_list.BoxList(anchors) + coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) + boxes = coder.decode(rel_codes, anchors).get() + return boxes + boxes_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, + atol=1e-04) + + def test_correct_boxes_with_non_default_scale(self): + anchors = np.array([[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]], + np.float32) + rel_codes = np.array( + [[-1., -1.25, -1.62186], [-0.166667, -0.666667, -2.772588]], np.float32) + expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432], + [0.155051, 0.102989, 0.522474, 0.470412]] + def graph_fn(rel_codes, anchors): + scale_factors = [2, 3, 4] + anchors = box_list.BoxList(anchors) + coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors) + boxes = coder.decode(rel_codes, anchors).get() + return boxes + boxes_out = self.execute(graph_fn, [rel_codes, anchors]) + self.assertAllClose(boxes_out, expected_boxes, rtol=1e-04, + atol=1e-04) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..007470768d56c57f0c927bd6472b662992a60a90 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd69ffaf33f17f3e1c2f8c087b3b3be750a72553 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/anchor_generator_builder.cpython-36.pyc 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/anchor_generator_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15557fbe2c3799453219ed559c158dc29841045b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/anchor_generator_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/box_coder_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/box_coder_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b9c4f8d1d4ca0d621f906685267df4bb0bcb964 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/box_coder_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/box_predictor_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/box_predictor_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77b65b06b7b6bd5098b5857d9c70a74e9d76da67 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/box_predictor_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/calibration_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/calibration_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7207619b17d775fa1d65a04ec8bd25eaab87b65e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/calibration_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/dataset_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/dataset_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aec2ce2f637f77a552fb12f96122b3a08da10c76 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/dataset_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/decoder_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/decoder_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca90f0b14632117922008ee69da206bcd19f1b91 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/decoder_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/graph_rewriter_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/graph_rewriter_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..850bb2a24315895f41bd19c09a0727879b4b17a6 Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/graph_rewriter_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/hyperparams_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/hyperparams_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd039232bfc8deb115780de6d56d97926f5e0e9f Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/hyperparams_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/image_resizer_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/image_resizer_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11b0adae8dd587cd5e742b7f4ceb4552b055fefd Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/image_resizer_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/losses_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/losses_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b47f24141291aa56ae594d05a2b9cd4f1b1110d3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/losses_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/matcher_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/matcher_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ffc1085efa17364c24e9744f50f42fe22c9fa11 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/matcher_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/model_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/model_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e7af027aabd058506ecd6999cae2502428992a9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/model_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/optimizer_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/optimizer_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..307e2e4f8cadc880ef4aaaee6aa3202c8b88dbd1 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/optimizer_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/post_processing_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/post_processing_builder.cpython-36.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..97e2ea3e627eac376243d0b61cdd3bfc5d5e7674 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/post_processing_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/preprocessor_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/preprocessor_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87281306b926401c8f67d078b3db4f6d019b6a86 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/preprocessor_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/region_similarity_calculator_builder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/region_similarity_calculator_builder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0193482b2a2a13bbd6171bd34508cb105b311da Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/__pycache__/region_similarity_calculator_builder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/anchor_generator_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/anchor_generator_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..7880210a8b1881af71117c1ed60a60f4f4231581 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/anchor_generator_builder.py @@ -0,0 +1,116 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build an object detection anchor generator from config.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six.moves import zip +from object_detection.anchor_generators import flexible_grid_anchor_generator +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.anchor_generators import multiple_grid_anchor_generator +from object_detection.anchor_generators import multiscale_grid_anchor_generator +from object_detection.protos import anchor_generator_pb2 + + +def build(anchor_generator_config): + """Builds an anchor generator based on the config. + + Args: + anchor_generator_config: An anchor_generator.proto object containing the + config for the desired anchor generator. + + Returns: + Anchor generator based on the config. + + Raises: + ValueError: On empty anchor generator proto. 
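+
+  Example (mirroring the unit tests; text_format is google.protobuf.text_format):
+    proto = anchor_generator_pb2.AnchorGenerator()
+    text_format.Merge('ssd_anchor_generator { aspect_ratios: [1.0] }', proto)
+    anchor_generator = build(proto)  # returns a MultipleGridAnchorGenerator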
+ """ + if not isinstance(anchor_generator_config, + anchor_generator_pb2.AnchorGenerator): + raise ValueError('anchor_generator_config not of type ' + 'anchor_generator_pb2.AnchorGenerator') + if anchor_generator_config.WhichOneof( + 'anchor_generator_oneof') == 'grid_anchor_generator': + grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator + return grid_anchor_generator.GridAnchorGenerator( + scales=[float(scale) for scale in grid_anchor_generator_config.scales], + aspect_ratios=[float(aspect_ratio) + for aspect_ratio + in grid_anchor_generator_config.aspect_ratios], + base_anchor_size=[grid_anchor_generator_config.height, + grid_anchor_generator_config.width], + anchor_stride=[grid_anchor_generator_config.height_stride, + grid_anchor_generator_config.width_stride], + anchor_offset=[grid_anchor_generator_config.height_offset, + grid_anchor_generator_config.width_offset]) + elif anchor_generator_config.WhichOneof( + 'anchor_generator_oneof') == 'ssd_anchor_generator': + ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator + anchor_strides = None + if ssd_anchor_generator_config.height_stride: + anchor_strides = list( + zip(ssd_anchor_generator_config.height_stride, + ssd_anchor_generator_config.width_stride)) + anchor_offsets = None + if ssd_anchor_generator_config.height_offset: + anchor_offsets = list( + zip(ssd_anchor_generator_config.height_offset, + ssd_anchor_generator_config.width_offset)) + return multiple_grid_anchor_generator.create_ssd_anchors( + num_layers=ssd_anchor_generator_config.num_layers, + min_scale=ssd_anchor_generator_config.min_scale, + max_scale=ssd_anchor_generator_config.max_scale, + scales=[float(scale) for scale in ssd_anchor_generator_config.scales], + aspect_ratios=ssd_anchor_generator_config.aspect_ratios, + interpolated_scale_aspect_ratio=( + ssd_anchor_generator_config.interpolated_scale_aspect_ratio), + base_anchor_size=[ + ssd_anchor_generator_config.base_anchor_height, + ssd_anchor_generator_config.base_anchor_width + ], + anchor_strides=anchor_strides, + anchor_offsets=anchor_offsets, + reduce_boxes_in_lowest_layer=( + ssd_anchor_generator_config.reduce_boxes_in_lowest_layer)) + elif anchor_generator_config.WhichOneof( + 'anchor_generator_oneof') == 'multiscale_anchor_generator': + cfg = anchor_generator_config.multiscale_anchor_generator + return multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator( + cfg.min_level, + cfg.max_level, + cfg.anchor_scale, + [float(aspect_ratio) for aspect_ratio in cfg.aspect_ratios], + cfg.scales_per_octave, + cfg.normalize_coordinates + ) + elif anchor_generator_config.WhichOneof( + 'anchor_generator_oneof') == 'flexible_grid_anchor_generator': + cfg = anchor_generator_config.flexible_grid_anchor_generator + base_sizes = [] + aspect_ratios = [] + strides = [] + offsets = [] + for anchor_grid in cfg.anchor_grid: + base_sizes.append(tuple(anchor_grid.base_sizes)) + aspect_ratios.append(tuple(anchor_grid.aspect_ratios)) + strides.append((anchor_grid.height_stride, anchor_grid.width_stride)) + offsets.append((anchor_grid.height_offset, anchor_grid.width_offset)) + return flexible_grid_anchor_generator.FlexibleGridAnchorGenerator( + base_sizes, aspect_ratios, strides, offsets, cfg.normalize_coordinates) + else: + raise ValueError('Empty anchor generator.') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/anchor_generator_builder.pyc 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/anchor_generator_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c24757877b37d2372c431283b7905b5bd32848b3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/anchor_generator_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/anchor_generator_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/anchor_generator_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..35cdfcaee26f349e84257816f06c699f384246b1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/anchor_generator_builder_test.py @@ -0,0 +1,339 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for anchor_generator_builder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.anchor_generators import flexible_grid_anchor_generator +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.anchor_generators import multiple_grid_anchor_generator +from object_detection.anchor_generators import multiscale_grid_anchor_generator +from object_detection.builders import anchor_generator_builder +from object_detection.protos import anchor_generator_pb2 + + +class AnchorGeneratorBuilderTest(tf.test.TestCase): + + def assert_almost_list_equal(self, expected_list, actual_list, delta=None): + self.assertEqual(len(expected_list), len(actual_list)) + for expected_item, actual_item in zip(expected_list, actual_list): + self.assertAlmostEqual(expected_item, actual_item, delta=delta) + + def test_build_grid_anchor_generator_with_defaults(self): + anchor_generator_text_proto = """ + grid_anchor_generator { + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + grid_anchor_generator.GridAnchorGenerator) + self.assertListEqual(anchor_generator_object._scales, []) + self.assertListEqual(anchor_generator_object._aspect_ratios, []) + self.assertAllEqual(anchor_generator_object._anchor_offset, [0, 0]) + self.assertAllEqual(anchor_generator_object._anchor_stride, [16, 16]) + self.assertAllEqual(anchor_generator_object._base_anchor_size, [256, 256]) + + def test_build_grid_anchor_generator_with_non_default_parameters(self): + anchor_generator_text_proto = 
""" + grid_anchor_generator { + height: 128 + width: 512 + height_stride: 10 + width_stride: 20 + height_offset: 30 + width_offset: 40 + scales: [0.4, 2.2] + aspect_ratios: [0.3, 4.5] + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + grid_anchor_generator.GridAnchorGenerator) + self.assert_almost_list_equal(anchor_generator_object._scales, + [0.4, 2.2]) + self.assert_almost_list_equal(anchor_generator_object._aspect_ratios, + [0.3, 4.5]) + self.assertAllEqual(anchor_generator_object._anchor_offset, [30, 40]) + self.assertAllEqual(anchor_generator_object._anchor_stride, [10, 20]) + self.assertAllEqual(anchor_generator_object._base_anchor_size, [128, 512]) + + def test_build_ssd_anchor_generator_with_defaults(self): + anchor_generator_text_proto = """ + ssd_anchor_generator { + aspect_ratios: [1.0] + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiple_grid_anchor_generator. + MultipleGridAnchorGenerator) + for actual_scales, expected_scales in zip( + list(anchor_generator_object._scales), + [(0.1, 0.2, 0.2), + (0.35, 0.418), + (0.499, 0.570), + (0.649, 0.721), + (0.799, 0.871), + (0.949, 0.974)]): + self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) + for actual_aspect_ratio, expected_aspect_ratio in zip( + list(anchor_generator_object._aspect_ratios), + [(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]): + self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) + self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0]) + + def test_build_ssd_anchor_generator_with_custom_scales(self): + anchor_generator_text_proto = """ + ssd_anchor_generator { + aspect_ratios: [1.0] + scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8] + reduce_boxes_in_lowest_layer: false + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiple_grid_anchor_generator. + MultipleGridAnchorGenerator) + for actual_scales, expected_scales in zip( + list(anchor_generator_object._scales), + [(0.1, math.sqrt(0.1 * 0.15)), + (0.15, math.sqrt(0.15 * 0.2)), + (0.2, math.sqrt(0.2 * 0.4)), + (0.4, math.sqrt(0.4 * 0.6)), + (0.6, math.sqrt(0.6 * 0.8)), + (0.8, math.sqrt(0.8 * 1.0))]): + self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) + + def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self): + anchor_generator_text_proto = """ + ssd_anchor_generator { + aspect_ratios: [0.5] + interpolated_scale_aspect_ratio: 0.5 + reduce_boxes_in_lowest_layer: false + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiple_grid_anchor_generator. 
+ MultipleGridAnchorGenerator) + for actual_aspect_ratio, expected_aspect_ratio in zip( + list(anchor_generator_object._aspect_ratios), + 6 * [(0.5, 0.5)]): + self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) + + def test_build_ssd_anchor_generator_without_reduced_boxes(self): + anchor_generator_text_proto = """ + ssd_anchor_generator { + aspect_ratios: [1.0] + reduce_boxes_in_lowest_layer: false + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiple_grid_anchor_generator. + MultipleGridAnchorGenerator) + + for actual_scales, expected_scales in zip( + list(anchor_generator_object._scales), + [(0.2, 0.264), + (0.35, 0.418), + (0.499, 0.570), + (0.649, 0.721), + (0.799, 0.871), + (0.949, 0.974)]): + self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) + + for actual_aspect_ratio, expected_aspect_ratio in zip( + list(anchor_generator_object._aspect_ratios), + 6 * [(1.0, 1.0)]): + self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) + + self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0]) + + def test_build_ssd_anchor_generator_with_non_default_parameters(self): + anchor_generator_text_proto = """ + ssd_anchor_generator { + num_layers: 2 + min_scale: 0.3 + max_scale: 0.8 + aspect_ratios: [2.0] + height_stride: 16 + height_stride: 32 + width_stride: 20 + width_stride: 30 + height_offset: 8 + height_offset: 16 + width_offset: 0 + width_offset: 10 + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiple_grid_anchor_generator. 
+ MultipleGridAnchorGenerator) + + for actual_scales, expected_scales in zip( + list(anchor_generator_object._scales), + [(0.1, 0.3, 0.3), (0.8, 0.894)]): + self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) + + for actual_aspect_ratio, expected_aspect_ratio in zip( + list(anchor_generator_object._aspect_ratios), + [(1.0, 2.0, 0.5), (2.0, 1.0)]): + self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) + + for actual_strides, expected_strides in zip( + list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]): + self.assert_almost_list_equal(expected_strides, actual_strides) + + for actual_offsets, expected_offsets in zip( + list(anchor_generator_object._anchor_offsets), [(8, 0), (16, 10)]): + self.assert_almost_list_equal(expected_offsets, actual_offsets) + + self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0]) + + def test_raise_value_error_on_empty_anchor_genertor(self): + anchor_generator_text_proto = """ + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + with self.assertRaises(ValueError): + anchor_generator_builder.build(anchor_generator_proto) + + def test_build_multiscale_anchor_generator_custom_aspect_ratios(self): + anchor_generator_text_proto = """ + multiscale_anchor_generator { + aspect_ratios: [1.0] + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiscale_grid_anchor_generator. + MultiscaleGridAnchorGenerator) + for level, anchor_grid_info in zip( + range(3, 8), anchor_generator_object._anchor_grid_info): + self.assertEqual(set(anchor_grid_info.keys()), set(['level', 'info'])) + self.assertTrue(level, anchor_grid_info['level']) + self.assertEqual(len(anchor_grid_info['info']), 4) + self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5]) + self.assertTrue(anchor_grid_info['info'][1], 1.0) + self.assertAllClose(anchor_grid_info['info'][2], + [4.0 * 2**level, 4.0 * 2**level]) + self.assertAllClose(anchor_grid_info['info'][3], [2**level, 2**level]) + self.assertTrue(anchor_generator_object._normalize_coordinates) + + def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates( + self): + anchor_generator_text_proto = """ + multiscale_anchor_generator { + aspect_ratios: [1.0] + normalize_coordinates: false + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + multiscale_grid_anchor_generator. 
+ MultiscaleGridAnchorGenerator) + self.assertFalse(anchor_generator_object._normalize_coordinates) + + def test_build_flexible_anchor_generator(self): + anchor_generator_text_proto = """ + flexible_grid_anchor_generator { + anchor_grid { + base_sizes: [1.5] + aspect_ratios: [1.0] + height_stride: 16 + width_stride: 20 + height_offset: 8 + width_offset: 9 + } + anchor_grid { + base_sizes: [1.0, 2.0] + aspect_ratios: [1.0, 0.5] + height_stride: 32 + width_stride: 30 + height_offset: 10 + width_offset: 11 + } + } + """ + anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() + text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) + anchor_generator_object = anchor_generator_builder.build( + anchor_generator_proto) + self.assertIsInstance(anchor_generator_object, + flexible_grid_anchor_generator. + FlexibleGridAnchorGenerator) + + for actual_base_sizes, expected_base_sizes in zip( + list(anchor_generator_object._base_sizes), [(1.5,), (1.0, 2.0)]): + self.assert_almost_list_equal(expected_base_sizes, actual_base_sizes) + + for actual_aspect_ratios, expected_aspect_ratios in zip( + list(anchor_generator_object._aspect_ratios), [(1.0,), (1.0, 0.5)]): + self.assert_almost_list_equal(expected_aspect_ratios, + actual_aspect_ratios) + + for actual_strides, expected_strides in zip( + list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]): + self.assert_almost_list_equal(expected_strides, actual_strides) + + for actual_offsets, expected_offsets in zip( + list(anchor_generator_object._anchor_offsets), [(8, 9), (10, 11)]): + self.assert_almost_list_equal(expected_offsets, actual_offsets) + + self.assertTrue(anchor_generator_object._normalize_coordinates) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_coder_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_coder_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..cc13d5a2f01c5a1f66e83abc5bb5ada542047d83 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_coder_builder.py @@ -0,0 +1,66 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build an object detection box coder from configuration.""" +from object_detection.box_coders import faster_rcnn_box_coder +from object_detection.box_coders import keypoint_box_coder +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.box_coders import square_box_coder +from object_detection.protos import box_coder_pb2 + + +def build(box_coder_config): + """Builds a box coder object based on the box coder config. + + Args: + box_coder_config: A box_coder.proto object containing the config for the + desired box coder. + + Returns: + BoxCoder based on the config. 
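+
+  Example (mirroring the unit tests; text_format is google.protobuf.text_format):
+    proto = box_coder_pb2.BoxCoder()
+    text_format.Merge('faster_rcnn_box_coder { }', proto)
+    coder = build(proto)  # FasterRcnnBoxCoder; defaults give scale factors
+                          # [10.0, 10.0, 5.0, 5.0]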
+ + Raises: + ValueError: On empty box coder proto. + """ + if not isinstance(box_coder_config, box_coder_pb2.BoxCoder): + raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.') + + if box_coder_config.WhichOneof('box_coder_oneof') == 'faster_rcnn_box_coder': + return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[ + box_coder_config.faster_rcnn_box_coder.y_scale, + box_coder_config.faster_rcnn_box_coder.x_scale, + box_coder_config.faster_rcnn_box_coder.height_scale, + box_coder_config.faster_rcnn_box_coder.width_scale + ]) + if box_coder_config.WhichOneof('box_coder_oneof') == 'keypoint_box_coder': + return keypoint_box_coder.KeypointBoxCoder( + box_coder_config.keypoint_box_coder.num_keypoints, + scale_factors=[ + box_coder_config.keypoint_box_coder.y_scale, + box_coder_config.keypoint_box_coder.x_scale, + box_coder_config.keypoint_box_coder.height_scale, + box_coder_config.keypoint_box_coder.width_scale + ]) + if (box_coder_config.WhichOneof('box_coder_oneof') == + 'mean_stddev_box_coder'): + return mean_stddev_box_coder.MeanStddevBoxCoder( + stddev=box_coder_config.mean_stddev_box_coder.stddev) + if box_coder_config.WhichOneof('box_coder_oneof') == 'square_box_coder': + return square_box_coder.SquareBoxCoder(scale_factors=[ + box_coder_config.square_box_coder.y_scale, + box_coder_config.square_box_coder.x_scale, + box_coder_config.square_box_coder.length_scale + ]) + raise ValueError('Empty box coder.') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_coder_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_coder_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..216efa86dac7eb849781136281f263fb5452d48b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_coder_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_coder_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_coder_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5db9947cb643a7ba90e66d431dc3b80b3b82e00c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_coder_builder_test.py @@ -0,0 +1,136 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for box_coder_builder.""" + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.box_coders import faster_rcnn_box_coder +from object_detection.box_coders import keypoint_box_coder +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.box_coders import square_box_coder +from object_detection.builders import box_coder_builder +from object_detection.protos import box_coder_pb2 + + +class BoxCoderBuilderTest(tf.test.TestCase): + + def test_build_faster_rcnn_box_coder_with_defaults(self): + box_coder_text_proto = """ + faster_rcnn_box_coder { + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertIsInstance(box_coder_object, + faster_rcnn_box_coder.FasterRcnnBoxCoder) + self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0]) + + def test_build_faster_rcnn_box_coder_with_non_default_parameters(self): + box_coder_text_proto = """ + faster_rcnn_box_coder { + y_scale: 6.0 + x_scale: 3.0 + height_scale: 7.0 + width_scale: 8.0 + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertIsInstance(box_coder_object, + faster_rcnn_box_coder.FasterRcnnBoxCoder) + self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0]) + + def test_build_keypoint_box_coder_with_defaults(self): + box_coder_text_proto = """ + keypoint_box_coder { + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder) + self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0]) + + def test_build_keypoint_box_coder_with_non_default_parameters(self): + box_coder_text_proto = """ + keypoint_box_coder { + num_keypoints: 6 + y_scale: 6.0 + x_scale: 3.0 + height_scale: 7.0 + width_scale: 8.0 + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder) + self.assertEqual(box_coder_object._num_keypoints, 6) + self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0]) + + def test_build_mean_stddev_box_coder(self): + box_coder_text_proto = """ + mean_stddev_box_coder { + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertTrue( + isinstance(box_coder_object, + mean_stddev_box_coder.MeanStddevBoxCoder)) + + def test_build_square_box_coder_with_defaults(self): + box_coder_text_proto = """ + square_box_coder { + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertTrue( + isinstance(box_coder_object, square_box_coder.SquareBoxCoder)) + self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0]) + + def test_build_square_box_coder_with_non_default_parameters(self): + box_coder_text_proto = """ + 
square_box_coder { + y_scale: 6.0 + x_scale: 3.0 + length_scale: 7.0 + } + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + box_coder_object = box_coder_builder.build(box_coder_proto) + self.assertTrue( + isinstance(box_coder_object, square_box_coder.SquareBoxCoder)) + self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0]) + + def test_raise_error_on_empty_box_coder(self): + box_coder_text_proto = """ + """ + box_coder_proto = box_coder_pb2.BoxCoder() + text_format.Merge(box_coder_text_proto, box_coder_proto) + with self.assertRaises(ValueError): + box_coder_builder.build(box_coder_proto) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_predictor_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_predictor_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..029649d8d9dd68877adac6bb971d5fd024f62246 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_predictor_builder.py @@ -0,0 +1,975 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Function to build box predictor from configuration.""" + +import collections +import tensorflow.compat.v1 as tf +from object_detection.predictors import convolutional_box_predictor +from object_detection.predictors import convolutional_keras_box_predictor +from object_detection.predictors import mask_rcnn_box_predictor +from object_detection.predictors import mask_rcnn_keras_box_predictor +from object_detection.predictors import rfcn_box_predictor +from object_detection.predictors import rfcn_keras_box_predictor +from object_detection.predictors.heads import box_head +from object_detection.predictors.heads import class_head +from object_detection.predictors.heads import keras_box_head +from object_detection.predictors.heads import keras_class_head +from object_detection.predictors.heads import keras_mask_head +from object_detection.predictors.heads import mask_head +from object_detection.protos import box_predictor_pb2 + + +def build_convolutional_box_predictor(is_training, + num_classes, + conv_hyperparams_fn, + min_depth, + max_depth, + num_layers_before_predictor, + use_dropout, + dropout_keep_prob, + kernel_size, + box_code_size, + apply_sigmoid_to_scores=False, + add_background_class=True, + class_prediction_bias_init=0.0, + use_depthwise=False, + box_encodings_clip_range=None): + """Builds the ConvolutionalBoxPredictor from the arguments. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. 
Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + min_depth: Minimum feature depth prior to predicting box encodings + and class predictions. + max_depth: Maximum feature depth prior to predicting box encodings + and class predictions. If max_depth is set to 0, no additional + feature map will be inserted before location and class predictions. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + box_code_size: Size of encoding for each box. + apply_sigmoid_to_scores: If True, apply the sigmoid on the output + class_predictions. + add_background_class: Whether to add an implicit background class. + class_prediction_bias_init: Constant value to initialize bias of the last + conv2d layer before class prediction. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + box_encodings_clip_range: Min and max values for clipping the box_encodings. + + Returns: + A ConvolutionalBoxPredictor class. + """ + box_prediction_head = box_head.ConvolutionalBoxHead( + is_training=is_training, + box_code_size=box_code_size, + kernel_size=kernel_size, + use_depthwise=use_depthwise, + box_encodings_clip_range=box_encodings_clip_range) + class_prediction_head = class_head.ConvolutionalClassHead( + is_training=is_training, + num_class_slots=num_classes + 1 if add_background_class else num_classes, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + kernel_size=kernel_size, + apply_sigmoid_to_scores=apply_sigmoid_to_scores, + class_prediction_bias_init=class_prediction_bias_init, + use_depthwise=use_depthwise) + other_heads = {} + return convolutional_box_predictor.ConvolutionalBoxPredictor( + is_training=is_training, + num_classes=num_classes, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + other_heads=other_heads, + conv_hyperparams_fn=conv_hyperparams_fn, + num_layers_before_predictor=num_layers_before_predictor, + min_depth=min_depth, + max_depth=max_depth) + + +def build_convolutional_keras_box_predictor(is_training, + num_classes, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + num_predictions_per_location_list, + min_depth, + max_depth, + num_layers_before_predictor, + use_dropout, + dropout_keep_prob, + kernel_size, + box_code_size, + add_background_class=True, + class_prediction_bias_init=0.0, + use_depthwise=False, + box_encodings_clip_range=None, + name='BoxPredictor'): + """Builds the Keras ConvolutionalBoxPredictor from the arguments. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. 
Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + num_predictions_per_location_list: A list of integers representing the + number of box predictions to be made per spatial location for each + feature map. + min_depth: Minimum feature depth prior to predicting box encodings + and class predictions. + max_depth: Maximum feature depth prior to predicting box encodings + and class predictions. If max_depth is set to 0, no additional + feature map will be inserted before location and class predictions. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + box_code_size: Size of encoding for each box. + add_background_class: Whether to add an implicit background class. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + box_encodings_clip_range: Min and max values for clipping the box_encodings. + name: A string name scope to assign to the box predictor. If `None`, Keras + will auto-generate one from the class name. + + Returns: + A Keras ConvolutionalBoxPredictor class. 
+ """ + box_prediction_heads = [] + class_prediction_heads = [] + other_heads = {} + + for stack_index, num_predictions_per_location in enumerate( + num_predictions_per_location_list): + box_prediction_heads.append( + keras_box_head.ConvolutionalBoxHead( + is_training=is_training, + box_code_size=box_code_size, + kernel_size=kernel_size, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + num_predictions_per_location=num_predictions_per_location, + use_depthwise=use_depthwise, + box_encodings_clip_range=box_encodings_clip_range, + name='ConvolutionalBoxHead_%d' % stack_index)) + class_prediction_heads.append( + keras_class_head.ConvolutionalClassHead( + is_training=is_training, + num_class_slots=( + num_classes + 1 if add_background_class else num_classes), + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + kernel_size=kernel_size, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + num_predictions_per_location=num_predictions_per_location, + class_prediction_bias_init=class_prediction_bias_init, + use_depthwise=use_depthwise, + name='ConvolutionalClassHead_%d' % stack_index)) + + return convolutional_keras_box_predictor.ConvolutionalBoxPredictor( + is_training=is_training, + num_classes=num_classes, + box_prediction_heads=box_prediction_heads, + class_prediction_heads=class_prediction_heads, + other_heads=other_heads, + conv_hyperparams=conv_hyperparams, + num_layers_before_predictor=num_layers_before_predictor, + min_depth=min_depth, + max_depth=max_depth, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + name=name) + + +def build_weight_shared_convolutional_box_predictor( + is_training, + num_classes, + conv_hyperparams_fn, + depth, + num_layers_before_predictor, + box_code_size, + kernel_size=3, + add_background_class=True, + class_prediction_bias_init=0.0, + use_dropout=False, + dropout_keep_prob=0.8, + share_prediction_tower=False, + apply_batch_norm=True, + use_depthwise=False, + score_converter_fn=tf.identity, + box_encodings_clip_range=None, + keyword_args=None): + """Builds and returns a WeightSharedConvolutionalBoxPredictor class. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + depth: depth of conv layers. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + box_code_size: Size of encoding for each box. + kernel_size: Size of final convolution kernel. + add_background_class: Whether to add an implicit background class. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_dropout: Whether to apply dropout to class prediction head. + dropout_keep_prob: Probability of keeping activiations. + share_prediction_tower: Whether to share the multi-layer tower between box + prediction and class prediction heads. + apply_batch_norm: Whether to apply batch normalization to conv layers in + this predictor. + use_depthwise: Whether to use depthwise separable conv2d instead of conv2d. + score_converter_fn: Callable score converter to perform elementwise op on + class scores. 
+ box_encodings_clip_range: Min and max values for clipping the box_encodings. + keyword_args: A dictionary with additional args. + + Returns: + A WeightSharedConvolutionalBoxPredictor class. + """ + box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( + box_code_size=box_code_size, + kernel_size=kernel_size, + use_depthwise=use_depthwise, + box_encodings_clip_range=box_encodings_clip_range) + class_prediction_head = ( + class_head.WeightSharedConvolutionalClassHead( + num_class_slots=( + num_classes + 1 if add_background_class else num_classes), + kernel_size=kernel_size, + class_prediction_bias_init=class_prediction_bias_init, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + use_depthwise=use_depthwise, + score_converter_fn=score_converter_fn)) + other_heads = {} + return convolutional_box_predictor.WeightSharedConvolutionalBoxPredictor( + is_training=is_training, + num_classes=num_classes, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + other_heads=other_heads, + conv_hyperparams_fn=conv_hyperparams_fn, + depth=depth, + num_layers_before_predictor=num_layers_before_predictor, + kernel_size=kernel_size, + apply_batch_norm=apply_batch_norm, + share_prediction_tower=share_prediction_tower, + use_depthwise=use_depthwise) + + +def build_weight_shared_convolutional_keras_box_predictor( + is_training, + num_classes, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + num_predictions_per_location_list, + depth, + num_layers_before_predictor, + box_code_size, + kernel_size=3, + add_background_class=True, + class_prediction_bias_init=0.0, + use_dropout=False, + dropout_keep_prob=0.8, + share_prediction_tower=False, + apply_batch_norm=True, + use_depthwise=False, + score_converter_fn=tf.identity, + box_encodings_clip_range=None, + name='WeightSharedConvolutionalBoxPredictor', + keyword_args=None): + """Builds the Keras WeightSharedConvolutionalBoxPredictor from the arguments. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + num_predictions_per_location_list: A list of integers representing the + number of box predictions to be made per spatial location for each + feature map. + depth: depth of conv layers. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + box_code_size: Size of encoding for each box. + kernel_size: Size of final convolution kernel. + add_background_class: Whether to add an implicit background class. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_dropout: Whether to apply dropout to class prediction head. 
+ dropout_keep_prob: Probability of keeping activiations. + share_prediction_tower: Whether to share the multi-layer tower between box + prediction and class prediction heads. + apply_batch_norm: Whether to apply batch normalization to conv layers in + this predictor. + use_depthwise: Whether to use depthwise separable conv2d instead of conv2d. + score_converter_fn: Callable score converter to perform elementwise op on + class scores. + box_encodings_clip_range: Min and max values for clipping the box_encodings. + name: A string name scope to assign to the box predictor. If `None`, Keras + will auto-generate one from the class name. + keyword_args: A dictionary with additional args. + + Returns: + A Keras WeightSharedConvolutionalBoxPredictor class. + """ + if len(set(num_predictions_per_location_list)) > 1: + raise ValueError('num predictions per location must be same for all' + 'feature maps, found: {}'.format( + num_predictions_per_location_list)) + num_predictions_per_location = num_predictions_per_location_list[0] + + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=box_code_size, + kernel_size=kernel_size, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=num_predictions_per_location, + use_depthwise=use_depthwise, + box_encodings_clip_range=box_encodings_clip_range, + name='WeightSharedConvolutionalBoxHead') + class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=( + num_classes + 1 if add_background_class else num_classes), + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + kernel_size=kernel_size, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=num_predictions_per_location, + class_prediction_bias_init=class_prediction_bias_init, + use_depthwise=use_depthwise, + score_converter_fn=score_converter_fn, + name='WeightSharedConvolutionalClassHead') + other_heads = {} + + return ( + convolutional_keras_box_predictor.WeightSharedConvolutionalBoxPredictor( + is_training=is_training, + num_classes=num_classes, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + other_heads=other_heads, + conv_hyperparams=conv_hyperparams, + depth=depth, + num_layers_before_predictor=num_layers_before_predictor, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + kernel_size=kernel_size, + apply_batch_norm=apply_batch_norm, + share_prediction_tower=share_prediction_tower, + use_depthwise=use_depthwise, + name=name)) + + + + +def build_mask_rcnn_keras_box_predictor(is_training, + num_classes, + fc_hyperparams, + freeze_batchnorm, + use_dropout, + dropout_keep_prob, + box_code_size, + add_background_class=True, + share_box_across_classes=False, + predict_instance_masks=False, + conv_hyperparams=None, + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False, + convolve_then_upsample_masks=False): + """Builds and returns a MaskRCNNKerasBoxPredictor class. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). 
+ fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for fully connected dense ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + box_code_size: Size of encoding for each box. + add_background_class: Whether to add an implicit background class. + share_box_across_classes: Whether to share boxes across classes rather + than use a different box for each class. + predict_instance_masks: If True, will add a third stage mask prediction + to the returned class. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + mask_height: Desired output mask height. The default value is 14. + mask_width: Desired output mask width. The default value is 14. + mask_prediction_num_conv_layers: Number of convolution layers applied to + the image_features in mask prediction branch. + mask_prediction_conv_depth: The depth for the first conv2d_transpose op + applied to the image_features in the mask prediction branch. If set + to 0, the depth of the convolution layers will be automatically chosen + based on the number of object classes and the number of channels in the + image features. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + convolve_then_upsample_masks: Whether to apply convolutions on mask + features before upsampling using nearest neighbor resizing. Otherwise, + mask features are resized to [`mask_height`, `mask_width`] using + bilinear resizing before applying convolutions. + + Returns: + A MaskRCNNKerasBoxPredictor class. + """ + box_prediction_head = keras_box_head.MaskRCNNBoxHead( + is_training=is_training, + num_classes=num_classes, + fc_hyperparams=fc_hyperparams, + freeze_batchnorm=freeze_batchnorm, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + box_code_size=box_code_size, + share_box_across_classes=share_box_across_classes) + class_prediction_head = keras_class_head.MaskRCNNClassHead( + is_training=is_training, + num_class_slots=num_classes + 1 if add_background_class else num_classes, + fc_hyperparams=fc_hyperparams, + freeze_batchnorm=freeze_batchnorm, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob) + third_stage_heads = {} + if predict_instance_masks: + third_stage_heads[ + mask_rcnn_box_predictor. 
+ MASK_PREDICTIONS] = keras_mask_head.MaskRCNNMaskHead( + is_training=is_training, + num_classes=num_classes, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + mask_height=mask_height, + mask_width=mask_width, + mask_prediction_num_conv_layers=mask_prediction_num_conv_layers, + mask_prediction_conv_depth=mask_prediction_conv_depth, + masks_are_class_agnostic=masks_are_class_agnostic, + convolve_then_upsample=convolve_then_upsample_masks) + return mask_rcnn_keras_box_predictor.MaskRCNNKerasBoxPredictor( + is_training=is_training, + num_classes=num_classes, + freeze_batchnorm=freeze_batchnorm, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + third_stage_heads=third_stage_heads) + + +def build_mask_rcnn_box_predictor(is_training, + num_classes, + fc_hyperparams_fn, + use_dropout, + dropout_keep_prob, + box_code_size, + add_background_class=True, + share_box_across_classes=False, + predict_instance_masks=False, + conv_hyperparams_fn=None, + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False, + convolve_then_upsample_masks=False): + """Builds and returns a MaskRCNNBoxPredictor class. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + fc_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for fully connected ops. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + box_code_size: Size of encoding for each box. + add_background_class: Whether to add an implicit background class. + share_box_across_classes: Whether to share boxes across classes rather + than use a different box for each class. + predict_instance_masks: If True, will add a third stage mask prediction + to the returned class. + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + mask_height: Desired output mask height. The default value is 14. + mask_width: Desired output mask width. The default value is 14. + mask_prediction_num_conv_layers: Number of convolution layers applied to + the image_features in mask prediction branch. + mask_prediction_conv_depth: The depth for the first conv2d_transpose op + applied to the image_features in the mask prediction branch. If set + to 0, the depth of the convolution layers will be automatically chosen + based on the number of object classes and the number of channels in the + image features. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + convolve_then_upsample_masks: Whether to apply convolutions on mask + features before upsampling using nearest neighbor resizing. Otherwise, + mask features are resized to [`mask_height`, `mask_width`] using + bilinear resizing before applying convolutions. + + Returns: + A MaskRCNNBoxPredictor class. 
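For reference, a minimal sketch of calling this builder directly, assuming the slim-based hyperparams_builder.build is available to turn an `op: FC` hyperparams proto into the arg_scope function expected here (values are illustrative; the build() function below normally wires this up from the BoxPredictor proto):

from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2

fc_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  op: FC
  regularizer { l2_regularizer { weight: 0.0005 } }
  initializer { truncated_normal_initializer { stddev: 0.01 } }
""", fc_hyperparams_proto)

predictor = box_predictor_builder.build_mask_rcnn_box_predictor(
    is_training=True,
    num_classes=2,          # foreground classes; the background slot is added to the class head
    fc_hyperparams_fn=hyperparams_builder.build(fc_hyperparams_proto, is_training=True),
    use_dropout=False,
    dropout_keep_prob=1.0,
    box_code_size=4)
# With predict_instance_masks=True (and conv_hyperparams_fn set), a
# MASK_PREDICTIONS entry would additionally appear in _third_stage_heads.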
+ """ + box_prediction_head = box_head.MaskRCNNBoxHead( + is_training=is_training, + num_classes=num_classes, + fc_hyperparams_fn=fc_hyperparams_fn, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob, + box_code_size=box_code_size, + share_box_across_classes=share_box_across_classes) + class_prediction_head = class_head.MaskRCNNClassHead( + is_training=is_training, + num_class_slots=num_classes + 1 if add_background_class else num_classes, + fc_hyperparams_fn=fc_hyperparams_fn, + use_dropout=use_dropout, + dropout_keep_prob=dropout_keep_prob) + third_stage_heads = {} + if predict_instance_masks: + third_stage_heads[ + mask_rcnn_box_predictor. + MASK_PREDICTIONS] = mask_head.MaskRCNNMaskHead( + num_classes=num_classes, + conv_hyperparams_fn=conv_hyperparams_fn, + mask_height=mask_height, + mask_width=mask_width, + mask_prediction_num_conv_layers=mask_prediction_num_conv_layers, + mask_prediction_conv_depth=mask_prediction_conv_depth, + masks_are_class_agnostic=masks_are_class_agnostic, + convolve_then_upsample=convolve_then_upsample_masks) + return mask_rcnn_box_predictor.MaskRCNNBoxPredictor( + is_training=is_training, + num_classes=num_classes, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + third_stage_heads=third_stage_heads) + + +def build_score_converter(score_converter_config, is_training): + """Builds score converter based on the config. + + Builds one of [tf.identity, tf.sigmoid] score converters based on the config + and whether the BoxPredictor is for training or inference. + + Args: + score_converter_config: + box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter. + is_training: Indicates whether the BoxPredictor is in training mode. + + Returns: + Callable score converter op. + + Raises: + ValueError: On unknown score converter. + """ + if score_converter_config == ( + box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY): + return tf.identity + if score_converter_config == ( + box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID): + return tf.identity if is_training else tf.sigmoid + raise ValueError('Unknown score converter.') + + +BoxEncodingsClipRange = collections.namedtuple('BoxEncodingsClipRange', + ['min', 'max']) + + +def build(argscope_fn, box_predictor_config, is_training, num_classes, + add_background_class=True): + """Builds box predictor based on the configuration. + + Builds box predictor based on the configuration. See box_predictor.proto for + configurable options. Also, see box_predictor.py for more details. + + Args: + argscope_fn: A function that takes the following inputs: + * hyperparams_pb2.Hyperparams proto + * a boolean indicating if the model is in training mode. + and returns a tf slim argscope for Conv and FC hyperparameters. + box_predictor_config: box_predictor_pb2.BoxPredictor proto containing + configuration. + is_training: Whether the models is in training mode. + num_classes: Number of classes to predict. + add_background_class: Whether to add an implicit background class. + + Returns: + box_predictor: box_predictor.BoxPredictor object. + + Raises: + ValueError: On unknown box predictor. 
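A small illustration of the training/inference asymmetry implemented by build_score_converter above, assuming the standard protos are importable (the enum access mirrors the comparisons inside that function):

import tensorflow.compat.v1 as tf
from object_detection.builders import box_predictor_builder
from object_detection.protos import box_predictor_pb2

sigmoid_setting = box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID

# During training the raw logits are kept so the loss can be computed on them ...
train_fn = box_predictor_builder.build_score_converter(sigmoid_setting, is_training=True)
assert train_fn is tf.identity

# ... while at inference time the sigmoid is applied, keeping the exported graph TPU friendly.
eval_fn = box_predictor_builder.build_score_converter(sigmoid_setting, is_training=False)
assert eval_fn is tf.sigmoid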
+ """ + if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor): + raise ValueError('box_predictor_config not of type ' + 'box_predictor_pb2.BoxPredictor.') + + box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof') + + if box_predictor_oneof == 'convolutional_box_predictor': + config_box_predictor = box_predictor_config.convolutional_box_predictor + conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, + is_training) + # Optionally apply clipping to box encodings, when box_encodings_clip_range + # is set. + box_encodings_clip_range = None + if config_box_predictor.HasField('box_encodings_clip_range'): + box_encodings_clip_range = BoxEncodingsClipRange( + min=config_box_predictor.box_encodings_clip_range.min, + max=config_box_predictor.box_encodings_clip_range.max) + return build_convolutional_box_predictor( + is_training=is_training, + num_classes=num_classes, + add_background_class=add_background_class, + conv_hyperparams_fn=conv_hyperparams_fn, + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + box_code_size=config_box_predictor.box_code_size, + kernel_size=config_box_predictor.kernel_size, + num_layers_before_predictor=( + config_box_predictor.num_layers_before_predictor), + min_depth=config_box_predictor.min_depth, + max_depth=config_box_predictor.max_depth, + apply_sigmoid_to_scores=config_box_predictor.apply_sigmoid_to_scores, + class_prediction_bias_init=( + config_box_predictor.class_prediction_bias_init), + use_depthwise=config_box_predictor.use_depthwise, + box_encodings_clip_range=box_encodings_clip_range) + + if box_predictor_oneof == 'weight_shared_convolutional_box_predictor': + config_box_predictor = ( + box_predictor_config.weight_shared_convolutional_box_predictor) + conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, + is_training) + apply_batch_norm = config_box_predictor.conv_hyperparams.HasField( + 'batch_norm') + # During training phase, logits are used to compute the loss. Only apply + # sigmoid at inference to make the inference graph TPU friendly. + score_converter_fn = build_score_converter( + config_box_predictor.score_converter, is_training) + # Optionally apply clipping to box encodings, when box_encodings_clip_range + # is set. 
+ box_encodings_clip_range = None + if config_box_predictor.HasField('box_encodings_clip_range'): + box_encodings_clip_range = BoxEncodingsClipRange( + min=config_box_predictor.box_encodings_clip_range.min, + max=config_box_predictor.box_encodings_clip_range.max) + keyword_args = None + + return build_weight_shared_convolutional_box_predictor( + is_training=is_training, + num_classes=num_classes, + add_background_class=add_background_class, + conv_hyperparams_fn=conv_hyperparams_fn, + depth=config_box_predictor.depth, + num_layers_before_predictor=( + config_box_predictor.num_layers_before_predictor), + box_code_size=config_box_predictor.box_code_size, + kernel_size=config_box_predictor.kernel_size, + class_prediction_bias_init=( + config_box_predictor.class_prediction_bias_init), + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + share_prediction_tower=config_box_predictor.share_prediction_tower, + apply_batch_norm=apply_batch_norm, + use_depthwise=config_box_predictor.use_depthwise, + score_converter_fn=score_converter_fn, + box_encodings_clip_range=box_encodings_clip_range, + keyword_args=keyword_args) + + + if box_predictor_oneof == 'mask_rcnn_box_predictor': + config_box_predictor = box_predictor_config.mask_rcnn_box_predictor + fc_hyperparams_fn = argscope_fn(config_box_predictor.fc_hyperparams, + is_training) + conv_hyperparams_fn = None + if config_box_predictor.HasField('conv_hyperparams'): + conv_hyperparams_fn = argscope_fn( + config_box_predictor.conv_hyperparams, is_training) + return build_mask_rcnn_box_predictor( + is_training=is_training, + num_classes=num_classes, + add_background_class=add_background_class, + fc_hyperparams_fn=fc_hyperparams_fn, + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + box_code_size=config_box_predictor.box_code_size, + share_box_across_classes=( + config_box_predictor.share_box_across_classes), + predict_instance_masks=config_box_predictor.predict_instance_masks, + conv_hyperparams_fn=conv_hyperparams_fn, + mask_height=config_box_predictor.mask_height, + mask_width=config_box_predictor.mask_width, + mask_prediction_num_conv_layers=( + config_box_predictor.mask_prediction_num_conv_layers), + mask_prediction_conv_depth=( + config_box_predictor.mask_prediction_conv_depth), + masks_are_class_agnostic=( + config_box_predictor.masks_are_class_agnostic), + convolve_then_upsample_masks=( + config_box_predictor.convolve_then_upsample_masks)) + + if box_predictor_oneof == 'rfcn_box_predictor': + config_box_predictor = box_predictor_config.rfcn_box_predictor + conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, + is_training) + box_predictor_object = rfcn_box_predictor.RfcnBoxPredictor( + is_training=is_training, + num_classes=num_classes, + conv_hyperparams_fn=conv_hyperparams_fn, + crop_size=[config_box_predictor.crop_height, + config_box_predictor.crop_width], + num_spatial_bins=[config_box_predictor.num_spatial_bins_height, + config_box_predictor.num_spatial_bins_width], + depth=config_box_predictor.depth, + box_code_size=config_box_predictor.box_code_size) + return box_predictor_object + raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof)) + + +def build_keras(hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update, + num_predictions_per_location_list, box_predictor_config, + is_training, num_classes, add_background_class=True): + """Builds a Keras-based box predictor based on 
the configuration. + + Builds Keras-based box predictor based on the configuration. + See box_predictor.proto for configurable options. Also, see box_predictor.py + for more details. + + Args: + hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams + proto and returns a `hyperparams_builder.KerasLayerHyperparams` + for Conv or FC hyperparameters. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + num_predictions_per_location_list: A list of integers representing the + number of box predictions to be made per spatial location for each + feature map. + box_predictor_config: box_predictor_pb2.BoxPredictor proto containing + configuration. + is_training: Whether the models is in training mode. + num_classes: Number of classes to predict. + add_background_class: Whether to add an implicit background class. + + Returns: + box_predictor: box_predictor.KerasBoxPredictor object. + + Raises: + ValueError: On unknown box predictor, or one with no Keras box predictor. + """ + if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor): + raise ValueError('box_predictor_config not of type ' + 'box_predictor_pb2.BoxPredictor.') + + box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof') + + if box_predictor_oneof == 'convolutional_box_predictor': + config_box_predictor = box_predictor_config.convolutional_box_predictor + conv_hyperparams = hyperparams_fn( + config_box_predictor.conv_hyperparams) + # Optionally apply clipping to box encodings, when box_encodings_clip_range + # is set. + box_encodings_clip_range = None + if config_box_predictor.HasField('box_encodings_clip_range'): + box_encodings_clip_range = BoxEncodingsClipRange( + min=config_box_predictor.box_encodings_clip_range.min, + max=config_box_predictor.box_encodings_clip_range.max) + + return build_convolutional_keras_box_predictor( + is_training=is_training, + num_classes=num_classes, + add_background_class=add_background_class, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + num_predictions_per_location_list=num_predictions_per_location_list, + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + box_code_size=config_box_predictor.box_code_size, + kernel_size=config_box_predictor.kernel_size, + num_layers_before_predictor=( + config_box_predictor.num_layers_before_predictor), + min_depth=config_box_predictor.min_depth, + max_depth=config_box_predictor.max_depth, + class_prediction_bias_init=( + config_box_predictor.class_prediction_bias_init), + use_depthwise=config_box_predictor.use_depthwise, + box_encodings_clip_range=box_encodings_clip_range) + + if box_predictor_oneof == 'weight_shared_convolutional_box_predictor': + config_box_predictor = ( + box_predictor_config.weight_shared_convolutional_box_predictor) + conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams) + apply_batch_norm = config_box_predictor.conv_hyperparams.HasField( + 'batch_norm') + # During training phase, logits are used to compute the loss. 
Only apply + # sigmoid at inference to make the inference graph TPU friendly. This is + # required because during TPU inference, model.postprocess is not called. + score_converter_fn = build_score_converter( + config_box_predictor.score_converter, is_training) + # Optionally apply clipping to box encodings, when box_encodings_clip_range + # is set. + box_encodings_clip_range = None + if config_box_predictor.HasField('box_encodings_clip_range'): + box_encodings_clip_range = BoxEncodingsClipRange( + min=config_box_predictor.box_encodings_clip_range.min, + max=config_box_predictor.box_encodings_clip_range.max) + keyword_args = None + + return build_weight_shared_convolutional_keras_box_predictor( + is_training=is_training, + num_classes=num_classes, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + num_predictions_per_location_list=num_predictions_per_location_list, + depth=config_box_predictor.depth, + num_layers_before_predictor=( + config_box_predictor.num_layers_before_predictor), + box_code_size=config_box_predictor.box_code_size, + kernel_size=config_box_predictor.kernel_size, + add_background_class=add_background_class, + class_prediction_bias_init=( + config_box_predictor.class_prediction_bias_init), + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + share_prediction_tower=config_box_predictor.share_prediction_tower, + apply_batch_norm=apply_batch_norm, + use_depthwise=config_box_predictor.use_depthwise, + score_converter_fn=score_converter_fn, + box_encodings_clip_range=box_encodings_clip_range, + keyword_args=keyword_args) + + if box_predictor_oneof == 'mask_rcnn_box_predictor': + config_box_predictor = box_predictor_config.mask_rcnn_box_predictor + fc_hyperparams = hyperparams_fn(config_box_predictor.fc_hyperparams) + conv_hyperparams = None + if config_box_predictor.HasField('conv_hyperparams'): + conv_hyperparams = hyperparams_fn( + config_box_predictor.conv_hyperparams) + return build_mask_rcnn_keras_box_predictor( + is_training=is_training, + num_classes=num_classes, + add_background_class=add_background_class, + fc_hyperparams=fc_hyperparams, + freeze_batchnorm=freeze_batchnorm, + use_dropout=config_box_predictor.use_dropout, + dropout_keep_prob=config_box_predictor.dropout_keep_probability, + box_code_size=config_box_predictor.box_code_size, + share_box_across_classes=( + config_box_predictor.share_box_across_classes), + predict_instance_masks=config_box_predictor.predict_instance_masks, + conv_hyperparams=conv_hyperparams, + mask_height=config_box_predictor.mask_height, + mask_width=config_box_predictor.mask_width, + mask_prediction_num_conv_layers=( + config_box_predictor.mask_prediction_num_conv_layers), + mask_prediction_conv_depth=( + config_box_predictor.mask_prediction_conv_depth), + masks_are_class_agnostic=( + config_box_predictor.masks_are_class_agnostic), + convolve_then_upsample_masks=( + config_box_predictor.convolve_then_upsample_masks)) + + if box_predictor_oneof == 'rfcn_box_predictor': + config_box_predictor = box_predictor_config.rfcn_box_predictor + conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams) + box_predictor_object = rfcn_keras_box_predictor.RfcnKerasBoxPredictor( + is_training=is_training, + num_classes=num_classes, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + crop_size=[config_box_predictor.crop_height, + config_box_predictor.crop_width], + 
num_spatial_bins=[config_box_predictor.num_spatial_bins_height, + config_box_predictor.num_spatial_bins_width], + depth=config_box_predictor.depth, + box_code_size=config_box_predictor.box_code_size) + return box_predictor_object + + raise ValueError( + 'Unknown box predictor for Keras: {}'.format(box_predictor_oneof)) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_predictor_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_predictor_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e224323aa418b0183b6758ba436af2719f31a70 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_predictor_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_predictor_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_predictor_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..88b9843d7618e633592264f3ea10afb8c07736e5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/box_predictor_builder_test.py @@ -0,0 +1,667 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
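Before the tests below, one note on the Keras entry point added above: build_keras mirrors build(), but takes a hyperparams_fn and per-feature-map prediction counts in place of a slim arg_scope function. A minimal sketch, assuming hyperparams_builder.KerasLayerHyperparams is passed as hyperparams_fn (the config values are illustrative):

from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2

box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge("""
  convolutional_box_predictor {
    conv_hyperparams {
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { truncated_normal_initializer { stddev: 0.03 } }
    }
    kernel_size: 3
    box_code_size: 4
  }
""", box_predictor_proto)

keras_predictor = box_predictor_builder.build_keras(
    hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
    freeze_batchnorm=True,
    inplace_batchnorm_update=False,
    num_predictions_per_location_list=[6, 6, 6, 6, 6, 6],  # one entry per feature map
    box_predictor_config=box_predictor_proto,
    is_training=False,
    num_classes=2)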
+# ============================================================================== + +"""Tests for box_predictor_builder.""" + +import unittest +from unittest import mock # pylint: disable=g-importing-member +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.predictors import mask_rcnn_box_predictor +from object_detection.protos import box_predictor_pb2 +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.') +class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase): + + def test_box_predictor_calls_conv_argscope_fn(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.0003 + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.3 + } + } + activation: RELU_6 + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10) + (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn + self.assertAlmostEqual((hyperparams_proto.regularizer. + l1_regularizer.weight), + (conv_hyperparams_actual.regularizer.l1_regularizer. + weight)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.stddev), + (conv_hyperparams_actual.initializer. + truncated_normal_initializer.stddev)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.mean), + (conv_hyperparams_actual.initializer. 
+ truncated_normal_initializer.mean)) + self.assertEqual(hyperparams_proto.activation, + conv_hyperparams_actual.activation) + self.assertFalse(is_training) + + def test_construct_non_default_conv_box_predictor(self): + box_predictor_text_proto = """ + convolutional_box_predictor { + min_depth: 2 + max_depth: 16 + num_layers_before_predictor: 2 + use_dropout: false + dropout_keep_probability: 0.4 + kernel_size: 3 + box_code_size: 3 + apply_sigmoid_to_scores: true + class_prediction_bias_init: 4.0 + use_depthwise: true + } + """ + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10, + add_background_class=False) + class_head = box_predictor._class_prediction_head + self.assertEqual(box_predictor._min_depth, 2) + self.assertEqual(box_predictor._max_depth, 16) + self.assertEqual(box_predictor._num_layers_before_predictor, 2) + self.assertFalse(class_head._use_dropout) + self.assertAlmostEqual(class_head._dropout_keep_prob, 0.4) + self.assertTrue(class_head._apply_sigmoid_to_scores) + self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0) + self.assertEqual(class_head._num_class_slots, 10) + self.assertEqual(box_predictor.num_classes, 10) + self.assertFalse(box_predictor._is_training) + self.assertTrue(class_head._use_depthwise) + + def test_construct_default_conv_box_predictor(self): + box_predictor_text_proto = """ + convolutional_box_predictor { + conv_hyperparams { + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + }""" + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=hyperparams_builder.build, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + class_head = box_predictor._class_prediction_head + self.assertEqual(box_predictor._min_depth, 0) + self.assertEqual(box_predictor._max_depth, 0) + self.assertEqual(box_predictor._num_layers_before_predictor, 0) + self.assertTrue(class_head._use_dropout) + self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8) + self.assertFalse(class_head._apply_sigmoid_to_scores) + self.assertEqual(class_head._num_class_slots, 91) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertFalse(class_head._use_depthwise) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.') +class WeightSharedConvolutionalBoxPredictorBuilderTest(tf.test.TestCase): + + def test_box_predictor_calls_conv_argscope_fn(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.0003 + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.3 + } + } + activation: RELU_6 + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() 
+ text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + (box_predictor_proto.weight_shared_convolutional_box_predictor + .conv_hyperparams.CopyFrom(hyperparams_proto)) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10) + (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn + self.assertAlmostEqual((hyperparams_proto.regularizer. + l1_regularizer.weight), + (conv_hyperparams_actual.regularizer.l1_regularizer. + weight)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.stddev), + (conv_hyperparams_actual.initializer. + truncated_normal_initializer.stddev)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.mean), + (conv_hyperparams_actual.initializer. + truncated_normal_initializer.mean)) + self.assertEqual(hyperparams_proto.activation, + conv_hyperparams_actual.activation) + self.assertFalse(is_training) + + def test_construct_non_default_conv_box_predictor(self): + box_predictor_text_proto = """ + weight_shared_convolutional_box_predictor { + depth: 2 + num_layers_before_predictor: 2 + kernel_size: 7 + box_code_size: 3 + class_prediction_bias_init: 4.0 + } + """ + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + (box_predictor_proto.weight_shared_convolutional_box_predictor. 
+ conv_hyperparams.CopyFrom(hyperparams_proto)) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10, + add_background_class=False) + class_head = box_predictor._class_prediction_head + self.assertEqual(box_predictor._depth, 2) + self.assertEqual(box_predictor._num_layers_before_predictor, 2) + self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0) + self.assertEqual(box_predictor.num_classes, 10) + self.assertFalse(box_predictor._is_training) + self.assertEqual(box_predictor._apply_batch_norm, False) + + def test_construct_non_default_depthwise_conv_box_predictor(self): + box_predictor_text_proto = """ + weight_shared_convolutional_box_predictor { + depth: 2 + num_layers_before_predictor: 2 + kernel_size: 7 + box_code_size: 3 + class_prediction_bias_init: 4.0 + use_depthwise: true + } + """ + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + (box_predictor_proto.weight_shared_convolutional_box_predictor. + conv_hyperparams.CopyFrom(hyperparams_proto)) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10, + add_background_class=False) + class_head = box_predictor._class_prediction_head + self.assertEqual(box_predictor._depth, 2) + self.assertEqual(box_predictor._num_layers_before_predictor, 2) + self.assertEqual(box_predictor._apply_batch_norm, False) + self.assertEqual(box_predictor._use_depthwise, True) + self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0) + self.assertEqual(box_predictor.num_classes, 10) + self.assertFalse(box_predictor._is_training) + + def test_construct_default_conv_box_predictor(self): + box_predictor_text_proto = """ + weight_shared_convolutional_box_predictor { + conv_hyperparams { + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + }""" + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=hyperparams_builder.build, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + self.assertEqual(box_predictor._depth, 0) + self.assertEqual(box_predictor._num_layers_before_predictor, 0) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_predictor._apply_batch_norm, False) + + def test_construct_default_conv_box_predictor_with_batch_norm(self): + box_predictor_text_proto = """ + weight_shared_convolutional_box_predictor { + conv_hyperparams { + regularizer { + l1_regularizer { + } + } + batch_norm { + train: true + } + initializer { + truncated_normal_initializer { + } + } + } + }""" + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=hyperparams_builder.build, + 
box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + self.assertEqual(box_predictor._depth, 0) + self.assertEqual(box_predictor._num_layers_before_predictor, 0) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_predictor._apply_batch_norm, True) + + + + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.') +class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase): + + def test_box_predictor_builder_calls_fc_argscope_fn(self): + fc_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.0003 + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.3 + } + } + activation: RELU_6 + op: FC + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto) + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom( + hyperparams_proto) + mock_argscope_fn = mock.Mock(return_value='arg_scope') + box_predictor = box_predictor_builder.build( + argscope_fn=mock_argscope_fn, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10) + mock_argscope_fn.assert_called_with(hyperparams_proto, False) + self.assertEqual(box_predictor._box_prediction_head._fc_hyperparams_fn, + 'arg_scope') + self.assertEqual(box_predictor._class_prediction_head._fc_hyperparams_fn, + 'arg_scope') + + def test_non_default_mask_rcnn_box_predictor(self): + fc_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + op: FC + """ + box_predictor_text_proto = """ + mask_rcnn_box_predictor { + use_dropout: true + dropout_keep_probability: 0.8 + box_code_size: 3 + share_box_across_classes: true + } + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto) + def mock_fc_argscope_builder(fc_hyperparams_arg, is_training): + return (fc_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_fc_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + box_head = box_predictor._box_prediction_head + class_head = box_predictor._class_prediction_head + self.assertTrue(box_head._use_dropout) + self.assertTrue(class_head._use_dropout) + self.assertAlmostEqual(box_head._dropout_keep_prob, 0.8) + self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_head._box_code_size, 3) + self.assertEqual(box_head._share_box_across_classes, True) + + def test_build_default_mask_rcnn_box_predictor(self): + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = ( + hyperparams_pb2.Hyperparams.FC) + box_predictor = box_predictor_builder.build( + argscope_fn=mock.Mock(return_value='arg_scope'), + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + box_head = box_predictor._box_prediction_head + class_head = box_predictor._class_prediction_head + self.assertFalse(box_head._use_dropout) + 
self.assertFalse(class_head._use_dropout) + self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_head._box_code_size, 4) + self.assertEqual(len(box_predictor._third_stage_heads.keys()), 0) + + def test_build_box_predictor_with_mask_branch(self): + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = ( + hyperparams_pb2.Hyperparams.FC) + box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = ( + hyperparams_pb2.Hyperparams.CONV) + box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True + box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512 + box_predictor_proto.mask_rcnn_box_predictor.mask_height = 16 + box_predictor_proto.mask_rcnn_box_predictor.mask_width = 16 + mock_argscope_fn = mock.Mock(return_value='arg_scope') + box_predictor = box_predictor_builder.build( + argscope_fn=mock_argscope_fn, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + mock_argscope_fn.assert_has_calls( + [mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams, + True), + mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams, + True)], any_order=True) + box_head = box_predictor._box_prediction_head + class_head = box_predictor._class_prediction_head + third_stage_heads = box_predictor._third_stage_heads + self.assertFalse(box_head._use_dropout) + self.assertFalse(class_head._use_dropout) + self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5) + self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_head._box_code_size, 4) + self.assertIn( + mask_rcnn_box_predictor.MASK_PREDICTIONS, third_stage_heads) + self.assertEqual( + third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS] + ._mask_prediction_conv_depth, 512) + + def test_build_box_predictor_with_convlve_then_upsample_masks(self): + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = ( + hyperparams_pb2.Hyperparams.FC) + box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = ( + hyperparams_pb2.Hyperparams.CONV) + box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True + box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512 + box_predictor_proto.mask_rcnn_box_predictor.mask_height = 24 + box_predictor_proto.mask_rcnn_box_predictor.mask_width = 24 + box_predictor_proto.mask_rcnn_box_predictor.convolve_then_upsample_masks = ( + True) + + mock_argscope_fn = mock.Mock(return_value='arg_scope') + box_predictor = box_predictor_builder.build( + argscope_fn=mock_argscope_fn, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + mock_argscope_fn.assert_has_calls( + [mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams, + True), + mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams, + True)], any_order=True) + box_head = box_predictor._box_prediction_head + class_head = box_predictor._class_prediction_head + third_stage_heads = box_predictor._third_stage_heads + self.assertFalse(box_head._use_dropout) + self.assertFalse(class_head._use_dropout) + self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5) + self.assertAlmostEqual(class_head._dropout_keep_prob, 
0.5) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_head._box_code_size, 4) + self.assertIn( + mask_rcnn_box_predictor.MASK_PREDICTIONS, third_stage_heads) + self.assertEqual( + third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS] + ._mask_prediction_conv_depth, 512) + self.assertTrue(third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS] + ._convolve_then_upsample) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only Tests.') +class RfcnBoxPredictorBuilderTest(tf.test.TestCase): + + def test_box_predictor_calls_fc_argscope_fn(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.0003 + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.3 + } + } + activation: RELU_6 + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=False, + num_classes=10) + (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn + self.assertAlmostEqual((hyperparams_proto.regularizer. + l1_regularizer.weight), + (conv_hyperparams_actual.regularizer.l1_regularizer. + weight)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.stddev), + (conv_hyperparams_actual.initializer. + truncated_normal_initializer.stddev)) + self.assertAlmostEqual((hyperparams_proto.initializer. + truncated_normal_initializer.mean), + (conv_hyperparams_actual.initializer. 
+ truncated_normal_initializer.mean)) + self.assertEqual(hyperparams_proto.activation, + conv_hyperparams_actual.activation) + self.assertFalse(is_training) + + def test_non_default_rfcn_box_predictor(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + """ + box_predictor_text_proto = """ + rfcn_box_predictor { + num_spatial_bins_height: 4 + num_spatial_bins_width: 4 + depth: 4 + box_code_size: 3 + crop_height: 16 + crop_width: 16 + } + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(box_predictor_text_proto, box_predictor_proto) + box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_predictor._box_code_size, 3) + self.assertEqual(box_predictor._num_spatial_bins, [4, 4]) + self.assertEqual(box_predictor._crop_size, [16, 16]) + + def test_default_rfcn_box_predictor(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + """ + hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto) + def mock_conv_argscope_builder(conv_hyperparams_arg, is_training): + return (conv_hyperparams_arg, is_training) + + box_predictor_proto = box_predictor_pb2.BoxPredictor() + box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom( + hyperparams_proto) + box_predictor = box_predictor_builder.build( + argscope_fn=mock_conv_argscope_builder, + box_predictor_config=box_predictor_proto, + is_training=True, + num_classes=90) + self.assertEqual(box_predictor.num_classes, 90) + self.assertTrue(box_predictor._is_training) + self.assertEqual(box_predictor._box_code_size, 4) + self.assertEqual(box_predictor._num_spatial_bins, [3, 3]) + self.assertEqual(box_predictor._crop_size, [12, 12]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/calibration_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/calibration_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..4adc170d3f1a203abc47dd74017c656ba967e74b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/calibration_builder.py @@ -0,0 +1,250 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tensorflow ops to calibrate class predictions and background class.""" + +import tensorflow.compat.v1 as tf +from object_detection.utils import shape_utils + + +def _find_interval_containing_new_value(x, new_value): + """Find the index of x (ascending-ordered) after which new_value occurs.""" + new_value_shape = shape_utils.combined_static_and_dynamic_shape(new_value)[0] + x_shape = shape_utils.combined_static_and_dynamic_shape(x)[0] + compare = tf.cast(tf.reshape(new_value, shape=(new_value_shape, 1)) >= + tf.reshape(x, shape=(1, x_shape)), + dtype=tf.int32) + diff = compare[:, 1:] - compare[:, :-1] + interval_idx = tf.argmin(diff, axis=1) + return interval_idx + + +def _tf_linear_interp1d(x_to_interpolate, fn_x, fn_y): + """Tensorflow implementation of 1d linear interpolation. + + Args: + x_to_interpolate: tf.float32 Tensor of shape (num_examples,) over which 1d + linear interpolation is performed. + fn_x: Monotonically-increasing, non-repeating tf.float32 Tensor of shape + (length,) used as the domain to approximate a function. + fn_y: tf.float32 Tensor of shape (length,) used as the range to approximate + a function. + + Returns: + tf.float32 Tensor of shape (num_examples,) + """ + x_pad = tf.concat([fn_x[:1] - 1, fn_x, fn_x[-1:] + 1], axis=0) + y_pad = tf.concat([fn_y[:1], fn_y, fn_y[-1:]], axis=0) + interval_idx = _find_interval_containing_new_value(x_pad, x_to_interpolate) + + # Interpolate + alpha = ( + (x_to_interpolate - tf.gather(x_pad, interval_idx)) / + (tf.gather(x_pad, interval_idx + 1) - tf.gather(x_pad, interval_idx))) + interpolation = ((1 - alpha) * tf.gather(y_pad, interval_idx) + + alpha * tf.gather(y_pad, interval_idx + 1)) + + return interpolation + + +def _function_approximation_proto_to_tf_tensors(x_y_pairs_message): + """Extracts (x,y) pairs from a XYPairs message. + + Args: + x_y_pairs_message: calibration_pb2..XYPairs proto + Returns: + tf_x: tf.float32 tensor of shape (number_xy_pairs,) for function domain. + tf_y: tf.float32 tensor of shape (number_xy_pairs,) for function range. + """ + tf_x = tf.convert_to_tensor([x_y_pair.x + for x_y_pair + in x_y_pairs_message.x_y_pair], + dtype=tf.float32) + tf_y = tf.convert_to_tensor([x_y_pair.y + for x_y_pair + in x_y_pairs_message.x_y_pair], + dtype=tf.float32) + return tf_x, tf_y + + +def _get_class_id_function_dict(calibration_config): + """Create a dictionary mapping class id to function approximations. + + Args: + calibration_config: calibration_pb2 proto containing + id_function_approximations. + Returns: + Dictionary mapping a class id to a tuple of TF tensors to be used for + function approximation. + """ + class_id_function_dict = {} + class_id_xy_pairs_map = ( + calibration_config.class_id_function_approximations.class_id_xy_pairs_map) + for class_id in class_id_xy_pairs_map: + class_id_function_dict[class_id] = ( + _function_approximation_proto_to_tf_tensors( + class_id_xy_pairs_map[class_id])) + + return class_id_function_dict + + +def build(calibration_config): + """Returns a function that calibrates Tensorflow model scores. + + All returned functions are expected to apply positive monotonic + transformations to inputs (i.e. score ordering is strictly preserved or + adjacent scores are mapped to the same score, but an input of lower value + should never be exceed an input of higher value after transformation). 
For + class-agnostic calibration, positive monotonicity should hold across all + scores. In class-specific cases, positive monotonicity should hold within each + class. + + Args: + calibration_config: calibration_pb2.CalibrationConfig proto. + Returns: + Function that that accepts class_predictions_with_background and calibrates + the output based on calibration_config's parameters. + Raises: + ValueError: No calibration builder defined for "Oneof" in + calibration_config. + """ + + # Linear Interpolation (usually used as a result of calibration via + # isotonic regression). + if calibration_config.WhichOneof('calibrator') == 'function_approximation': + + def calibration_fn(class_predictions_with_background): + """Calibrate predictions via 1-d linear interpolation. + + Predictions scores are linearly interpolated based on a class-agnostic + function approximation. Note that the 0-indexed background class is also + transformed. + + Args: + class_predictions_with_background: tf.float32 tensor of shape + [batch_size, num_anchors, num_classes + 1] containing scores on the + interval [0,1]. This is usually produced by a sigmoid or softmax layer + and the result of calling the `predict` method of a detection model. + + Returns: + tf.float32 tensor of the same shape as the input with values on the + interval [0, 1]. + """ + # Flattening Tensors and then reshaping at the end. + flat_class_predictions_with_background = tf.reshape( + class_predictions_with_background, shape=[-1]) + fn_x, fn_y = _function_approximation_proto_to_tf_tensors( + calibration_config.function_approximation.x_y_pairs) + updated_scores = _tf_linear_interp1d( + flat_class_predictions_with_background, fn_x, fn_y) + + # Un-flatten the scores + original_detections_shape = shape_utils.combined_static_and_dynamic_shape( + class_predictions_with_background) + calibrated_class_predictions_with_background = tf.reshape( + updated_scores, + shape=original_detections_shape, + name='calibrate_scores') + return calibrated_class_predictions_with_background + + elif (calibration_config.WhichOneof('calibrator') == + 'class_id_function_approximations'): + + def calibration_fn(class_predictions_with_background): + """Calibrate predictions per class via 1-d linear interpolation. + + Prediction scores are linearly interpolated with class-specific function + approximations. Note that after calibration, an anchor's class scores will + not necessarily sum to 1, and score ordering may change, depending on each + class' calibration parameters. + + Args: + class_predictions_with_background: tf.float32 tensor of shape + [batch_size, num_anchors, num_classes + 1] containing scores on the + interval [0,1]. This is usually produced by a sigmoid or softmax layer + and the result of calling the `predict` method of a detection model. + + Returns: + tf.float32 tensor of the same shape as the input with values on the + interval [0, 1]. + + Raises: + KeyError: Calibration parameters are not present for a class. + """ + class_id_function_dict = _get_class_id_function_dict(calibration_config) + + # Tensors are split by class and then recombined at the end to recover + # the input's original shape. If a class id does not have calibration + # parameters, it is left unchanged. 
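      # Worked example (mirroring the unit tests later in this diff): with
      # class_id_xy_pairs_map = {0: [(0.0, 0.5), (1.0, 0.5)]} and no entry for
      # class 1, every class-0 score is mapped to 0.5 while class-1 scores pass
      # through unchanged, e.g. [[0.1, 0.2]] -> [[0.5, 0.2]].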
+ class_tensors = tf.unstack(class_predictions_with_background, axis=-1) + calibrated_class_tensors = [] + for class_id, class_tensor in enumerate(class_tensors): + flat_class_tensor = tf.reshape(class_tensor, shape=[-1]) + if class_id in class_id_function_dict: + output_tensor = _tf_linear_interp1d( + x_to_interpolate=flat_class_tensor, + fn_x=class_id_function_dict[class_id][0], + fn_y=class_id_function_dict[class_id][1]) + else: + tf.logging.info( + 'Calibration parameters for class id `%d` not not found', + class_id) + output_tensor = flat_class_tensor + calibrated_class_tensors.append(output_tensor) + + combined_calibrated_tensor = tf.stack(calibrated_class_tensors, axis=1) + input_shape = shape_utils.combined_static_and_dynamic_shape( + class_predictions_with_background) + calibrated_class_predictions_with_background = tf.reshape( + combined_calibrated_tensor, + shape=input_shape, + name='calibrate_scores') + return calibrated_class_predictions_with_background + + elif (calibration_config.WhichOneof('calibrator') == + 'temperature_scaling_calibration'): + + def calibration_fn(class_predictions_with_background): + """Calibrate predictions via temperature scaling. + + Predictions logits scores are scaled by the temperature scaler. Note that + the 0-indexed background class is also transformed. + + Args: + class_predictions_with_background: tf.float32 tensor of shape + [batch_size, num_anchors, num_classes + 1] containing logits scores. + This is usually produced before a sigmoid or softmax layer. + + Returns: + tf.float32 tensor of the same shape as the input. + + Raises: + ValueError: If temperature scaler is of incorrect value. + """ + scaler = calibration_config.temperature_scaling_calibration.scaler + if scaler <= 0: + raise ValueError('The scaler in temperature scaling must be positive.') + calibrated_class_predictions_with_background = tf.math.divide( + class_predictions_with_background, + scaler, + name='calibrate_score') + return calibrated_class_predictions_with_background + + # TODO(zbeaver): Add sigmoid calibration. + else: + raise ValueError('No calibration builder defined for "Oneof" in ' + 'calibration_config.') + + return calibration_fn diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/calibration_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/calibration_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ad0a212887396e529adecacbfb1c3374c0b04ef Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/calibration_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/calibration_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/calibration_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a81d53a86e65bc400fe38cac8c96867aa1489607 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/calibration_builder_test.py @@ -0,0 +1,233 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
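A minimal usage sketch of the calibration builder above, offered as an illustrative aside: it assumes only the calibration_pb2 proto and the TF1-compat API already used in this diff, and exercises the temperature-scaling branch, which simply divides scores by the configured scaler.

import tensorflow.compat.v1 as tf
from object_detection.builders import calibration_builder
from object_detection.protos import calibration_pb2

# Hypothetical logits of shape [batch=1, num_anchors=1, num_classes + 1 = 3].
logits = tf.constant([[[2.0, 4.0, 6.0]]], dtype=tf.float32)

# Temperature scaling divides every score by the configured scaler.
config = calibration_pb2.CalibrationConfig()
config.temperature_scaling_calibration.scaler = 2.0
calibration_fn = calibration_builder.build(config)
calibrated = calibration_fn(logits)  # -> [[[1.0, 2.0, 3.0]]]

with tf.Session() as sess:
  print(sess.run(calibrated))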
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for calibration_builder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from scipy import interpolate +from six.moves import zip +import tensorflow.compat.v1 as tf +from object_detection.builders import calibration_builder +from object_detection.protos import calibration_pb2 +from object_detection.utils import test_case + + +class CalibrationBuilderTest(test_case.TestCase): + + def test_tf_linear_interp1d_map(self): + """Tests TF linear interpolation mapping to a single number.""" + def graph_fn(): + tf_x = tf.constant([0., 0.5, 1.]) + tf_y = tf.constant([0.5, 0.5, 0.5]) + new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.]) + tf_map_outputs = calibration_builder._tf_linear_interp1d( + new_x, tf_x, tf_y) + return tf_map_outputs + tf_map_outputs_np = self.execute(graph_fn, []) + self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5]) + + def test_tf_linear_interp1d_interpolate(self): + """Tests TF 1d linear interpolation not mapping to a single number.""" + def graph_fn(): + tf_x = tf.constant([0., 0.5, 1.]) + tf_y = tf.constant([0.6, 0.7, 1.0]) + new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.]) + tf_interpolate_outputs = calibration_builder._tf_linear_interp1d( + new_x, tf_x, tf_y) + return tf_interpolate_outputs + tf_interpolate_outputs_np = self.execute(graph_fn, []) + self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.]) + + @staticmethod + def _get_scipy_interp1d(new_x, x, y): + """Helper performing 1d linear interpolation using SciPy.""" + interpolation1d_fn = interpolate.interp1d(x, y) + return interpolation1d_fn(new_x) + + def _get_tf_interp1d(self, new_x, x, y): + """Helper performing 1d linear interpolation using Tensorflow.""" + def graph_fn(): + tf_interp_outputs = calibration_builder._tf_linear_interp1d( + tf.convert_to_tensor(new_x, dtype=tf.float32), + tf.convert_to_tensor(x, dtype=tf.float32), + tf.convert_to_tensor(y, dtype=tf.float32)) + return tf_interp_outputs + np_tf_interp_outputs = self.execute(graph_fn, []) + return np_tf_interp_outputs + + def test_tf_linear_interp1d_against_scipy_map(self): + """Tests parity of TF linear interpolation with SciPy for simple mapping.""" + length = 10 + np_x = np.linspace(0, 1, length) + + # Mapping all numbers to 0.5 + np_y_map = np.repeat(0.5, length) + + # Scipy and TF interpolations + test_data_np = np.linspace(0, 1, length * 10) + scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map) + np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map) + self.assertAllClose(scipy_map_outputs, np_tf_map_outputs) + + def test_tf_linear_interp1d_against_scipy_interpolate(self): + """Tests parity of TF linear interpolation with SciPy.""" + length = 10 + np_x = np.linspace(0, 1, length) + + # Requires interpolation over 0.5 to 1 domain + np_y_interp = np.linspace(0.5, 1, length) + + # Scipy interpolation for comparison + test_data_np = np.linspace(0, 1, length * 10) + scipy_interp_outputs = 
self._get_scipy_interp1d(test_data_np, np_x, + np_y_interp) + np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x, + np_y_interp) + self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs) + + @staticmethod + def _add_function_approximation_to_calibration_proto(calibration_proto, + x_array, y_array, + class_id): + """Adds a function approximation to calibration proto for a class id.""" + # Per-class calibration. + if class_id is not None: + function_approximation = ( + calibration_proto.class_id_function_approximations + .class_id_xy_pairs_map[class_id]) + # Class-agnostic calibration. + else: + function_approximation = ( + calibration_proto.function_approximation.x_y_pairs) + + for x, y in zip(x_array, y_array): + x_y_pair_message = function_approximation.x_y_pair.add() + x_y_pair_message.x = x + x_y_pair_message.y = y + + def test_class_agnostic_function_approximation(self): + """Tests that calibration produces correct class-agnostic values.""" + # Generate fake calibration proto. For this interpolation, any input on + # [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have + # 0.25 subtracted from it. + class_agnostic_x = np.asarray([0.0, 0.5, 1.0]) + class_agnostic_y = np.asarray([0.0, 0.25, 0.75]) + calibration_config = calibration_pb2.CalibrationConfig() + self._add_function_approximation_to_calibration_proto( + calibration_config, class_agnostic_x, class_agnostic_y, class_id=None) + + def graph_fn(): + calibration_fn = calibration_builder.build(calibration_config) + # batch_size = 2, num_classes = 2, num_anchors = 2. + class_predictions_with_background = tf.constant( + [[[0.1, 0.2, 0.3], + [0.4, 0.5, 0.0]], + [[0.6, 0.7, 0.8], + [0.9, 1.0, 1.0]]], dtype=tf.float32) + + # Everything should map to 0.5 if classes are ignored. + calibrated_scores = calibration_fn(class_predictions_with_background) + return calibrated_scores + calibrated_scores_np = self.execute(graph_fn, []) + self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15], + [0.2, 0.25, 0.0]], + [[0.35, 0.45, 0.55], + [0.65, 0.75, 0.75]]]) + + def test_multiclass_function_approximations(self): + """Tests that calibration produces correct multiclass values.""" + # Background class (0-index) maps all predictions to 0.5. + class_0_x = np.asarray([0.0, 0.5, 1.0]) + class_0_y = np.asarray([0.5, 0.5, 0.5]) + calibration_config = calibration_pb2.CalibrationConfig() + self._add_function_approximation_to_calibration_proto( + calibration_config, class_0_x, class_0_y, class_id=0) + + # Class id 1 will interpolate using these values. + class_1_x = np.asarray([0.0, 0.2, 1.0]) + class_1_y = np.asarray([0.0, 0.6, 1.0]) + self._add_function_approximation_to_calibration_proto( + calibration_config, class_1_x, class_1_y, class_id=1) + + def graph_fn(): + calibration_fn = calibration_builder.build(calibration_config) + # batch_size = 2, num_classes = 2, num_anchors = 2. 
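      # With the protos above, class 0 (background) maps every score to 0.5,
      # while class 1 is interpolated through (0.0, 0.0), (0.2, 0.6) and
      # (1.0, 1.0); e.g. 0.1 -> 0.3 and 0.92 -> 0.96, matching the asserted
      # output below.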
+ class_predictions_with_background = tf.constant( + [[[0.1, 0.2], [0.9, 0.1]], + [[0.6, 0.4], [0.08, 0.92]]], + dtype=tf.float32) + calibrated_scores = calibration_fn(class_predictions_with_background) + return calibrated_scores + calibrated_scores_np = self.execute(graph_fn, []) + self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]], + [[0.5, 0.7], [0.5, 0.96]]]) + + def test_temperature_scaling(self): + """Tests that calibration produces correct temperature scaling values.""" + calibration_config = calibration_pb2.CalibrationConfig() + calibration_config.temperature_scaling_calibration.scaler = 2.0 + + def graph_fn(): + calibration_fn = calibration_builder.build(calibration_config) + # batch_size = 2, num_classes = 2, num_anchors = 2. + class_predictions_with_background = tf.constant( + [[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]], + [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]], + dtype=tf.float32) + calibrated_scores = calibration_fn(class_predictions_with_background) + return calibrated_scores + calibrated_scores_np = self.execute(graph_fn, []) + self.assertAllClose(calibrated_scores_np, + [[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]], + [[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]]) + + def test_temperature_scaling_incorrect_value_error(self): + calibration_config = calibration_pb2.CalibrationConfig() + calibration_config.temperature_scaling_calibration.scaler = 0 + + calibration_fn = calibration_builder.build(calibration_config) + class_predictions_with_background = tf.constant( + [[[0.1, 0.2, 0.3]]], dtype=tf.float32) + with self.assertRaises(ValueError): + calibration_fn(class_predictions_with_background) + + def test_skips_class_when_calibration_parameters_not_present(self): + """Tests that graph fails when parameters not present for all classes.""" + # Only adding calibration parameters for class id = 0, even though class id + # 1 is present in the data. + class_0_x = np.asarray([0.0, 0.5, 1.0]) + class_0_y = np.asarray([0.5, 0.5, 0.5]) + calibration_config = calibration_pb2.CalibrationConfig() + self._add_function_approximation_to_calibration_proto( + calibration_config, class_0_x, class_0_y, class_id=0) + def graph_fn(): + calibration_fn = calibration_builder.build(calibration_config) + # batch_size = 2, num_classes = 2, num_anchors = 2. + class_predictions_with_background = tf.constant( + [[[0.1, 0.2], [0.9, 0.1]], + [[0.6, 0.4], [0.08, 0.92]]], + dtype=tf.float32) + calibrated_scores = calibration_fn(class_predictions_with_background) + return calibrated_scores + calibrated_scores_np = self.execute(graph_fn, []) + self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]], + [[0.5, 0.4], [0.5, 0.92]]]) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/dataset_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/dataset_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..f0dd3002b407b6de900a042dc1a81f4b7878e022 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/dataset_builder.py @@ -0,0 +1,250 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""tf.data.Dataset builder. + +Creates data sources for DetectionModels from an InputReader config. See +input_reader.proto for options. + +Note: If users wishes to also use their own InputReaders with the Object +Detection configuration framework, they should define their own builder function +that wraps the build function. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import tensorflow.compat.v1 as tf + +from object_detection.builders import decoder_builder +from object_detection.protos import input_reader_pb2 + + +def make_initializable_iterator(dataset): + """Creates an iterator, and initializes tables. + + This is useful in cases where make_one_shot_iterator wouldn't work because + the graph contains a hash table that needs to be initialized. + + Args: + dataset: A `tf.data.Dataset` object. + + Returns: + A `tf.data.Iterator`. + """ + iterator = dataset.make_initializable_iterator() + tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) + return iterator + + +def _read_dataset_internal(file_read_func, + input_files, + config, + filename_shard_fn=None): + """Reads a dataset, and handles repetition and shuffling. + + Args: + file_read_func: Function to use in tf_data.parallel_interleave, to read + every individual file into a tf.data.Dataset. + input_files: A list of file paths to read. + config: A input_reader_builder.InputReader object. + filename_shard_fn: optional, A function used to shard filenames across + replicas. This function takes as input a TF dataset of filenames and is + expected to return its sharded version. It is useful when the dataset is + being loaded on one of possibly many replicas and we want to evenly shard + the files between the replicas. + + Returns: + A tf.data.Dataset of (undecoded) tf-records based on config. + + Raises: + RuntimeError: If no files are found at the supplied path(s). + """ + filenames = tf.gfile.Glob(input_files) + tf.logging.info('Reading record datasets for input file: %s' % input_files) + tf.logging.info('Number of filenames to read: %s' % len(filenames)) + if not filenames: + raise RuntimeError('Did not find any input files matching the glob pattern ' + '{}'.format(input_files)) + num_readers = config.num_readers + if num_readers > len(filenames): + num_readers = len(filenames) + tf.logging.warning('num_readers has been reduced to %d to match input file ' + 'shards.' 
% num_readers) + filename_dataset = tf.data.Dataset.from_tensor_slices(filenames) + if config.shuffle: + filename_dataset = filename_dataset.shuffle( + config.filenames_shuffle_buffer_size) + elif num_readers > 1: + tf.logging.warning('`shuffle` is false, but the input data stream is ' + 'still slightly shuffled since `num_readers` > 1.') + if filename_shard_fn: + filename_dataset = filename_shard_fn(filename_dataset) + + filename_dataset = filename_dataset.repeat(config.num_epochs or None) + records_dataset = filename_dataset.apply( + tf.data.experimental.parallel_interleave( + file_read_func, + cycle_length=num_readers, + block_length=config.read_block_length, + sloppy=config.shuffle)) + if config.shuffle: + records_dataset = records_dataset.shuffle(config.shuffle_buffer_size) + return records_dataset + + +def read_dataset(file_read_func, input_files, config, filename_shard_fn=None): + """Reads multiple datasets with sampling. + + Args: + file_read_func: Function to use in tf_data.parallel_interleave, to read + every individual file into a tf.data.Dataset. + input_files: A list of file paths to read. + config: A input_reader_builder.InputReader object. + filename_shard_fn: optional, A function used to shard filenames across + replicas. This function takes as input a TF dataset of filenames and is + expected to return its sharded version. It is useful when the dataset is + being loaded on one of possibly many replicas and we want to evenly shard + the files between the replicas. + + Returns: + A tf.data.Dataset of (undecoded) tf-records based on config. + + Raises: + RuntimeError: If no files are found at the supplied path(s). + """ + if config.sample_from_datasets_weights: + tf.logging.info('Reading weighted datasets: %s' % input_files) + if len(input_files) != len(config.sample_from_datasets_weights): + raise ValueError('Expected the number of input files to be the same as ' + 'the number of dataset sample weights. But got ' + '[input_files, sample_from_datasets_weights]: [' + + input_files + ', ' + + str(config.sample_from_datasets_weights) + ']') + tf.logging.info('Sampling from datasets %s with weights %s' % + (input_files, config.sample_from_datasets_weights)) + records_datasets = [] + for input_file in input_files: + records_dataset = _read_dataset_internal(file_read_func, [input_file], + config, filename_shard_fn) + records_datasets.append(records_dataset) + dataset_weights = list(config.sample_from_datasets_weights) + return tf.data.experimental.sample_from_datasets(records_datasets, + dataset_weights) + else: + tf.logging.info('Reading unweighted datasets: %s' % input_files) + return _read_dataset_internal(file_read_func, input_files, config, + filename_shard_fn) + + +def shard_function_for_context(input_context): + """Returns a function that shards filenames based on the input context.""" + + if input_context is None: + return None + + def shard_fn(dataset): + return dataset.shard( + input_context.num_input_pipelines, input_context.input_pipeline_id) + + return shard_fn + + +def build(input_reader_config, batch_size=None, transform_input_data_fn=None, + input_context=None, reduce_to_frame_fn=None): + """Builds a tf.data.Dataset. + + Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all + records. Applies a padded batch to the resulting dataset. + + Args: + input_reader_config: A input_reader_pb2.InputReader object. + batch_size: Batch size. If batch size is None, no batching is performed. 
+ transform_input_data_fn: Function to apply transformation to all records, + or None if no extra decoding is required. + input_context: optional, A tf.distribute.InputContext object used to + shard filenames and compute per-replica batch_size when this function + is being called per-replica. + reduce_to_frame_fn: Function that extracts frames from tf.SequenceExample + type input data. + + Returns: + A tf.data.Dataset based on the input_reader_config. + + Raises: + ValueError: On invalid input reader proto. + ValueError: If no input paths are specified. + """ + if not isinstance(input_reader_config, input_reader_pb2.InputReader): + raise ValueError('input_reader_config not of type ' + 'input_reader_pb2.InputReader.') + + decoder = decoder_builder.build(input_reader_config) + + if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader': + config = input_reader_config.tf_record_input_reader + if not config.input_path: + raise ValueError('At least one input path must be specified in ' + '`input_reader_config`.') + def dataset_map_fn(dataset, fn_to_map, batch_size=None, + input_reader_config=None): + """Handles whether or not to use the legacy map function. + + Args: + dataset: A tf.Dataset. + fn_to_map: The function to be mapped for that dataset. + batch_size: Batch size. If batch size is None, no batching is performed. + input_reader_config: A input_reader_pb2.InputReader object. + + Returns: + A tf.data.Dataset mapped with fn_to_map. + """ + if hasattr(dataset, 'map_with_legacy_function'): + if batch_size: + num_parallel_calls = batch_size * ( + input_reader_config.num_parallel_batches) + else: + num_parallel_calls = input_reader_config.num_parallel_map_calls + dataset = dataset.map_with_legacy_function( + fn_to_map, num_parallel_calls=num_parallel_calls) + else: + dataset = dataset.map(fn_to_map, tf.data.experimental.AUTOTUNE) + return dataset + shard_fn = shard_function_for_context(input_context) + if input_context is not None: + batch_size = input_context.get_per_replica_batch_size(batch_size) + dataset = read_dataset( + functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000), + config.input_path[:], input_reader_config, filename_shard_fn=shard_fn) + if input_reader_config.sample_1_of_n_examples > 1: + dataset = dataset.shard(input_reader_config.sample_1_of_n_examples, 0) + # TODO(rathodv): make batch size a required argument once the old binaries + # are deleted. 
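    # Remaining pipeline: decode each serialized record, optionally reduce
    # sequence examples to single frames, apply the user transform, then
    # batch (with drop_remainder) and prefetch as configured below.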
+ dataset = dataset_map_fn(dataset, decoder.decode, batch_size, + input_reader_config) + if reduce_to_frame_fn: + dataset = reduce_to_frame_fn(dataset, dataset_map_fn, batch_size, + input_reader_config) + if transform_input_data_fn is not None: + dataset = dataset_map_fn(dataset, transform_input_data_fn, + batch_size, input_reader_config) + if batch_size: + dataset = dataset.batch(batch_size, + drop_remainder=input_reader_config.drop_remainder) + dataset = dataset.prefetch(input_reader_config.num_prefetch_batches) + return dataset + + raise ValueError('Unsupported input_reader_config.') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/dataset_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/dataset_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98d90c3e5132fa66597a2c29f44e79beb42b1000 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/dataset_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/dataset_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/dataset_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7f3358ab4f130417a76a1292600a467b3be30863 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/dataset_builder_test.py @@ -0,0 +1,716 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for dataset_builder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +from six.moves import range +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import dataset_builder +from object_detection.core import standard_fields as fields +from object_detection.dataset_tools import seq_example_util +from object_detection.protos import input_reader_pb2 +from object_detection.utils import dataset_util +from object_detection.utils import test_case + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import lookup as contrib_lookup +except ImportError: + # TF 2.0 doesn't ship with contrib. 
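  # contrib.lookup is only needed for the TF1 hash-table iterator test below;
  # under TF2 the tf.compat.v2.lookup API is used instead.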
+ pass +# pylint: enable=g-import-not-at-top + + +def get_iterator_next_for_testing(dataset, is_tf2): + iterator = dataset.make_initializable_iterator() + if not is_tf2: + tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) + return iterator.get_next() + + +def _get_labelmap_path(): + """Returns an absolute path to label map file.""" + parent_path = os.path.dirname(tf.resource_loader.get_data_files_path()) + return os.path.join(parent_path, 'data', + 'pet_label_map.pbtxt') + + +class DatasetBuilderTest(test_case.TestCase): + + def create_tf_record(self, has_additional_channels=False, num_shards=1, + num_examples_per_shard=1): + + def dummy_jpeg_fn(): + image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) + additional_channels_tensor = np.random.randint( + 255, size=(4, 5, 1)).astype(np.uint8) + encoded_jpeg = tf.image.encode_jpeg(image_tensor) + encoded_additional_channels_jpeg = tf.image.encode_jpeg( + additional_channels_tensor) + + return encoded_jpeg, encoded_additional_channels_jpeg + + encoded_jpeg, encoded_additional_channels_jpeg = self.execute( + dummy_jpeg_fn, []) + + tmp_dir = self.get_temp_dir() + flat_mask = (4 * 5) * [1.0] + + for i in range(num_shards): + path = os.path.join(tmp_dir, '%05d.tfrecord' % i) + writer = tf.python_io.TFRecordWriter(path) + + for j in range(num_examples_per_shard): + if num_shards > 1: + source_id = (str(i) + '_' + str(j)).encode() + else: + source_id = str(j).encode() + + features = { + 'image/source_id': dataset_util.bytes_feature(source_id), + 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), + 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/height': dataset_util.int64_feature(4), + 'image/width': dataset_util.int64_feature(5), + 'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]), + 'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]), + 'image/object/class/label': dataset_util.int64_list_feature([2]), + 'image/object/mask': dataset_util.float_list_feature(flat_mask), + } + + if has_additional_channels: + additional_channels_key = 'image/additional_channels/encoded' + features[additional_channels_key] = dataset_util.bytes_list_feature( + [encoded_additional_channels_jpeg] * 2) + + example = tf.train.Example(features=tf.train.Features(feature=features)) + writer.write(example.SerializeToString()) + + writer.close() + + return os.path.join(self.get_temp_dir(), '?????.tfrecord') + + def _make_random_serialized_jpeg_images(self, num_frames, image_height, + image_width): + def graph_fn(): + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list] + return encoded_images_list + + encoded_images = self.execute(graph_fn, []) + return encoded_images + + def create_tf_record_sequence_example(self): + path = os.path.join(self.get_temp_dir(), 'seq_tfrecord') + writer = tf.python_io.TFRecordWriter(path) + + num_frames = 4 + image_height = 4 + image_width = 5 + image_source_ids = [str(i) for i in range(num_frames)] + with self.test_session(): + encoded_images = self._make_random_serialized_jpeg_images( + num_frames, image_height, image_width) + sequence_example_serialized = seq_example_util.make_sequence_example( + 
dataset_name='video_dataset', + video_id='video', + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_source_ids=image_source_ids, + image_format='JPEG', + is_annotated=[[1], [1], [1], [1]], + bboxes=[ + [[]], # Frame 0. + [[0., 0., 1., 1.]], # Frame 1. + [[0., 0., 1., 1.], + [0.1, 0.1, 0.2, 0.2]], # Frame 2. + [[]], # Frame 3. + ], + label_strings=[ + [], # Frame 0. + ['Abyssinian'], # Frame 1. + ['Abyssinian', 'american_bulldog'], # Frame 2. + [], # Frame 3 + ]).SerializeToString() + writer.write(sequence_example_serialized) + writer.close() + return path + + def test_build_tf_record_input_reader(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def graph_fn(): + return get_iterator_next_for_testing( + dataset_builder.build(input_reader_proto, batch_size=1), + self.is_tf2()) + + output_dict = self.execute(graph_fn, []) + + self.assertNotIn( + fields.InputDataFields.groundtruth_instance_masks, output_dict) + self.assertEqual((1, 4, 5, 3), + output_dict[fields.InputDataFields.image].shape) + self.assertAllEqual([[2]], + output_dict[fields.InputDataFields.groundtruth_classes]) + self.assertEqual( + (1, 1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllEqual( + [0.0, 0.0, 1.0, 1.0], + output_dict[fields.InputDataFields.groundtruth_boxes][0][0]) + + def get_mock_reduce_to_frame_fn(self): + def mock_reduce_to_frame_fn(dataset, dataset_map_fn, batch_size, config): + def get_frame(tensor_dict): + out_tensor_dict = {} + out_tensor_dict[fields.InputDataFields.source_id] = ( + tensor_dict[fields.InputDataFields.source_id][0]) + return out_tensor_dict + return dataset_map_fn(dataset, get_frame, batch_size, config) + return mock_reduce_to_frame_fn + + def test_build_tf_record_input_reader_sequence_example_train(self): + tf_record_path = self.create_tf_record_sequence_example() + label_map_path = _get_labelmap_path() + input_type = 'TF_SEQUENCE_EXAMPLE' + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + input_type: {1} + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path, input_type) + input_reader_proto = input_reader_pb2.InputReader() + input_reader_proto.label_map_path = label_map_path + text_format.Merge(input_reader_text_proto, input_reader_proto) + reduce_to_frame_fn = self.get_mock_reduce_to_frame_fn() + + def graph_fn(): + return get_iterator_next_for_testing( + dataset_builder.build(input_reader_proto, batch_size=1, + reduce_to_frame_fn=reduce_to_frame_fn), + self.is_tf2()) + + output_dict = self.execute(graph_fn, []) + + self.assertEqual((1,), + output_dict[fields.InputDataFields.source_id].shape) + + def test_build_tf_record_input_reader_sequence_example_test(self): + tf_record_path = self.create_tf_record_sequence_example() + input_type = 'TF_SEQUENCE_EXAMPLE' + label_map_path = _get_labelmap_path() + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + input_type: {1} + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path, input_type) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + input_reader_proto.label_map_path = label_map_path + reduce_to_frame_fn = self.get_mock_reduce_to_frame_fn() 
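    # The mock reduce_to_frame_fn keeps only the first source_id per sequence,
    # so the decoded output below collapses to a (1,) source_id tensor.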
+ def graph_fn(): + return get_iterator_next_for_testing( + dataset_builder.build(input_reader_proto, batch_size=1, + reduce_to_frame_fn=reduce_to_frame_fn), + self.is_tf2()) + + output_dict = self.execute(graph_fn, []) + + self.assertEqual((1,), + output_dict[fields.InputDataFields.source_id].shape) + + def test_build_tf_record_input_reader_and_load_instance_masks(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + load_instance_masks: true + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def graph_fn(): + return get_iterator_next_for_testing( + dataset_builder.build(input_reader_proto, batch_size=1), + self.is_tf2() + ) + + output_dict = self.execute(graph_fn, []) + self.assertAllEqual( + (1, 1, 4, 5), + output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) + + def test_build_tf_record_input_reader_with_batch_size_two(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def one_hot_class_encoding_fn(tensor_dict): + tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot( + tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3) + return tensor_dict + + def graph_fn(): + return dataset_builder.make_initializable_iterator( + dataset_builder.build( + input_reader_proto, + transform_input_data_fn=one_hot_class_encoding_fn, + batch_size=2)).get_next() + + output_dict = self.execute(graph_fn, []) + + self.assertAllEqual([2, 4, 5, 3], + output_dict[fields.InputDataFields.image].shape) + self.assertAllEqual( + [2, 1, 3], + output_dict[fields.InputDataFields.groundtruth_classes].shape) + self.assertAllEqual( + [2, 1, 4], output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllEqual([[[0.0, 0.0, 1.0, 1.0]], [[0.0, 0.0, 1.0, 1.0]]], + output_dict[fields.InputDataFields.groundtruth_boxes]) + + def test_build_tf_record_input_reader_with_batch_size_two_and_masks(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + load_instance_masks: true + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def one_hot_class_encoding_fn(tensor_dict): + tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot( + tensor_dict[fields.InputDataFields.groundtruth_classes] - 1, depth=3) + return tensor_dict + + def graph_fn(): + return dataset_builder.make_initializable_iterator( + dataset_builder.build( + input_reader_proto, + transform_input_data_fn=one_hot_class_encoding_fn, + batch_size=2)).get_next() + + output_dict = self.execute(graph_fn, []) + + self.assertAllEqual( + [2, 1, 4, 5], + output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) + + def test_raises_error_with_no_input_paths(self): + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + load_instance_masks: true + """ + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + with 
self.assertRaises(ValueError): + dataset_builder.build(input_reader_proto, batch_size=1) + + def test_sample_all_data(self): + tf_record_path = self.create_tf_record(num_examples_per_shard=2) + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + sample_1_of_n_examples: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def graph_fn(): + dataset = dataset_builder.build(input_reader_proto, batch_size=1) + sample1_ds = dataset.take(1) + sample2_ds = dataset.skip(1) + iter1 = dataset_builder.make_initializable_iterator(sample1_ds) + iter2 = dataset_builder.make_initializable_iterator(sample2_ds) + + return iter1.get_next(), iter2.get_next() + + output_dict1, output_dict2 = self.execute(graph_fn, []) + self.assertAllEqual([b'0'], output_dict1[fields.InputDataFields.source_id]) + self.assertEqual([b'1'], output_dict2[fields.InputDataFields.source_id]) + + def test_sample_one_of_n_shards(self): + tf_record_path = self.create_tf_record(num_examples_per_shard=4) + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + sample_1_of_n_examples: 2 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + def graph_fn(): + dataset = dataset_builder.build(input_reader_proto, batch_size=1) + sample1_ds = dataset.take(1) + sample2_ds = dataset.skip(1) + iter1 = dataset_builder.make_initializable_iterator(sample1_ds) + iter2 = dataset_builder.make_initializable_iterator(sample2_ds) + + return iter1.get_next(), iter2.get_next() + + output_dict1, output_dict2 = self.execute(graph_fn, []) + self.assertAllEqual([b'0'], output_dict1[fields.InputDataFields.source_id]) + self.assertEqual([b'2'], output_dict2[fields.InputDataFields.source_id]) + + def test_no_input_context(self): + """Test that all samples are read with no input context given.""" + tf_record_path = self.create_tf_record(num_examples_per_shard=16, + num_shards=2) + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + num_epochs: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + + for i in range(4): + + # pylint:disable=cell-var-from-loop + def graph_fn(): + dataset = dataset_builder.build(input_reader_proto, batch_size=8) + dataset = dataset.skip(i) + return get_iterator_next_for_testing(dataset, self.is_tf2()) + + batch = self.execute(graph_fn, []) + self.assertEqual(batch['image'].shape, (8, 4, 5, 3)) + + def graph_fn_last_batch(): + dataset = dataset_builder.build(input_reader_proto, batch_size=8) + dataset = dataset.skip(4) + return get_iterator_next_for_testing(dataset, self.is_tf2()) + + self.assertRaises(tf.errors.OutOfRangeError, self.execute, + compute_fn=graph_fn_last_batch, inputs=[]) + + def test_with_input_context(self): + """Test that a subset is read with input context given.""" + tf_record_path = self.create_tf_record(num_examples_per_shard=16, + num_shards=2) + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + num_epochs: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + 
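    # With num_input_pipelines=2 this replica reads half of the 32 examples,
    # and get_per_replica_batch_size(8) with 4 replicas in sync yields batches
    # of 2, so exactly 8 batches of shape (2, 4, 5, 3) are available.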
+ input_context = tf.distribute.InputContext( + num_input_pipelines=2, input_pipeline_id=0, num_replicas_in_sync=4 + ) + + for i in range(8): + + # pylint:disable=cell-var-from-loop + def graph_fn(): + + dataset = dataset_builder.build(input_reader_proto, batch_size=8, + input_context=input_context) + dataset = dataset.skip(i) + return get_iterator_next_for_testing(dataset, self.is_tf2()) + + batch = self.execute(graph_fn, []) + self.assertEqual(batch['image'].shape, (2, 4, 5, 3)) + + def graph_fn_last_batch(): + dataset = dataset_builder.build(input_reader_proto, batch_size=8, + input_context=input_context) + dataset = dataset.skip(8) + return get_iterator_next_for_testing(dataset, self.is_tf2()) + + self.assertRaises(tf.errors.OutOfRangeError, self.execute, + compute_fn=graph_fn_last_batch, inputs=[]) + + +class ReadDatasetTest(test_case.TestCase): + + def setUp(self): + self._path_template = os.path.join(self.get_temp_dir(), 'examples_%s.txt') + + for i in range(5): + path = self._path_template % i + with tf.gfile.Open(path, 'wb') as f: + f.write('\n'.join([str(i + 1), str((i + 1) * 10)])) + + self._shuffle_path_template = os.path.join(self.get_temp_dir(), + 'shuffle_%s.txt') + for i in range(2): + path = self._shuffle_path_template % i + with tf.gfile.Open(path, 'wb') as f: + f.write('\n'.join([str(i)] * 5)) + + super(ReadDatasetTest, self).setUp() + + def _get_dataset_next(self, files, config, batch_size, num_batches_skip=0): + + def decode_func(value): + return [tf.string_to_number(value, out_type=tf.int32)] + + dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files, + config) + dataset = dataset.map(decode_func) + dataset = dataset.batch(batch_size) + + if num_batches_skip > 0: + dataset = dataset.skip(num_batches_skip) + + return get_iterator_next_for_testing(dataset, self.is_tf2()) + + def _assert_item_count(self, data, item, percentage): + self.assertAlmostEqual(data.count(item)/len(data), percentage, places=1) + + def test_make_initializable_iterator_with_hashTable(self): + + def graph_fn(): + keys = [1, 0, -1] + dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]]) + try: + # Dynamically try to load the tf v2 lookup, falling back to contrib + lookup = tf.compat.v2.lookup + hash_table_class = tf.compat.v2.lookup.StaticHashTable + except AttributeError: + lookup = contrib_lookup + hash_table_class = contrib_lookup.HashTable + table = hash_table_class( + initializer=lookup.KeyValueTensorInitializer( + keys=keys, values=list(reversed(keys))), + default_value=100) + dataset = dataset.map(table.lookup) + return dataset_builder.make_initializable_iterator(dataset).get_next() + + result = self.execute(graph_fn, []) + self.assertAllEqual(result, [-1, 100, 1, 100]) + + def test_read_dataset_sample_from_datasets_weights_equal_weight(self): + """Ensure that the files' values are equally-weighted.""" + config = input_reader_pb2.InputReader() + config.num_readers = 2 + config.shuffle = False + config.sample_from_datasets_weights.extend([0.5, 0.5]) + + def graph_fn(): + return self._get_dataset_next( + [self._path_template % '0', self._path_template % '1'], + config, + batch_size=1000) + + data = list(self.execute(graph_fn, [])) + self.assertEqual(len(data), 1000) + self._assert_item_count(data, 1, 0.25) + self._assert_item_count(data, 10, 0.25) + self._assert_item_count(data, 2, 0.25) + self._assert_item_count(data, 20, 0.25) + + def test_read_dataset_sample_from_datasets_weights_zero_weight(self): + """Ensure that the files' values are equally-weighted.""" + config = 
input_reader_pb2.InputReader() + config.num_readers = 2 + config.shuffle = False + config.sample_from_datasets_weights.extend([1.0, 0.0]) + + def graph_fn(): + return self._get_dataset_next( + [self._path_template % '0', self._path_template % '1'], + config, + batch_size=1000) + + data = list(self.execute(graph_fn, [])) + self.assertEqual(len(data), 1000) + self._assert_item_count(data, 1, 0.5) + self._assert_item_count(data, 10, 0.5) + self._assert_item_count(data, 2, 0.0) + self._assert_item_count(data, 20, 0.0) + + def test_read_dataset_sample_from_datasets_weights_unbalanced(self): + """Ensure that the files' values are equally-weighted.""" + config = input_reader_pb2.InputReader() + config.num_readers = 2 + config.shuffle = False + config.sample_from_datasets_weights.extend([0.1, 0.9]) + + def graph_fn(): + return self._get_dataset_next( + [self._path_template % '0', self._path_template % '1'], + config, + batch_size=1000) + + data = list(self.execute(graph_fn, [])) + self.assertEqual(len(data), 1000) + self._assert_item_count(data, 1, 0.05) + self._assert_item_count(data, 10, 0.05) + self._assert_item_count(data, 2, 0.45) + self._assert_item_count(data, 20, 0.45) + + def test_read_dataset(self): + config = input_reader_pb2.InputReader() + config.num_readers = 1 + config.shuffle = False + + def graph_fn(): + return self._get_dataset_next( + [self._path_template % '*'], config, batch_size=20) + + data = self.execute(graph_fn, []) + # Note that the execute function extracts single outputs if the return + # value is of size 1. + self.assertCountEqual( + data, [ + 1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 1, 10, 2, 20, 3, 30, 4, 40, 5, + 50 + ]) + + def test_reduce_num_reader(self): + config = input_reader_pb2.InputReader() + config.num_readers = 10 + config.shuffle = False + + def graph_fn(): + return self._get_dataset_next( + [self._path_template % '*'], config, batch_size=20) + + data = self.execute(graph_fn, []) + # Note that the execute function extracts single outputs if the return + # value is of size 1. + self.assertCountEqual( + data, [ + 1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 1, 10, 2, 20, 3, 30, 4, 40, 5, + 50 + ]) + + def test_enable_shuffle(self): + config = input_reader_pb2.InputReader() + config.num_readers = 1 + config.shuffle = True + + tf.set_random_seed(1) # Set graph level seed. + + def graph_fn(): + return self._get_dataset_next( + [self._shuffle_path_template % '*'], config, batch_size=10) + expected_non_shuffle_output = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1] + data = self.execute(graph_fn, []) + + self.assertTrue( + np.any(np.not_equal(data, expected_non_shuffle_output))) + + def test_disable_shuffle_(self): + config = input_reader_pb2.InputReader() + config.num_readers = 1 + config.shuffle = False + + def graph_fn(): + return self._get_dataset_next( + [self._shuffle_path_template % '*'], config, batch_size=10) + expected_non_shuffle_output1 = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1] + expected_non_shuffle_output2 = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0] + + # Note that the execute function extracts single outputs if the return + # value is of size 1. 
+ data = self.execute(graph_fn, []) + self.assertTrue(all(data == expected_non_shuffle_output1) or + all(data == expected_non_shuffle_output2)) + + def test_read_dataset_single_epoch(self): + config = input_reader_pb2.InputReader() + config.num_epochs = 1 + config.num_readers = 1 + config.shuffle = False + + def graph_fn(): + return self._get_dataset_next( + [self._path_template % '0'], config, batch_size=30) + + data = self.execute(graph_fn, []) + + # Note that the execute function extracts single outputs if the return + # value is of size 1. + self.assertAllEqual(data, [1, 10]) + + # First batch will retrieve as much as it can, second batch will fail. + def graph_fn_second_batch(): + return self._get_dataset_next( + [self._path_template % '0'], config, batch_size=30, + num_batches_skip=1) + + self.assertRaises(tf.errors.OutOfRangeError, self.execute, + compute_fn=graph_fn_second_batch, inputs=[]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/decoder_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/decoder_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..3cf92a83de1e51199d92fccdd365617630636dd1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/decoder_builder.py @@ -0,0 +1,72 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""DataDecoder builder. + +Creates DataDecoders from InputReader configs. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from object_detection.data_decoders import tf_example_decoder +from object_detection.data_decoders import tf_sequence_example_decoder +from object_detection.protos import input_reader_pb2 + + +def build(input_reader_config): + """Builds a DataDecoder based only on the open source config proto. + + Args: + input_reader_config: An input_reader_pb2.InputReader object. + + Returns: + A DataDecoder based on the input_reader_config. + + Raises: + ValueError: On invalid input reader proto. 
+ """ + if not isinstance(input_reader_config, input_reader_pb2.InputReader): + raise ValueError('input_reader_config not of type ' + 'input_reader_pb2.InputReader.') + + if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader': + label_map_proto_file = None + if input_reader_config.HasField('label_map_path'): + label_map_proto_file = input_reader_config.label_map_path + input_type = input_reader_config.input_type + if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'): + decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=input_reader_config.load_instance_masks, + load_multiclass_scores=input_reader_config.load_multiclass_scores, + load_context_features=input_reader_config.load_context_features, + instance_mask_type=input_reader_config.mask_type, + label_map_proto_file=label_map_proto_file, + use_display_name=input_reader_config.use_display_name, + num_additional_channels=input_reader_config.num_additional_channels, + num_keypoints=input_reader_config.num_keypoints, + expand_hierarchy_labels=input_reader_config.expand_labels_hierarchy, + load_dense_pose=input_reader_config.load_dense_pose, + load_track_id=input_reader_config.load_track_id) + return decoder + elif input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'): + decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder( + label_map_proto_file=label_map_proto_file, + load_context_features=input_reader_config.load_context_features) + return decoder + raise ValueError('Unsupported input_type in config.') + + raise ValueError('Unsupported input_reader_config.') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/decoder_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/decoder_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77f2ff49a04fcf5b89ac4306632daf6b98ca9ea2 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/decoder_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/decoder_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/decoder_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d45285fd19f7648ab4d9365b155ba35a2ce0d3ed --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/decoder_builder_test.py @@ -0,0 +1,193 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
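For context on the decoder_builder.build() function added above, here is a minimal usage sketch (not part of the committed diff). It assumes the object_detection package is importable, uses an illustrative TF_EXAMPLE config, and `serialized_example` is a hypothetical placeholder for one record read from a TFRecord file.

# Illustrative sketch only -- not part of the committed files.
from google.protobuf import text_format

from object_detection.builders import decoder_builder
from object_detection.protos import input_reader_pb2

# Assumed minimal config: decode tf.Example records from a TFRecord input.
input_reader_proto = input_reader_pb2.InputReader()
text_format.Parse("""
  input_type: TF_EXAMPLE
  tf_record_input_reader {}
""", input_reader_proto)

decoder = decoder_builder.build(input_reader_proto)

# `serialized_example` (hypothetical) would be one record from a TFRecord file,
# e.g. produced the same way as _make_serialized_tf_example in the test below.
# tensor_dict = decoder.decode(serialized_example)
# image = tensor_dict['image']  # keys follow object_detection.core.standard_fields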
+# ============================================================================== +"""Tests for decoder_builder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import decoder_builder +from object_detection.core import standard_fields as fields +from object_detection.dataset_tools import seq_example_util +from object_detection.protos import input_reader_pb2 +from object_detection.utils import dataset_util +from object_detection.utils import test_case + + +def _get_labelmap_path(): + """Returns an absolute path to label map file.""" + parent_path = os.path.dirname(tf.resource_loader.get_data_files_path()) + return os.path.join(parent_path, 'data', + 'pet_label_map.pbtxt') + + +class DecoderBuilderTest(test_case.TestCase): + + def _make_serialized_tf_example(self, has_additional_channels=False): + image_tensor_np = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) + additional_channels_tensor_np = np.random.randint( + 255, size=(4, 5, 1)).astype(np.uint8) + flat_mask = (4 * 5) * [1.0] + def graph_fn(image_tensor): + encoded_jpeg = tf.image.encode_jpeg(image_tensor) + return encoded_jpeg + encoded_jpeg = self.execute_cpu(graph_fn, [image_tensor_np]) + encoded_additional_channels_jpeg = self.execute_cpu( + graph_fn, [additional_channels_tensor_np]) + + features = { + 'image/source_id': dataset_util.bytes_feature('0'.encode()), + 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), + 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/height': dataset_util.int64_feature(4), + 'image/width': dataset_util.int64_feature(5), + 'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]), + 'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]), + 'image/object/class/label': dataset_util.int64_list_feature([2]), + 'image/object/mask': dataset_util.float_list_feature(flat_mask), + } + if has_additional_channels: + additional_channels_key = 'image/additional_channels/encoded' + features[additional_channels_key] = dataset_util.bytes_list_feature( + [encoded_additional_channels_jpeg] * 2) + example = tf.train.Example(features=tf.train.Features(feature=features)) + return example.SerializeToString() + + def _make_random_serialized_jpeg_images(self, num_frames, image_height, + image_width): + def graph_fn(): + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + encoded_images = [tf.io.encode_jpeg(image) for image in images_list] + return encoded_images + return self.execute_cpu(graph_fn, []) + + def _make_serialized_tf_sequence_example(self): + num_frames = 4 + image_height = 20 + image_width = 30 + image_source_ids = [str(i) for i in range(num_frames)] + encoded_images = self._make_random_serialized_jpeg_images( + num_frames, image_height, image_width) + sequence_example_serialized = seq_example_util.make_sequence_example( + dataset_name='video_dataset', + video_id='video', + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_source_ids=image_source_ids, + image_format='JPEG', + is_annotated=[[1], [1], [1], [1]], + bboxes=[ + [[]], # Frame 0. 
+ [[0., 0., 1., 1.]], # Frame 1. + [[0., 0., 1., 1.], + [0.1, 0.1, 0.2, 0.2]], # Frame 2. + [[]], # Frame 3. + ], + label_strings=[ + [], # Frame 0. + ['Abyssinian'], # Frame 1. + ['Abyssinian', 'american_bulldog'], # Frame 2. + [], # Frame 3 + ]).SerializeToString() + return sequence_example_serialized + + def test_build_tf_record_input_reader(self): + input_reader_text_proto = 'tf_record_input_reader {}' + input_reader_proto = input_reader_pb2.InputReader() + text_format.Parse(input_reader_text_proto, input_reader_proto) + + decoder = decoder_builder.build(input_reader_proto) + serialized_seq_example = self._make_serialized_tf_example() + def graph_fn(): + tensor_dict = decoder.decode(serialized_seq_example) + return (tensor_dict[fields.InputDataFields.image], + tensor_dict[fields.InputDataFields.groundtruth_classes], + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + + (image, groundtruth_classes, + groundtruth_boxes) = self.execute_cpu(graph_fn, []) + self.assertEqual((4, 5, 3), image.shape) + self.assertAllEqual([2], groundtruth_classes) + self.assertEqual((1, 4), groundtruth_boxes.shape) + self.assertAllEqual([0.0, 0.0, 1.0, 1.0], groundtruth_boxes[0]) + + def test_build_tf_record_input_reader_sequence_example(self): + label_map_path = _get_labelmap_path() + input_reader_text_proto = """ + input_type: TF_SEQUENCE_EXAMPLE + tf_record_input_reader {} + """ + input_reader_proto = input_reader_pb2.InputReader() + input_reader_proto.label_map_path = label_map_path + text_format.Parse(input_reader_text_proto, input_reader_proto) + + serialized_seq_example = self._make_serialized_tf_sequence_example() + def graph_fn(): + decoder = decoder_builder.build(input_reader_proto) + tensor_dict = decoder.decode(serialized_seq_example) + return (tensor_dict[fields.InputDataFields.image], + tensor_dict[fields.InputDataFields.groundtruth_classes], + tensor_dict[fields.InputDataFields.groundtruth_boxes], + tensor_dict[fields.InputDataFields.num_groundtruth_boxes]) + (actual_image, actual_groundtruth_classes, actual_groundtruth_boxes, + actual_num_groundtruth_boxes) = self.execute_cpu(graph_fn, []) + expected_groundtruth_classes = [[-1, -1], [1, -1], [1, 2], [-1, -1]] + expected_groundtruth_boxes = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]] + expected_num_groundtruth_boxes = [0, 1, 2, 0] + + # Sequence example images are encoded. 
+ self.assertEqual((4,), actual_image.shape) + self.assertAllEqual(expected_groundtruth_classes, + actual_groundtruth_classes) + self.assertAllClose(expected_groundtruth_boxes, + actual_groundtruth_boxes) + self.assertAllClose( + expected_num_groundtruth_boxes, actual_num_groundtruth_boxes) + + def test_build_tf_record_input_reader_and_load_instance_masks(self): + input_reader_text_proto = """ + load_instance_masks: true + tf_record_input_reader {} + """ + input_reader_proto = input_reader_pb2.InputReader() + text_format.Parse(input_reader_text_proto, input_reader_proto) + + decoder = decoder_builder.build(input_reader_proto) + serialized_seq_example = self._make_serialized_tf_example() + def graph_fn(): + tensor_dict = decoder.decode(serialized_seq_example) + return tensor_dict[fields.InputDataFields.groundtruth_instance_masks] + masks = self.execute_cpu(graph_fn, []) + self.assertAllEqual((1, 4, 5), masks.shape) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/graph_rewriter_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/graph_rewriter_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..9cbeb4a1f687a6c194d3d8226fea1629e2f34a18 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/graph_rewriter_builder.py @@ -0,0 +1,53 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions for quantized training and evaluation.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import quantize as contrib_quantize +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + + +def build(graph_rewriter_config, is_training): + """Returns a function that modifies default graph based on options. + + Args: + graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto. + is_training: whether in training of eval mode. 
+ """ + def graph_rewrite_fn(): + """Function to quantize weights and activation of the default graph.""" + if (graph_rewriter_config.quantization.weight_bits != 8 or + graph_rewriter_config.quantization.activation_bits != 8): + raise ValueError('Only 8bit quantization is supported') + + # Quantize the graph by inserting quantize ops for weights and activations + if is_training: + contrib_quantize.experimental_create_training_graph( + input_graph=tf.get_default_graph(), + quant_delay=graph_rewriter_config.quantization.delay + ) + else: + contrib_quantize.experimental_create_eval_graph( + input_graph=tf.get_default_graph() + ) + slim.summarize_collection('quant_vars') + + return graph_rewrite_fn diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/graph_rewriter_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/graph_rewriter_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02cab7cce4fb231303528bc0b34a4613db491607 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/graph_rewriter_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/graph_rewriter_builder_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/graph_rewriter_builder_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..bec3cf8348f3c3803f2b2f2265e948426d363b22 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/graph_rewriter_builder_tf1_test.py @@ -0,0 +1,67 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
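As a usage note for the graph_rewriter_builder.build() function added above, the following sketch (not part of the committed diff) shows how the returned rewrite function is typically applied after the model graph has been constructed. It assumes a TF1.x runtime where tensorflow.contrib.quantize is importable; the delay value is illustrative.

# Illustrative sketch only -- not part of the committed files.
import tensorflow.compat.v1 as tf

from object_detection.builders import graph_rewriter_builder
from object_detection.protos import graph_rewriter_pb2

graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_config.quantization.delay = 2000       # illustrative: start quantizing after 2000 steps
graph_rewriter_config.quantization.weight_bits = 8    # only 8-bit quantization is supported
graph_rewriter_config.quantization.activation_bits = 8

with tf.Graph().as_default():
  # ... build the detection model here, then rewrite the default graph ...
  graph_rewrite_fn = graph_rewriter_builder.build(
      graph_rewriter_config, is_training=True)
  graph_rewrite_fn()  # inserts quantization ops into the default graph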
+# ============================================================================== +"""Tests for graph_rewriter_builder.""" +import unittest +from unittest import mock # pylint: disable=g-importing-member +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.builders import graph_rewriter_builder +from object_detection.protos import graph_rewriter_pb2 +from object_detection.utils import tf_version + + +if tf_version.is_tf1(): + from tensorflow.contrib import quantize as contrib_quantize # pylint: disable=g-import-not-at-top + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class QuantizationBuilderTest(tf.test.TestCase): + + def testQuantizationBuilderSetsUpCorrectTrainArguments(self): + with mock.patch.object( + contrib_quantize, + 'experimental_create_training_graph') as mock_quant_fn: + with mock.patch.object(slim, + 'summarize_collection') as mock_summarize_col: + graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_proto.quantization.delay = 10 + graph_rewriter_proto.quantization.weight_bits = 8 + graph_rewriter_proto.quantization.activation_bits = 8 + graph_rewrite_fn = graph_rewriter_builder.build( + graph_rewriter_proto, is_training=True) + graph_rewrite_fn() + _, kwargs = mock_quant_fn.call_args + self.assertEqual(kwargs['input_graph'], tf.get_default_graph()) + self.assertEqual(kwargs['quant_delay'], 10) + mock_summarize_col.assert_called_with('quant_vars') + + def testQuantizationBuilderSetsUpCorrectEvalArguments(self): + with mock.patch.object(contrib_quantize, + 'experimental_create_eval_graph') as mock_quant_fn: + with mock.patch.object(slim, + 'summarize_collection') as mock_summarize_col: + graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_proto.quantization.delay = 10 + graph_rewrite_fn = graph_rewriter_builder.build( + graph_rewriter_proto, is_training=False) + graph_rewrite_fn() + _, kwargs = mock_quant_fn.call_args + self.assertEqual(kwargs['input_graph'], tf.get_default_graph()) + mock_summarize_col.assert_called_with('quant_vars') + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/hyperparams_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/hyperparams_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..90aef43ac1bd92fb86dbd730cdb0420858572c18 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/hyperparams_builder.py @@ -0,0 +1,434 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Builder function to construct tf-slim arg_scope for convolution, fc ops.""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.core import freezable_batch_norm +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import context_manager + +# pylint: enable=g-import-not-at-top + + +class KerasLayerHyperparams(object): + """ + A hyperparameter configuration object for Keras layers used in + Object Detection models. + """ + + def __init__(self, hyperparams_config): + """Builds keras hyperparameter config for layers based on the proto config. + + It automatically converts from Slim layer hyperparameter configs to + Keras layer hyperparameters. Namely, it: + - Builds Keras initializers/regularizers instead of Slim ones + - sets weights_regularizer/initializer to kernel_regularizer/initializer + - converts batchnorm decay to momentum + - converts Slim l2 regularizer weights to the equivalent Keras l2 weights + + Contains a hyperparameter configuration for ops that specifies kernel + initializer, kernel regularizer, activation. Also contains parameters for + batch norm operators based on the configuration. + + Note that if the batch_norm parameters are not specified in the config + (i.e. left to default) then batch norm is excluded from the config. + + Args: + hyperparams_config: hyperparams.proto object containing + hyperparameters. + + Raises: + ValueError: if hyperparams_config is not of type hyperparams.Hyperparams. + """ + if not isinstance(hyperparams_config, + hyperparams_pb2.Hyperparams): + raise ValueError('hyperparams_config not of type ' + 'hyperparams_pb.Hyperparams.') + + self._batch_norm_params = None + if hyperparams_config.HasField('batch_norm'): + self._batch_norm_params = _build_keras_batch_norm_params( + hyperparams_config.batch_norm) + + self._force_use_bias = hyperparams_config.force_use_bias + self._activation_fn = _build_activation_fn(hyperparams_config.activation) + # TODO(kaftan): Unclear if these kwargs apply to separable & depthwise conv + # (Those might use depthwise_* instead of kernel_*) + # We should probably switch to using build_conv2d_layer and + # build_depthwise_conv2d_layer methods instead. + self._op_params = { + 'kernel_regularizer': _build_keras_regularizer( + hyperparams_config.regularizer), + 'kernel_initializer': _build_initializer( + hyperparams_config.initializer, build_for_keras=True), + 'activation': _build_activation_fn(hyperparams_config.activation) + } + + def use_batch_norm(self): + return self._batch_norm_params is not None + + def force_use_bias(self): + return self._force_use_bias + + def use_bias(self): + return (self._force_use_bias or not + (self.use_batch_norm() and self.batch_norm_params()['center'])) + + def batch_norm_params(self, **overrides): + """Returns a dict containing batchnorm layer construction hyperparameters. + + Optionally overrides values in the batchnorm hyperparam dict. Overrides + only apply to individual calls of this method, and do not affect + future calls. + + Args: + **overrides: keyword arguments to override in the hyperparams dictionary + + Returns: dict containing the layer construction keyword arguments, with + values overridden by the `overrides` keyword arguments. 
+ """ + if self._batch_norm_params is None: + new_batch_norm_params = dict() + else: + new_batch_norm_params = self._batch_norm_params.copy() + new_batch_norm_params.update(overrides) + return new_batch_norm_params + + def build_batch_norm(self, training=None, **overrides): + """Returns a Batch Normalization layer with the appropriate hyperparams. + + If the hyperparams are configured to not use batch normalization, + this will return a Keras Lambda layer that only applies tf.Identity, + without doing any normalization. + + Optionally overrides values in the batch_norm hyperparam dict. Overrides + only apply to individual calls of this method, and do not affect + future calls. + + Args: + training: if True, the normalization layer will normalize using the batch + statistics. If False, the normalization layer will be frozen and will + act as if it is being used for inference. If None, the layer + will look up the Keras learning phase at `call` time to decide what to + do. + **overrides: batch normalization construction args to override from the + batch_norm hyperparams dictionary. + + Returns: Either a FreezableBatchNorm layer (if use_batch_norm() is True), + or a Keras Lambda layer that applies the identity (if use_batch_norm() + is False) + """ + if self.use_batch_norm(): + return freezable_batch_norm.FreezableBatchNorm( + training=training, + **self.batch_norm_params(**overrides) + ) + else: + return tf.keras.layers.Lambda(tf.identity) + + def build_activation_layer(self, name='activation'): + """Returns a Keras layer that applies the desired activation function. + + Args: + name: The name to assign the Keras layer. + Returns: A Keras lambda layer that applies the activation function + specified in the hyperparam config, or applies the identity if the + activation function is None. + """ + if self._activation_fn: + return tf.keras.layers.Lambda(self._activation_fn, name=name) + else: + return tf.keras.layers.Lambda(tf.identity, name=name) + + def params(self, include_activation=False, **overrides): + """Returns a dict containing the layer construction hyperparameters to use. + + Optionally overrides values in the returned dict. Overrides + only apply to individual calls of this method, and do not affect + future calls. + + Args: + include_activation: If False, activation in the returned dictionary will + be set to `None`, and the activation must be applied via a separate + layer created by `build_activation_layer`. If True, `activation` in the + output param dictionary will be set to the activation function + specified in the hyperparams config. + **overrides: keyword arguments to override in the hyperparams dictionary. + + Returns: dict containing the layer construction keyword arguments, with + values overridden by the `overrides` keyword arguments. + """ + new_params = self._op_params.copy() + new_params['activation'] = None + if include_activation: + new_params['activation'] = self._activation_fn + new_params['use_bias'] = self.use_bias() + new_params.update(**overrides) + return new_params + + +def build(hyperparams_config, is_training): + """Builds tf-slim arg_scope for convolution ops based on the config. + + Returns an arg_scope to use for convolution ops containing weights + initializer, weights regularizer, activation function, batch norm function + and batch norm parameters based on the configuration. + + Note that if no normalization parameters are specified in the config, + (i.e. left to default) then both batch norm and group norm are excluded + from the arg_scope. 
+ + The batch norm parameters are set for updates based on `is_training` argument + and conv_hyperparams_config.batch_norm.train parameter. During training, they + are updated only if batch_norm.train parameter is true. However, during eval, + no updates are made to the batch norm variables. In both cases, their current + values are used during forward pass. + + Args: + hyperparams_config: hyperparams.proto object containing + hyperparameters. + is_training: Whether the network is in training mode. + + Returns: + arg_scope_fn: A function to construct tf-slim arg_scope containing + hyperparameters for ops. + + Raises: + ValueError: if hyperparams_config is not of type hyperparams.Hyperparams. + """ + if not isinstance(hyperparams_config, + hyperparams_pb2.Hyperparams): + raise ValueError('hyperparams_config not of type ' + 'hyperparams_pb.Hyperparams.') + + if hyperparams_config.force_use_bias: + raise ValueError('Hyperparams force_use_bias only supported by ' + 'KerasLayerHyperparams.') + + normalizer_fn = None + batch_norm_params = None + if hyperparams_config.HasField('batch_norm'): + normalizer_fn = slim.batch_norm + batch_norm_params = _build_batch_norm_params( + hyperparams_config.batch_norm, is_training) + if hyperparams_config.HasField('group_norm'): + normalizer_fn = slim.group_norm + affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose] + if hyperparams_config.HasField('op') and ( + hyperparams_config.op == hyperparams_pb2.Hyperparams.FC): + affected_ops = [slim.fully_connected] + def scope_fn(): + with (slim.arg_scope([slim.batch_norm], **batch_norm_params) + if batch_norm_params is not None else + context_manager.IdentityContextManager()): + with slim.arg_scope( + affected_ops, + weights_regularizer=_build_slim_regularizer( + hyperparams_config.regularizer), + weights_initializer=_build_initializer( + hyperparams_config.initializer), + activation_fn=_build_activation_fn(hyperparams_config.activation), + normalizer_fn=normalizer_fn) as sc: + return sc + + return scope_fn + + +def _build_activation_fn(activation_fn): + """Builds a callable activation from config. + + Args: + activation_fn: hyperparams_pb2.Hyperparams.activation + + Returns: + Callable activation function. + + Raises: + ValueError: On unknown activation function. + """ + if activation_fn == hyperparams_pb2.Hyperparams.NONE: + return None + if activation_fn == hyperparams_pb2.Hyperparams.RELU: + return tf.nn.relu + if activation_fn == hyperparams_pb2.Hyperparams.RELU_6: + return tf.nn.relu6 + if activation_fn == hyperparams_pb2.Hyperparams.SWISH: + return tf.nn.swish + raise ValueError('Unknown activation function: {}'.format(activation_fn)) + + +def _build_slim_regularizer(regularizer): + """Builds a tf-slim regularizer from config. + + Args: + regularizer: hyperparams_pb2.Hyperparams.regularizer proto. + + Returns: + tf-slim regularizer. + + Raises: + ValueError: On unknown regularizer. + """ + regularizer_oneof = regularizer.WhichOneof('regularizer_oneof') + if regularizer_oneof == 'l1_regularizer': + return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight)) + if regularizer_oneof == 'l2_regularizer': + return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight)) + if regularizer_oneof is None: + return None + raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof)) + + +def _build_keras_regularizer(regularizer): + """Builds a keras regularizer from config. + + Args: + regularizer: hyperparams_pb2.Hyperparams.regularizer proto. 
+ + Returns: + Keras regularizer. + + Raises: + ValueError: On unknown regularizer. + """ + regularizer_oneof = regularizer.WhichOneof('regularizer_oneof') + if regularizer_oneof == 'l1_regularizer': + return tf.keras.regularizers.l1(float(regularizer.l1_regularizer.weight)) + if regularizer_oneof == 'l2_regularizer': + # The Keras L2 regularizer weight differs from the Slim L2 regularizer + # weight by a factor of 2 + return tf.keras.regularizers.l2( + float(regularizer.l2_regularizer.weight * 0.5)) + if regularizer_oneof is None: + return None + raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof)) + + +def _build_initializer(initializer, build_for_keras=False): + """Build a tf initializer from config. + + Args: + initializer: hyperparams_pb2.Hyperparams.regularizer proto. + build_for_keras: Whether the initializers should be built for Keras + operators. If false builds for Slim. + + Returns: + tf initializer. + + Raises: + ValueError: On unknown initializer. + """ + initializer_oneof = initializer.WhichOneof('initializer_oneof') + if initializer_oneof == 'truncated_normal_initializer': + return tf.truncated_normal_initializer( + mean=initializer.truncated_normal_initializer.mean, + stddev=initializer.truncated_normal_initializer.stddev) + if initializer_oneof == 'random_normal_initializer': + return tf.random_normal_initializer( + mean=initializer.random_normal_initializer.mean, + stddev=initializer.random_normal_initializer.stddev) + if initializer_oneof == 'variance_scaling_initializer': + enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer. + DESCRIPTOR.enum_types_by_name['Mode']) + mode = enum_descriptor.values_by_number[initializer. + variance_scaling_initializer. + mode].name + if build_for_keras: + if initializer.variance_scaling_initializer.uniform: + return tf.variance_scaling_initializer( + scale=initializer.variance_scaling_initializer.factor, + mode=mode.lower(), + distribution='uniform') + else: + # In TF 1.9 release and earlier, the truncated_normal distribution was + # not supported correctly. So, in these earlier versions of tensorflow, + # the ValueError will be raised, and we manually truncate the + # distribution scale. + # + # It is insufficient to just set distribution to `normal` from the + # start, because the `normal` distribution in newer Tensorflow versions + # creates a truncated distribution, whereas it created untruncated + # distributions in older versions. + try: + return tf.variance_scaling_initializer( + scale=initializer.variance_scaling_initializer.factor, + mode=mode.lower(), + distribution='truncated_normal') + except ValueError: + truncate_constant = 0.87962566103423978 + truncated_scale = initializer.variance_scaling_initializer.factor / ( + truncate_constant * truncate_constant + ) + return tf.variance_scaling_initializer( + scale=truncated_scale, + mode=mode.lower(), + distribution='normal') + + else: + return slim.variance_scaling_initializer( + factor=initializer.variance_scaling_initializer.factor, + mode=mode, + uniform=initializer.variance_scaling_initializer.uniform) + if initializer_oneof is None: + return None + raise ValueError('Unknown initializer function: {}'.format( + initializer_oneof)) + + +def _build_batch_norm_params(batch_norm, is_training): + """Build a dictionary of batch_norm params from config. + + Args: + batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto. + is_training: Whether the models is in training mode. + + Returns: + A dictionary containing batch_norm parameters. 
+ """ + batch_norm_params = { + 'decay': batch_norm.decay, + 'center': batch_norm.center, + 'scale': batch_norm.scale, + 'epsilon': batch_norm.epsilon, + # Remove is_training parameter from here and deprecate it in the proto + # once we refactor Faster RCNN models to set is_training through an outer + # arg_scope in the meta architecture. + 'is_training': is_training and batch_norm.train, + } + return batch_norm_params + + +def _build_keras_batch_norm_params(batch_norm): + """Build a dictionary of Keras BatchNormalization params from config. + + Args: + batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto. + + Returns: + A dictionary containing Keras BatchNormalization parameters. + """ + # Note: Although decay is defined to be 1 - momentum in batch_norm, + # decay in the slim batch_norm layers was erroneously defined and is + # actually the same as momentum in the Keras batch_norm layers. + # For context, see: github.com/keras-team/keras/issues/6839 + batch_norm_params = { + 'momentum': batch_norm.decay, + 'center': batch_norm.center, + 'scale': batch_norm.scale, + 'epsilon': batch_norm.epsilon, + } + return batch_norm_params diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/hyperparams_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/hyperparams_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c062c498d1f9e77ccf7096bc5bcd57e0ea76c743 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/hyperparams_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/hyperparams_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/hyperparams_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e48ac23bcb547c9729038b901a9612d3712d69cb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/hyperparams_builder_test.py @@ -0,0 +1,977 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
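To illustrate how the Keras path of the hyperparams builder added above is consumed, the sketch below (not part of the committed diff; the filter count, kernel size and proto weights are arbitrary) builds a KerasLayerHyperparams from a text proto and feeds its params() into a Conv2D layer. Note the documented conversions: batch_norm decay becomes Keras momentum, and the Slim l2 weight is halved for the Keras regularizer.

# Illustrative sketch only -- not part of the committed files.
import tensorflow.compat.v1 as tf
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2

conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse("""
  regularizer { l2_regularizer { weight: 0.0004 } }
  initializer { truncated_normal_initializer { stddev: 0.03 } }
  activation: RELU_6
  batch_norm { decay: 0.997 scale: true epsilon: 0.001 }
""", conv_hyperparams_proto)

keras_config = hyperparams_builder.KerasLayerHyperparams(conv_hyperparams_proto)

# params() supplies kernel_initializer/kernel_regularizer/use_bias; the
# activation is applied as a separate layer built below.
conv = tf.keras.layers.Conv2D(filters=32, kernel_size=3, **keras_config.params())
batch_norm = keras_config.build_batch_norm(training=True)  # momentum == decay == 0.997
activation = keras_config.build_activation_layer()         # wraps tf.nn.relu6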
+# ============================================================================== + +"""Tests object_detection.core.hyperparams_builder.""" + +import unittest +import numpy as np +import tensorflow.compat.v1 as tf +import tf_slim as slim +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.core import freezable_batch_norm +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import tf_version + + +def _get_scope_key(op): + return getattr(op, '_key_op', str(op)) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests.') +class HyperparamsBuilderTest(tf.test.TestCase): + + def test_default_arg_scope_has_conv2d_op(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + self.assertIn(_get_scope_key(slim.conv2d), scope) + + def test_default_arg_scope_has_separable_conv2d_op(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + self.assertIn(_get_scope_key(slim.separable_conv2d), scope) + + def test_default_arg_scope_has_conv2d_transpose_op(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + self.assertIn(_get_scope_key(slim.conv2d_transpose), scope) + + def test_explicit_fc_op_arg_scope_has_fully_connected_op(self): + conv_hyperparams_text_proto = """ + op: FC + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + self.assertIn(_get_scope_key(slim.fully_connected), scope) + + def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + kwargs_1, kwargs_2, kwargs_3 = scope.values() + self.assertDictEqual(kwargs_1, kwargs_2) + self.assertDictEqual(kwargs_1, kwargs_3) + + def test_return_l1_regularized_weights(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.5 + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + 
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = list(scope.values())[0] + regularizer = conv_scope_arguments['weights_regularizer'] + weights = np.array([1., -1, 4., 2.]) + with self.test_session() as sess: + result = sess.run(regularizer(tf.constant(weights))) + self.assertAllClose(np.abs(weights).sum() * 0.5, result) + + def test_return_l2_regularizer_weights(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + weight: 0.42 + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + + regularizer = conv_scope_arguments['weights_regularizer'] + weights = np.array([1., -1, 4., 2.]) + with self.test_session() as sess: + result = sess.run(regularizer(tf.constant(weights))) + self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result) + + def test_return_non_default_batch_norm_params_with_train_during_train(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: false + scale: true + epsilon: 0.03 + train: true + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm) + batch_norm_params = scope[_get_scope_key(slim.batch_norm)] + self.assertAlmostEqual(batch_norm_params['decay'], 0.7) + self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) + self.assertFalse(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + self.assertTrue(batch_norm_params['is_training']) + + def test_return_batch_norm_params_with_notrain_during_eval(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: false + scale: true + epsilon: 0.03 + train: true + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=False) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm) + batch_norm_params = scope[_get_scope_key(slim.batch_norm)] + self.assertAlmostEqual(batch_norm_params['decay'], 0.7) + self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) + self.assertFalse(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + self.assertFalse(batch_norm_params['is_training']) + + def test_return_batch_norm_params_with_notrain_when_train_is_false(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: false + scale: true + epsilon: 0.03 + train: false + } 
+ """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm) + batch_norm_params = scope[_get_scope_key(slim.batch_norm)] + self.assertAlmostEqual(batch_norm_params['decay'], 0.7) + self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) + self.assertFalse(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + self.assertFalse(batch_norm_params['is_training']) + + def test_do_not_use_batch_norm_if_default(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['normalizer_fn'], None) + + def test_use_none_activation(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: NONE + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['activation_fn'], None) + + def test_use_relu_activation(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu) + + def test_use_relu_6_activation(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6) + + def test_use_swish_activation(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: SWISH + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.swish) + + def 
_assert_variance_in_range(self, initializer, shape, variance, + tol=1e-2): + with tf.Graph().as_default() as g: + with self.test_session(graph=g) as sess: + var = tf.get_variable( + name='test', + shape=shape, + dtype=tf.float32, + initializer=initializer) + sess.run(tf.global_variables_initializer()) + values = sess.run(var) + self.assertAllClose(np.var(values), variance, tol, tol) + + def test_variance_in_range_with_variance_scaling_initializer_fan_in(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_IN + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 100.) + + def test_variance_in_range_with_variance_scaling_initializer_fan_out(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_OUT + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 40.) + + def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_AVG + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=4. / (100. + 40.)) + + def test_variance_in_range_with_variance_scaling_initializer_uniform(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_IN + uniform: true + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 100.) 
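The four variance_scaling_initializer tests above all check the same relationship, restated here as a standalone sketch (not part of the committed diff; TF1-style graph mode, with the same illustrative [100, 40] shape and factor 2.0): FAN_IN gives variance factor/100, FAN_OUT gives factor/40, and FAN_AVG gives 2*factor/(100+40).

# Illustrative sketch only -- not part of the committed files.
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim

with tf.Graph().as_default(), tf.Session() as sess:
  initializer = slim.variance_scaling_initializer(
      factor=2.0, mode='FAN_IN', uniform=False)
  var = tf.get_variable('variance_check', shape=[100, 40], dtype=tf.float32,
                        initializer=initializer)
  sess.run(tf.global_variables_initializer())
  values = sess.run(var)
  # Expected variance under FAN_IN is factor / fan_in = 2.0 / 100.
  np.testing.assert_allclose(np.var(values), 2.0 / 100, atol=1e-2)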
+ + def test_variance_in_range_with_truncated_normal_initializer(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.8 + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=0.49, tol=1e-1) + + def test_variance_in_range_with_random_normal_initializer(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + random_normal_initializer { + mean: 0.0 + stddev: 0.8 + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + scope_fn = hyperparams_builder.build(conv_hyperparams_proto, + is_training=True) + scope = scope_fn() + conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] + initializer = conv_scope_arguments['weights_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=0.64, tol=1e-1) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only tests.') +class KerasHyperparamsBuilderTest(tf.test.TestCase): + + def _assert_variance_in_range(self, initializer, shape, variance, + tol=1e-2): + var = tf.Variable(initializer(shape=shape, dtype=tf.float32)) + self.assertAllClose(np.var(var.numpy()), variance, tol, tol) + + def test_return_l1_regularized_weights_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l1_regularizer { + weight: 0.5 + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + + regularizer = keras_config.params()['kernel_regularizer'] + weights = np.array([1., -1, 4., 2.]) + result = regularizer(tf.constant(weights)).numpy() + self.assertAllClose(np.abs(weights).sum() * 0.5, result) + + def test_return_l2_regularizer_weights_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + weight: 0.42 + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + + regularizer = keras_config.params()['kernel_regularizer'] + weights = np.array([1., -1, 4., 2.]) + result = regularizer(tf.constant(weights)).numpy() + self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result) + + def test_return_non_default_batch_norm_params_keras( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: false + scale: true + epsilon: 0.03 + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + + self.assertTrue(keras_config.use_batch_norm()) + 
batch_norm_params = keras_config.batch_norm_params() + self.assertAlmostEqual(batch_norm_params['momentum'], 0.7) + self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) + self.assertFalse(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + + batch_norm_layer = keras_config.build_batch_norm() + self.assertIsInstance(batch_norm_layer, + freezable_batch_norm.FreezableBatchNorm) + + def test_return_non_default_batch_norm_params_keras_override( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: false + scale: true + epsilon: 0.03 + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + + self.assertTrue(keras_config.use_batch_norm()) + batch_norm_params = keras_config.batch_norm_params(momentum=0.4) + self.assertAlmostEqual(batch_norm_params['momentum'], 0.4) + self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) + self.assertFalse(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + + def test_do_not_use_batch_norm_if_default_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + self.assertFalse(keras_config.use_batch_norm()) + self.assertEqual(keras_config.batch_norm_params(), {}) + + # The batch norm builder should build an identity Lambda layer + identity_layer = keras_config.build_batch_norm() + self.assertIsInstance(identity_layer, + tf.keras.layers.Lambda) + + def test_do_not_use_bias_if_batch_norm_center_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: true + scale: true + epsilon: 0.03 + train: true + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + + self.assertTrue(keras_config.use_batch_norm()) + batch_norm_params = keras_config.batch_norm_params() + self.assertTrue(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + hyperparams = keras_config.params() + self.assertFalse(hyperparams['use_bias']) + + def test_force_use_bias_if_batch_norm_center_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + decay: 0.7 + center: true + scale: true + epsilon: 0.03 + train: true + } + force_use_bias: true + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + + self.assertTrue(keras_config.use_batch_norm()) + batch_norm_params = keras_config.batch_norm_params() + self.assertTrue(batch_norm_params['center']) + self.assertTrue(batch_norm_params['scale']) + hyperparams = keras_config.params() + 
self.assertTrue(hyperparams['use_bias']) + + def test_use_none_activation_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: NONE + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + self.assertIsNone(keras_config.params()['activation']) + self.assertIsNone( + keras_config.params(include_activation=True)['activation']) + activation_layer = keras_config.build_activation_layer() + self.assertIsInstance(activation_layer, tf.keras.layers.Lambda) + self.assertEqual(activation_layer.function, tf.identity) + + def test_use_relu_activation_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + self.assertIsNone(keras_config.params()['activation']) + self.assertEqual( + keras_config.params(include_activation=True)['activation'], tf.nn.relu) + activation_layer = keras_config.build_activation_layer() + self.assertIsInstance(activation_layer, tf.keras.layers.Lambda) + self.assertEqual(activation_layer.function, tf.nn.relu) + + def test_use_relu_6_activation_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + self.assertIsNone(keras_config.params()['activation']) + self.assertEqual( + keras_config.params(include_activation=True)['activation'], tf.nn.relu6) + activation_layer = keras_config.build_activation_layer() + self.assertIsInstance(activation_layer, tf.keras.layers.Lambda) + self.assertEqual(activation_layer.function, tf.nn.relu6) + + def test_use_swish_activation_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: SWISH + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + self.assertIsNone(keras_config.params()['activation']) + self.assertEqual( + keras_config.params(include_activation=True)['activation'], tf.nn.swish) + activation_layer = keras_config.build_activation_layer() + self.assertIsInstance(activation_layer, tf.keras.layers.Lambda) + self.assertEqual(activation_layer.function, tf.nn.swish) + + def test_override_activation_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + activation: RELU_6 + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + new_params = keras_config.params(activation=tf.nn.relu) + 
self.assertEqual(new_params['activation'], tf.nn.relu) + + def test_variance_in_range_with_variance_scaling_initializer_fan_in_keras( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_IN + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 100.) + + def test_variance_in_range_with_variance_scaling_initializer_fan_out_keras( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_OUT + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 40.) + + def test_variance_in_range_with_variance_scaling_initializer_fan_avg_keras( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_AVG + uniform: false + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=4. / (100. + 40.)) + + def test_variance_in_range_with_variance_scaling_initializer_uniform_keras( + self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + variance_scaling_initializer { + factor: 2.0 + mode: FAN_IN + uniform: true + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=2. / 100.) 
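Editor's note: the Keras tests above read individual entries out of keras_config.params(); in the detection models that dict (kernel_initializer, kernel_regularizer, activation, use_bias) is normally unpacked straight into a Keras layer. A hedged sketch of that pattern (not part of the diff; the proto text and layer sizes are illustrative only):

import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2

proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  regularizer { l2_regularizer { weight: 0.0004 } }
  initializer { truncated_normal_initializer { stddev: 0.03 } }
  activation: RELU_6
""", proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(proto)

# params() leaves 'activation' as None by default (the activation is built as
# a separate layer), so the dict can be splatted into a convolution directly.
conv = tf.keras.layers.Conv2D(32, 3, padding='same', **keras_config.params())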
+ + def test_variance_in_range_with_truncated_normal_initializer_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + mean: 0.0 + stddev: 0.8 + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=0.49, tol=1e-1) + + def test_variance_in_range_with_random_normal_initializer_keras(self): + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + random_normal_initializer { + mean: 0.0 + stddev: 0.8 + } + } + """ + conv_hyperparams_proto = hyperparams_pb2.Hyperparams() + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) + keras_config = hyperparams_builder.KerasLayerHyperparams( + conv_hyperparams_proto) + initializer = keras_config.params()['kernel_initializer'] + self._assert_variance_in_range(initializer, shape=[100, 40], + variance=0.64, tol=1e-1) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/image_resizer_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/image_resizer_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..1a3f096f1786bd47f9084a559c2657f72a164da0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/image_resizer_builder.py @@ -0,0 +1,187 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Builder function for image resizing operations.""" +import functools +import tensorflow.compat.v1 as tf + +from object_detection.core import preprocessor +from object_detection.protos import image_resizer_pb2 + + +def _tf_resize_method(resize_method): + """Maps image resize method from enumeration type to TensorFlow. + + Args: + resize_method: The resize_method attribute of keep_aspect_ratio_resizer or + fixed_shape_resizer. + + Returns: + method: The corresponding TensorFlow ResizeMethod. + + Raises: + ValueError: if `resize_method` is of unknown type. + """ + dict_method = { + image_resizer_pb2.BILINEAR: + tf.image.ResizeMethod.BILINEAR, + image_resizer_pb2.NEAREST_NEIGHBOR: + tf.image.ResizeMethod.NEAREST_NEIGHBOR, + image_resizer_pb2.BICUBIC: + tf.image.ResizeMethod.BICUBIC, + image_resizer_pb2.AREA: + tf.image.ResizeMethod.AREA + } + if resize_method in dict_method: + return dict_method[resize_method] + else: + raise ValueError('Unknown resize_method') + + +def build(image_resizer_config): + """Builds callable for image resizing operations. 
+ + Args: + image_resizer_config: image_resizer.proto object containing parameters for + an image resizing operation. + + Returns: + image_resizer_fn: Callable for image resizing. This callable always takes + a rank-3 image tensor (corresponding to a single image) and returns a + rank-3 image tensor, possibly with new spatial dimensions. + + Raises: + ValueError: if `image_resizer_config` is of incorrect type. + ValueError: if `image_resizer_config.image_resizer_oneof` is of expected + type. + ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer + is used. + """ + if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer): + raise ValueError('image_resizer_config not of type ' + 'image_resizer_pb2.ImageResizer.') + + image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof') + if image_resizer_oneof == 'keep_aspect_ratio_resizer': + keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer + if not (keep_aspect_ratio_config.min_dimension <= + keep_aspect_ratio_config.max_dimension): + raise ValueError('min_dimension > max_dimension') + method = _tf_resize_method(keep_aspect_ratio_config.resize_method) + per_channel_pad_value = (0, 0, 0) + if keep_aspect_ratio_config.per_channel_pad_value: + per_channel_pad_value = tuple(keep_aspect_ratio_config. + per_channel_pad_value) + image_resizer_fn = functools.partial( + preprocessor.resize_to_range, + min_dimension=keep_aspect_ratio_config.min_dimension, + max_dimension=keep_aspect_ratio_config.max_dimension, + method=method, + pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension, + per_channel_pad_value=per_channel_pad_value) + if not keep_aspect_ratio_config.convert_to_grayscale: + return image_resizer_fn + elif image_resizer_oneof == 'fixed_shape_resizer': + fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer + method = _tf_resize_method(fixed_shape_resizer_config.resize_method) + image_resizer_fn = functools.partial( + preprocessor.resize_image, + new_height=fixed_shape_resizer_config.height, + new_width=fixed_shape_resizer_config.width, + method=method) + if not fixed_shape_resizer_config.convert_to_grayscale: + return image_resizer_fn + elif image_resizer_oneof == 'identity_resizer': + def image_resizer_fn(image, masks=None, **kwargs): + del kwargs + if masks is None: + return [image, tf.shape(image)] + else: + return [image, masks, tf.shape(image)] + return image_resizer_fn + elif image_resizer_oneof == 'conditional_shape_resizer': + conditional_shape_resize_config = ( + image_resizer_config.conditional_shape_resizer) + method = _tf_resize_method(conditional_shape_resize_config.resize_method) + + if conditional_shape_resize_config.condition == ( + image_resizer_pb2.ConditionalShapeResizer.GREATER): + image_resizer_fn = functools.partial( + preprocessor.resize_to_max_dimension, + max_dimension=conditional_shape_resize_config.size_threshold, + method=method) + + elif conditional_shape_resize_config.condition == ( + image_resizer_pb2.ConditionalShapeResizer.SMALLER): + image_resizer_fn = functools.partial( + preprocessor.resize_to_min_dimension, + min_dimension=conditional_shape_resize_config.size_threshold, + method=method) + else: + raise ValueError( + 'Invalid image resizer condition option for ' + 'ConditionalShapeResizer: \'%s\'.' 
+ % conditional_shape_resize_config.condition) + if not conditional_shape_resize_config.convert_to_grayscale: + return image_resizer_fn + elif image_resizer_oneof == 'pad_to_multiple_resizer': + pad_to_multiple_resizer_config = ( + image_resizer_config.pad_to_multiple_resizer) + + if pad_to_multiple_resizer_config.multiple < 0: + raise ValueError('`multiple` for pad_to_multiple_resizer should be > 0.') + + else: + image_resizer_fn = functools.partial( + preprocessor.resize_pad_to_multiple, + multiple=pad_to_multiple_resizer_config.multiple) + + if not pad_to_multiple_resizer_config.convert_to_grayscale: + return image_resizer_fn + else: + raise ValueError( + 'Invalid image resizer option: \'%s\'.' % image_resizer_oneof) + + def grayscale_image_resizer(image, masks=None): + """Convert to grayscale before applying image_resizer_fn. + + Args: + image: A 3D tensor of shape [height, width, 3] + masks: (optional) rank 3 float32 tensor with shape [num_instances, height, + width] containing instance masks. + + Returns: + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A 3D tensor of shape [new_height, new_width, 1], + where the image has been resized (with bilinear interpolation) so that + min(new_height, new_width) == min_dimension or + max(new_height, new_width) == max_dimension. + resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width]. + resized_image_shape: A 1D tensor of shape [3] containing shape of the + resized image. + """ + # image_resizer_fn returns [resized_image, resized_image_shape] if + # mask==None, otherwise it returns + # [resized_image, resized_mask, resized_image_shape]. In either case, we + # only deal with first and last element of the returned list. + retval = image_resizer_fn(image, masks) + resized_image = retval[0] + resized_image_shape = retval[-1] + retval[0] = preprocessor.rgb_to_gray(resized_image) + retval[-1] = tf.concat([resized_image_shape[:-1], [1]], 0) + return retval + + return functools.partial(grayscale_image_resizer) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/image_resizer_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/image_resizer_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..183dd4bfed5c5b09b177ceb751b5e2e63d79b275 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/image_resizer_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/image_resizer_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/image_resizer_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc456eab1da1ea7952d17be4d14fab9ca8bf9a4 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/image_resizer_builder_test.py @@ -0,0 +1,243 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for object_detection.builders.image_resizer_builder.""" +import numpy as np +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from object_detection.builders import image_resizer_builder +from object_detection.protos import image_resizer_pb2 +from object_detection.utils import test_case + + +class ImageResizerBuilderTest(test_case.TestCase): + + def _shape_of_resized_random_image_given_text_proto(self, input_shape, + text_proto): + image_resizer_config = image_resizer_pb2.ImageResizer() + text_format.Merge(text_proto, image_resizer_config) + image_resizer_fn = image_resizer_builder.build(image_resizer_config) + def graph_fn(): + images = tf.cast( + tf.random_uniform(input_shape, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + resized_images, _ = image_resizer_fn(images) + return resized_images + return self.execute_cpu(graph_fn, []).shape + + def test_build_keep_aspect_ratio_resizer_returns_expected_shape(self): + image_resizer_text_proto = """ + keep_aspect_ratio_resizer { + min_dimension: 10 + max_dimension: 20 + } + """ + input_shape = (50, 25, 3) + expected_output_shape = (20, 10, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_keep_aspect_ratio_resizer_grayscale(self): + image_resizer_text_proto = """ + keep_aspect_ratio_resizer { + min_dimension: 10 + max_dimension: 20 + convert_to_grayscale: true + } + """ + input_shape = (50, 25, 3) + expected_output_shape = (20, 10, 1) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_keep_aspect_ratio_resizer_with_padding(self): + image_resizer_text_proto = """ + keep_aspect_ratio_resizer { + min_dimension: 10 + max_dimension: 20 + pad_to_max_dimension: true + per_channel_pad_value: 3 + per_channel_pad_value: 4 + per_channel_pad_value: 5 + } + """ + input_shape = (50, 25, 3) + expected_output_shape = (20, 20, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_built_fixed_shape_resizer_returns_expected_shape(self): + image_resizer_text_proto = """ + fixed_shape_resizer { + height: 10 + width: 20 + } + """ + input_shape = (50, 25, 3) + expected_output_shape = (10, 20, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_built_fixed_shape_resizer_grayscale(self): + image_resizer_text_proto = """ + fixed_shape_resizer { + height: 10 + width: 20 + convert_to_grayscale: true + } + """ + input_shape = (50, 25, 3) + expected_output_shape = (10, 20, 1) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + 
self.assertEqual(output_shape, expected_output_shape) + + def test_identity_resizer_returns_expected_shape(self): + image_resizer_text_proto = """ + identity_resizer { + } + """ + input_shape = (10, 20, 3) + expected_output_shape = (10, 20, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_raises_error_on_invalid_input(self): + invalid_input = 'invalid_input' + with self.assertRaises(ValueError): + image_resizer_builder.build(invalid_input) + + def _resized_image_given_text_proto(self, image, text_proto): + image_resizer_config = image_resizer_pb2.ImageResizer() + text_format.Merge(text_proto, image_resizer_config) + image_resizer_fn = image_resizer_builder.build(image_resizer_config) + def graph_fn(image): + resized_image, _ = image_resizer_fn(image) + return resized_image + return self.execute_cpu(graph_fn, [image]) + + def test_fixed_shape_resizer_nearest_neighbor_method(self): + image_resizer_text_proto = """ + fixed_shape_resizer { + height: 1 + width: 1 + resize_method: NEAREST_NEIGHBOR + } + """ + image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + image = np.expand_dims(image, axis=2) + image = np.tile(image, (1, 1, 3)) + image = np.expand_dims(image, axis=0) + resized_image = self._resized_image_given_text_proto( + image, image_resizer_text_proto) + vals = np.unique(resized_image).tolist() + self.assertEqual(len(vals), 1) + self.assertEqual(vals[0], 1) + + def test_build_conditional_shape_resizer_greater_returns_expected_shape(self): + image_resizer_text_proto = """ + conditional_shape_resizer { + condition: GREATER + size_threshold: 30 + } + """ + input_shape = (60, 30, 3) + expected_output_shape = (30, 15, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_conditional_shape_resizer_same_shape_with_no_resize(self): + image_resizer_text_proto = """ + conditional_shape_resizer { + condition: GREATER + size_threshold: 30 + } + """ + input_shape = (15, 15, 3) + expected_output_shape = (15, 15, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_conditional_shape_resizer_smaller_returns_expected_shape(self): + image_resizer_text_proto = """ + conditional_shape_resizer { + condition: SMALLER + size_threshold: 30 + } + """ + input_shape = (30, 15, 3) + expected_output_shape = (60, 30, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_conditional_shape_resizer_grayscale(self): + image_resizer_text_proto = """ + conditional_shape_resizer { + condition: GREATER + size_threshold: 30 + convert_to_grayscale: true + } + """ + input_shape = (60, 30, 3) + expected_output_shape = (30, 15, 1) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_conditional_shape_resizer_error_on_invalid_condition(self): + invalid_image_resizer_text_proto = """ + conditional_shape_resizer { + condition: INVALID + size_threshold: 30 + } + """ + with self.assertRaises(ValueError): + image_resizer_builder.build(invalid_image_resizer_text_proto) 
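Editor's note: the resizer tests above all follow one pattern — merge a text proto into an ImageResizer message, build the callable, and inspect the returned spatial shape. A hedged usage sketch outside the test harness (not part of the diff), assuming TF 2.x eager execution through the v1 compat API:

import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2

config = image_resizer_pb2.ImageResizer()
text_format.Merge("""
  keep_aspect_ratio_resizer { min_dimension: 10 max_dimension: 20 }
""", config)
image_resizer_fn = image_resizer_builder.build(config)

image = tf.constant(
    np.random.randint(0, 255, size=(50, 25, 3)).astype(np.float32))
resized_image, resized_shape = image_resizer_fn(image)
# A 50x25 input scales to 20x10: max(new_height, new_width) hits
# max_dimension while the aspect ratio is preserved.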
+ + def test_build_pad_to_multiple_resizer(self): + """Test building a pad_to_multiple_resizer from proto.""" + image_resizer_text_proto = """ + pad_to_multiple_resizer { + multiple: 32 + } + """ + input_shape = (60, 30, 3) + expected_output_shape = (64, 32, 3) + output_shape = self._shape_of_resized_random_image_given_text_proto( + input_shape, image_resizer_text_proto) + self.assertEqual(output_shape, expected_output_shape) + + def test_build_pad_to_multiple_resizer_invalid_multiple(self): + """Test that building a pad_to_multiple_resizer errors with invalid multiple.""" + + image_resizer_text_proto = """ + pad_to_multiple_resizer { + multiple: -10 + } + """ + + with self.assertRaises(ValueError): + image_resizer_builder.build(image_resizer_text_proto) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/input_reader_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/input_reader_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..c7755177e70d528984ea425f21fb9afaf11d9eaa --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/input_reader_builder.py @@ -0,0 +1,91 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Input reader builder. + +Creates data sources for DetectionModels from an InputReader config. See +input_reader.proto for options. + +Note: If users wishes to also use their own InputReaders with the Object +Detection configuration framework, they should define their own builder function +that wraps the build function. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.data_decoders import tf_example_decoder +from object_detection.data_decoders import tf_sequence_example_decoder +from object_detection.protos import input_reader_pb2 + +parallel_reader = slim.parallel_reader + + +def build(input_reader_config): + """Builds a tensor dictionary based on the InputReader config. + + Args: + input_reader_config: A input_reader_pb2.InputReader object. + + Returns: + A tensor dict based on the input_reader_config. + + Raises: + ValueError: On invalid input reader proto. + ValueError: If no input paths are specified. 
+ """ + if not isinstance(input_reader_config, input_reader_pb2.InputReader): + raise ValueError('input_reader_config not of type ' + 'input_reader_pb2.InputReader.') + + if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader': + config = input_reader_config.tf_record_input_reader + if not config.input_path: + raise ValueError('At least one input path must be specified in ' + '`input_reader_config`.') + _, string_tensor = parallel_reader.parallel_read( + config.input_path[:], # Convert `RepeatedScalarContainer` to list. + reader_class=tf.TFRecordReader, + num_epochs=(input_reader_config.num_epochs + if input_reader_config.num_epochs else None), + num_readers=input_reader_config.num_readers, + shuffle=input_reader_config.shuffle, + dtypes=[tf.string, tf.string], + capacity=input_reader_config.queue_capacity, + min_after_dequeue=input_reader_config.min_after_dequeue) + + label_map_proto_file = None + if input_reader_config.HasField('label_map_path'): + label_map_proto_file = input_reader_config.label_map_path + input_type = input_reader_config.input_type + if input_type == input_reader_pb2.InputType.Value('TF_EXAMPLE'): + decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=input_reader_config.load_instance_masks, + instance_mask_type=input_reader_config.mask_type, + label_map_proto_file=label_map_proto_file, + load_context_features=input_reader_config.load_context_features) + return decoder.decode(string_tensor) + elif input_type == input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE'): + decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder( + label_map_proto_file=label_map_proto_file, + load_context_features=input_reader_config.load_context_features) + return decoder.decode(string_tensor) + raise ValueError('Unsupported input_type.') + raise ValueError('Unsupported input_reader_config.') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/input_reader_builder_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/input_reader_builder_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6049128b03f55501ddcd2a1b3334821800d826a1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/input_reader_builder_tf1_test.py @@ -0,0 +1,306 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for input_reader_builder.""" + +import os +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import input_reader_builder +from object_detection.core import standard_fields as fields +from object_detection.dataset_tools import seq_example_util +from object_detection.protos import input_reader_pb2 +from object_detection.utils import dataset_util +from object_detection.utils import tf_version + + +def _get_labelmap_path(): + """Returns an absolute path to label map file.""" + parent_path = os.path.dirname(tf.resource_loader.get_data_files_path()) + return os.path.join(parent_path, 'data', + 'pet_label_map.pbtxt') + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class InputReaderBuilderTest(tf.test.TestCase): + + def create_tf_record(self): + path = os.path.join(self.get_temp_dir(), 'tfrecord') + writer = tf.python_io.TFRecordWriter(path) + + image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) + flat_mask = (4 * 5) * [1.0] + with self.test_session(): + encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval() + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), + 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/height': dataset_util.int64_feature(4), + 'image/width': dataset_util.int64_feature(5), + 'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]), + 'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]), + 'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]), + 'image/object/class/label': dataset_util.int64_list_feature([2]), + 'image/object/mask': dataset_util.float_list_feature(flat_mask), + })) + writer.write(example.SerializeToString()) + writer.close() + + return path + + def _make_random_serialized_jpeg_images(self, num_frames, image_height, + image_width): + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list] + with tf.Session() as sess: + encoded_images = sess.run(encoded_images_list) + return encoded_images + + def create_tf_record_sequence_example(self): + path = os.path.join(self.get_temp_dir(), 'tfrecord') + writer = tf.python_io.TFRecordWriter(path) + num_frames = 4 + image_height = 20 + image_width = 30 + image_source_ids = [str(i) for i in range(num_frames)] + with self.test_session(): + encoded_images = self._make_random_serialized_jpeg_images( + num_frames, image_height, image_width) + sequence_example_serialized = seq_example_util.make_sequence_example( + dataset_name='video_dataset', + video_id='video', + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_source_ids=image_source_ids, + image_format='JPEG', + is_annotated=[[1], [1], [1], [1]], + bboxes=[ + [[]], # Frame 0. + [[0., 0., 1., 1.]], # Frame 1. + [[0., 0., 1., 1.], + [0.1, 0.1, 0.2, 0.2]], # Frame 2. + [[]], # Frame 3. + ], + label_strings=[ + [], # Frame 0. + ['Abyssinian'], # Frame 1. + ['Abyssinian', 'american_bulldog'], # Frame 2. 
+ [], # Frame 3 + ]).SerializeToString() + + writer.write(sequence_example_serialized) + writer.close() + + return path + + def create_tf_record_with_context(self): + path = os.path.join(self.get_temp_dir(), 'tfrecord') + writer = tf.python_io.TFRecordWriter(path) + + image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) + flat_mask = (4 * 5) * [1.0] + context_features = (10 * 3) * [1.0] + with self.test_session(): + encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval() + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/height': + dataset_util.int64_feature(4), + 'image/width': + dataset_util.int64_feature(5), + 'image/object/bbox/xmin': + dataset_util.float_list_feature([0.0]), + 'image/object/bbox/xmax': + dataset_util.float_list_feature([1.0]), + 'image/object/bbox/ymin': + dataset_util.float_list_feature([0.0]), + 'image/object/bbox/ymax': + dataset_util.float_list_feature([1.0]), + 'image/object/class/label': + dataset_util.int64_list_feature([2]), + 'image/object/mask': + dataset_util.float_list_feature(flat_mask), + 'image/context_features': + dataset_util.float_list_feature(context_features), + 'image/context_feature_length': + dataset_util.int64_list_feature([10]), + })) + writer.write(example.SerializeToString()) + writer.close() + + return path + + def test_build_tf_record_input_reader(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + tensor_dict = input_reader_builder.build(input_reader_proto) + + with tf.train.MonitoredSession() as sess: + output_dict = sess.run(tensor_dict) + + self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks, + output_dict) + self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape) + self.assertEqual([2], + output_dict[fields.InputDataFields.groundtruth_classes]) + self.assertEqual( + (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllEqual( + [0.0, 0.0, 1.0, 1.0], + output_dict[fields.InputDataFields.groundtruth_boxes][0]) + + def test_build_tf_record_input_reader_sequence_example(self): + tf_record_path = self.create_tf_record_sequence_example() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + input_type: TF_SEQUENCE_EXAMPLE + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + input_reader_proto.label_map_path = _get_labelmap_path() + text_format.Merge(input_reader_text_proto, input_reader_proto) + tensor_dict = input_reader_builder.build(input_reader_proto) + + with tf.train.MonitoredSession() as sess: + output_dict = sess.run(tensor_dict) + + expected_groundtruth_classes = [[-1, -1], [1, -1], [1, 2], [-1, -1]] + expected_groundtruth_boxes = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]] + expected_num_groundtruth_boxes = [0, 1, 2, 0] + + self.assertNotIn( + fields.InputDataFields.groundtruth_instance_masks, output_dict) + # sequence example images are encoded + 
self.assertEqual((4,), output_dict[fields.InputDataFields.image].shape) + self.assertAllEqual(expected_groundtruth_classes, + output_dict[fields.InputDataFields.groundtruth_classes]) + self.assertEqual( + (4, 2, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllClose(expected_groundtruth_boxes, + output_dict[fields.InputDataFields.groundtruth_boxes]) + self.assertAllClose( + expected_num_groundtruth_boxes, + output_dict[fields.InputDataFields.num_groundtruth_boxes]) + + def test_build_tf_record_input_reader_with_context(self): + tf_record_path = self.create_tf_record_with_context() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + input_reader_proto.load_context_features = True + tensor_dict = input_reader_builder.build(input_reader_proto) + + with tf.train.MonitoredSession() as sess: + output_dict = sess.run(tensor_dict) + + self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks, + output_dict) + self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape) + self.assertEqual([2], + output_dict[fields.InputDataFields.groundtruth_classes]) + self.assertEqual( + (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllEqual( + [0.0, 0.0, 1.0, 1.0], + output_dict[fields.InputDataFields.groundtruth_boxes][0]) + self.assertAllEqual( + [0.0, 0.0, 1.0, 1.0], + output_dict[fields.InputDataFields.groundtruth_boxes][0]) + self.assertAllEqual( + (3, 10), output_dict[fields.InputDataFields.context_features].shape) + self.assertAllEqual( + (10), output_dict[fields.InputDataFields.context_feature_length]) + + def test_build_tf_record_input_reader_and_load_instance_masks(self): + tf_record_path = self.create_tf_record() + + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + load_instance_masks: true + tf_record_input_reader {{ + input_path: '{0}' + }} + """.format(tf_record_path) + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + tensor_dict = input_reader_builder.build(input_reader_proto) + + with tf.train.MonitoredSession() as sess: + output_dict = sess.run(tensor_dict) + + self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape) + self.assertEqual([2], + output_dict[fields.InputDataFields.groundtruth_classes]) + self.assertEqual( + (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) + self.assertAllEqual( + [0.0, 0.0, 1.0, 1.0], + output_dict[fields.InputDataFields.groundtruth_boxes][0]) + self.assertAllEqual( + (1, 4, 5), + output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) + + def test_raises_error_with_no_input_paths(self): + input_reader_text_proto = """ + shuffle: false + num_readers: 1 + load_instance_masks: true + """ + input_reader_proto = input_reader_pb2.InputReader() + text_format.Merge(input_reader_text_proto, input_reader_proto) + with self.assertRaises(ValueError): + input_reader_builder.build(input_reader_proto) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/losses_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/losses_builder.py new file mode 100644 index 
0000000000000000000000000000000000000000..5a69c9b602c95ab6c8368638b2e38448ae113b9c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/losses_builder.py @@ -0,0 +1,260 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build localization and classification losses from config.""" + +import functools +from object_detection.core import balanced_positive_negative_sampler as sampler +from object_detection.core import losses +from object_detection.protos import losses_pb2 +from object_detection.utils import ops + + +def build(loss_config): + """Build losses based on the config. + + Builds classification, localization losses and optionally a hard example miner + based on the config. + + Args: + loss_config: A losses_pb2.Loss object. + + Returns: + classification_loss: Classification loss object. + localization_loss: Localization loss object. + classification_weight: Classification loss weight. + localization_weight: Localization loss weight. + hard_example_miner: Hard example miner object. + random_example_sampler: BalancedPositiveNegativeSampler object. + + Raises: + ValueError: If hard_example_miner is used with sigmoid_focal_loss. + ValueError: If random_example_sampler is getting non-positive value as + desired positive example fraction. + """ + classification_loss = _build_classification_loss( + loss_config.classification_loss) + localization_loss = _build_localization_loss( + loss_config.localization_loss) + classification_weight = loss_config.classification_weight + localization_weight = loss_config.localization_weight + hard_example_miner = None + if loss_config.HasField('hard_example_miner'): + if (loss_config.classification_loss.WhichOneof('classification_loss') == + 'weighted_sigmoid_focal'): + raise ValueError('HardExampleMiner should not be used with sigmoid focal ' + 'loss') + hard_example_miner = build_hard_example_miner( + loss_config.hard_example_miner, + classification_weight, + localization_weight) + random_example_sampler = None + if loss_config.HasField('random_example_sampler'): + if loss_config.random_example_sampler.positive_sample_fraction <= 0: + raise ValueError('RandomExampleSampler should not use non-positive' + 'value as positive sample fraction.') + random_example_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=loss_config.random_example_sampler. 
+ positive_sample_fraction) + + if loss_config.expected_loss_weights == loss_config.NONE: + expected_loss_weights_fn = None + elif loss_config.expected_loss_weights == loss_config.EXPECTED_SAMPLING: + expected_loss_weights_fn = functools.partial( + ops.expected_classification_loss_by_expected_sampling, + min_num_negative_samples=loss_config.min_num_negative_samples, + desired_negative_sampling_ratio=loss_config + .desired_negative_sampling_ratio) + elif (loss_config.expected_loss_weights == loss_config + .REWEIGHTING_UNMATCHED_ANCHORS): + expected_loss_weights_fn = functools.partial( + ops.expected_classification_loss_by_reweighting_unmatched_anchors, + min_num_negative_samples=loss_config.min_num_negative_samples, + desired_negative_sampling_ratio=loss_config + .desired_negative_sampling_ratio) + else: + raise ValueError('Not a valid value for expected_classification_loss.') + + return (classification_loss, localization_loss, classification_weight, + localization_weight, hard_example_miner, random_example_sampler, + expected_loss_weights_fn) + + +def build_hard_example_miner(config, + classification_weight, + localization_weight): + """Builds hard example miner based on the config. + + Args: + config: A losses_pb2.HardExampleMiner object. + classification_weight: Classification loss weight. + localization_weight: Localization loss weight. + + Returns: + Hard example miner. + + """ + loss_type = None + if config.loss_type == losses_pb2.HardExampleMiner.BOTH: + loss_type = 'both' + if config.loss_type == losses_pb2.HardExampleMiner.CLASSIFICATION: + loss_type = 'cls' + if config.loss_type == losses_pb2.HardExampleMiner.LOCALIZATION: + loss_type = 'loc' + + max_negatives_per_positive = None + num_hard_examples = None + if config.max_negatives_per_positive > 0: + max_negatives_per_positive = config.max_negatives_per_positive + if config.num_hard_examples > 0: + num_hard_examples = config.num_hard_examples + hard_example_miner = losses.HardExampleMiner( + num_hard_examples=num_hard_examples, + iou_threshold=config.iou_threshold, + loss_type=loss_type, + cls_loss_weight=classification_weight, + loc_loss_weight=localization_weight, + max_negatives_per_positive=max_negatives_per_positive, + min_negatives_per_image=config.min_negatives_per_image) + return hard_example_miner + + +def build_faster_rcnn_classification_loss(loss_config): + """Builds a classification loss for Faster RCNN based on the loss config. + + Args: + loss_config: A losses_pb2.ClassificationLoss object. + + Returns: + Loss based on the config. + + Raises: + ValueError: On invalid loss_config. 
+ """ + if not isinstance(loss_config, losses_pb2.ClassificationLoss): + raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.') + + loss_type = loss_config.WhichOneof('classification_loss') + + if loss_type == 'weighted_sigmoid': + return losses.WeightedSigmoidClassificationLoss() + if loss_type == 'weighted_softmax': + config = loss_config.weighted_softmax + return losses.WeightedSoftmaxClassificationLoss( + logit_scale=config.logit_scale) + if loss_type == 'weighted_logits_softmax': + config = loss_config.weighted_logits_softmax + return losses.WeightedSoftmaxClassificationAgainstLogitsLoss( + logit_scale=config.logit_scale) + if loss_type == 'weighted_sigmoid_focal': + config = loss_config.weighted_sigmoid_focal + alpha = None + if config.HasField('alpha'): + alpha = config.alpha + return losses.SigmoidFocalClassificationLoss( + gamma=config.gamma, + alpha=alpha) + + # By default, Faster RCNN second stage classifier uses Softmax loss + # with anchor-wise outputs. + config = loss_config.weighted_softmax + return losses.WeightedSoftmaxClassificationLoss( + logit_scale=config.logit_scale) + + +def _build_localization_loss(loss_config): + """Builds a localization loss based on the loss config. + + Args: + loss_config: A losses_pb2.LocalizationLoss object. + + Returns: + Loss based on the config. + + Raises: + ValueError: On invalid loss_config. + """ + if not isinstance(loss_config, losses_pb2.LocalizationLoss): + raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.') + + loss_type = loss_config.WhichOneof('localization_loss') + + if loss_type == 'weighted_l2': + return losses.WeightedL2LocalizationLoss() + + if loss_type == 'weighted_smooth_l1': + return losses.WeightedSmoothL1LocalizationLoss( + loss_config.weighted_smooth_l1.delta) + + if loss_type == 'weighted_iou': + return losses.WeightedIOULocalizationLoss() + + if loss_type == 'l1_localization_loss': + return losses.L1LocalizationLoss() + + raise ValueError('Empty loss config.') + + +def _build_classification_loss(loss_config): + """Builds a classification loss based on the loss config. + + Args: + loss_config: A losses_pb2.ClassificationLoss object. + + Returns: + Loss based on the config. + + Raises: + ValueError: On invalid loss_config. 
+ """ + if not isinstance(loss_config, losses_pb2.ClassificationLoss): + raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.') + + loss_type = loss_config.WhichOneof('classification_loss') + + if loss_type == 'weighted_sigmoid': + return losses.WeightedSigmoidClassificationLoss() + + if loss_type == 'weighted_sigmoid_focal': + config = loss_config.weighted_sigmoid_focal + alpha = None + if config.HasField('alpha'): + alpha = config.alpha + return losses.SigmoidFocalClassificationLoss( + gamma=config.gamma, + alpha=alpha) + + if loss_type == 'weighted_softmax': + config = loss_config.weighted_softmax + return losses.WeightedSoftmaxClassificationLoss( + logit_scale=config.logit_scale) + + if loss_type == 'weighted_logits_softmax': + config = loss_config.weighted_logits_softmax + return losses.WeightedSoftmaxClassificationAgainstLogitsLoss( + logit_scale=config.logit_scale) + + if loss_type == 'bootstrapped_sigmoid': + config = loss_config.bootstrapped_sigmoid + return losses.BootstrappedSigmoidClassificationLoss( + alpha=config.alpha, + bootstrap_type=('hard' if config.hard_bootstrap else 'soft')) + + if loss_type == 'penalty_reduced_logistic_focal_loss': + config = loss_config.penalty_reduced_logistic_focal_loss + return losses.PenaltyReducedLogisticFocalLoss( + alpha=config.alpha, beta=config.beta) + + raise ValueError('Empty loss config.') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/losses_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/losses_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3eb9a9b0a9b65e8d745da7130d85e41f46037953 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/losses_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/losses_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/losses_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b37b7f3195427b951e2c508f0df191f176b9d835 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/losses_builder_test.py @@ -0,0 +1,558 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for losses_builder.""" + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import losses_builder +from object_detection.core import losses +from object_detection.protos import losses_pb2 +from object_detection.utils import ops + + +class LocalizationLossBuilderTest(tf.test.TestCase): + + def test_build_weighted_l2_localization_loss(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(localization_loss, + losses.WeightedL2LocalizationLoss) + + def test_build_weighted_smooth_l1_localization_loss_default_delta(self): + losses_text_proto = """ + localization_loss { + weighted_smooth_l1 { + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(localization_loss, + losses.WeightedSmoothL1LocalizationLoss) + self.assertAlmostEqual(localization_loss._delta, 1.0) + + def test_build_weighted_smooth_l1_localization_loss_non_default_delta(self): + losses_text_proto = """ + localization_loss { + weighted_smooth_l1 { + delta: 0.1 + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(localization_loss, + losses.WeightedSmoothL1LocalizationLoss) + self.assertAlmostEqual(localization_loss._delta, 0.1) + + def test_build_weighted_iou_localization_loss(self): + losses_text_proto = """ + localization_loss { + weighted_iou { + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(localization_loss, + losses.WeightedIOULocalizationLoss) + + def test_anchorwise_output(self): + losses_text_proto = """ + localization_loss { + weighted_smooth_l1 { + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(localization_loss, + losses.WeightedSmoothL1LocalizationLoss) + predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]]) + targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]]) + weights = tf.constant([[1.0, 1.0]]) + loss = localization_loss(predictions, targets, weights=weights) + self.assertEqual(loss.shape, [1, 2]) + + def test_raise_error_on_empty_localization_config(self): + losses_text_proto = """ + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + with self.assertRaises(ValueError): + losses_builder._build_localization_loss(losses_proto) + + + +class ClassificationLossBuilderTest(tf.test.TestCase): + + def test_build_weighted_sigmoid_classification_loss(self): + losses_text_proto = """ 
+ classification_loss { + weighted_sigmoid { + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSigmoidClassificationLoss) + + def test_build_weighted_sigmoid_focal_classification_loss(self): + losses_text_proto = """ + classification_loss { + weighted_sigmoid_focal { + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.SigmoidFocalClassificationLoss) + self.assertAlmostEqual(classification_loss._alpha, None) + self.assertAlmostEqual(classification_loss._gamma, 2.0) + + def test_build_weighted_sigmoid_focal_loss_non_default(self): + losses_text_proto = """ + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 3.0 + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.SigmoidFocalClassificationLoss) + self.assertAlmostEqual(classification_loss._alpha, 0.25) + self.assertAlmostEqual(classification_loss._gamma, 3.0) + + def test_build_weighted_softmax_classification_loss(self): + losses_text_proto = """ + classification_loss { + weighted_softmax { + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + + def test_build_weighted_logits_softmax_classification_loss(self): + losses_text_proto = """ + classification_loss { + weighted_logits_softmax { + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance( + classification_loss, + losses.WeightedSoftmaxClassificationAgainstLogitsLoss) + + def test_build_weighted_softmax_classification_loss_with_logit_scale(self): + losses_text_proto = """ + classification_loss { + weighted_softmax { + logit_scale: 2.0 + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + + def test_build_bootstrapped_sigmoid_classification_loss(self): + losses_text_proto = """ + classification_loss { + bootstrapped_sigmoid { + alpha: 0.5 + } + } + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.BootstrappedSigmoidClassificationLoss) + + def test_anchorwise_output(self): + losses_text_proto = """ + classification_loss { + weighted_sigmoid { + anchorwise_output: true + } + 
} + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSigmoidClassificationLoss) + predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]]) + targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]]) + weights = tf.constant([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]) + loss = classification_loss(predictions, targets, weights=weights) + self.assertEqual(loss.shape, [1, 2, 3]) + + def test_raise_error_on_empty_config(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + with self.assertRaises(ValueError): + losses_builder.build(losses_proto) + + + +class HardExampleMinerBuilderTest(tf.test.TestCase): + + def test_do_not_build_hard_example_miner_by_default(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) + self.assertEqual(hard_example_miner, None) + + def test_build_hard_example_miner_for_classification_loss(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + loss_type: CLASSIFICATION + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertEqual(hard_example_miner._loss_type, 'cls') + + def test_build_hard_example_miner_for_localization_loss(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + loss_type: LOCALIZATION + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertEqual(hard_example_miner._loss_type, 'loc') + + def test_build_hard_example_miner_with_non_default_values(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + num_hard_examples: 32 + iou_threshold: 0.5 + loss_type: LOCALIZATION + max_negatives_per_positive: 10 + min_negatives_per_image: 3 + } + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertEqual(hard_example_miner._num_hard_examples, 32) + self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5) + self.assertEqual(hard_example_miner._max_negatives_per_positive, 10) + self.assertEqual(hard_example_miner._min_negatives_per_image, 3) + + +class LossBuilderTest(tf.test.TestCase): + + def test_build_all_loss_parameters(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + } + 
classification_weight: 0.8 + localization_weight: 0.2 + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + (classification_loss, localization_loss, classification_weight, + localization_weight, hard_example_miner, _, + _) = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + self.assertIsInstance(localization_loss, + losses.WeightedL2LocalizationLoss) + self.assertAlmostEqual(classification_weight, 0.8) + self.assertAlmostEqual(localization_weight, 0.2) + + def test_build_expected_sampling(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + } + classification_weight: 0.8 + localization_weight: 0.2 + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + (classification_loss, localization_loss, classification_weight, + localization_weight, hard_example_miner, _, + _) = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss) + self.assertAlmostEqual(classification_weight, 0.8) + self.assertAlmostEqual(localization_weight, 0.2) + + + def test_build_reweighting_unmatched_anchors(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_softmax { + } + } + hard_example_miner { + } + classification_weight: 0.8 + localization_weight: 0.2 + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + (classification_loss, localization_loss, classification_weight, + localization_weight, hard_example_miner, _, + _) = losses_builder.build(losses_proto) + self.assertIsInstance(hard_example_miner, losses.HardExampleMiner) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + self.assertIsInstance(localization_loss, losses.WeightedL2LocalizationLoss) + self.assertAlmostEqual(classification_weight, 0.8) + self.assertAlmostEqual(localization_weight, 0.2) + + def test_raise_error_when_both_focal_loss_and_hard_example_miner(self): + losses_text_proto = """ + localization_loss { + weighted_l2 { + } + } + classification_loss { + weighted_sigmoid_focal { + } + } + hard_example_miner { + } + classification_weight: 0.8 + localization_weight: 0.2 + """ + losses_proto = losses_pb2.Loss() + text_format.Merge(losses_text_proto, losses_proto) + with self.assertRaises(ValueError): + losses_builder.build(losses_proto) + + +class FasterRcnnClassificationLossBuilderTest(tf.test.TestCase): + + def test_build_sigmoid_loss(self): + losses_text_proto = """ + weighted_sigmoid { + } + """ + losses_proto = losses_pb2.ClassificationLoss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss = losses_builder.build_faster_rcnn_classification_loss( + losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSigmoidClassificationLoss) + + def test_build_softmax_loss(self): + losses_text_proto = """ + weighted_softmax { + } + """ + losses_proto = losses_pb2.ClassificationLoss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss = losses_builder.build_faster_rcnn_classification_loss( + losses_proto) + 
self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + + def test_build_logits_softmax_loss(self): + losses_text_proto = """ + weighted_logits_softmax { + } + """ + losses_proto = losses_pb2.ClassificationLoss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss = losses_builder.build_faster_rcnn_classification_loss( + losses_proto) + self.assertTrue( + isinstance(classification_loss, + losses.WeightedSoftmaxClassificationAgainstLogitsLoss)) + + def test_build_sigmoid_focal_loss(self): + losses_text_proto = """ + weighted_sigmoid_focal { + } + """ + losses_proto = losses_pb2.ClassificationLoss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss = losses_builder.build_faster_rcnn_classification_loss( + losses_proto) + self.assertIsInstance(classification_loss, + losses.SigmoidFocalClassificationLoss) + + def test_build_softmax_loss_by_default(self): + losses_text_proto = """ + """ + losses_proto = losses_pb2.ClassificationLoss() + text_format.Merge(losses_text_proto, losses_proto) + classification_loss = losses_builder.build_faster_rcnn_classification_loss( + losses_proto) + self.assertIsInstance(classification_loss, + losses.WeightedSoftmaxClassificationLoss) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/matcher_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/matcher_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..086f74b5c45f81cd555207f0ad593a52a0c0f307 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/matcher_builder.py @@ -0,0 +1,58 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build an object detection matcher from configuration.""" + +from object_detection.matchers import argmax_matcher +from object_detection.protos import matcher_pb2 +from object_detection.utils import tf_version + +if tf_version.is_tf1(): + from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top + + +def build(matcher_config): + """Builds a matcher object based on the matcher config. + + Args: + matcher_config: A matcher.proto object containing the config for the desired + Matcher. + + Returns: + Matcher based on the config. + + Raises: + ValueError: On empty matcher proto. 
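+
+  Example (an illustrative sketch, mirroring the usage in matcher_builder_test.py):
+    from google.protobuf import text_format
+    from object_detection.builders import matcher_builder
+    from object_detection.protos import matcher_pb2
+
+    matcher_proto = matcher_pb2.Matcher()
+    text_format.Merge('argmax_matcher { matched_threshold: 0.7 }', matcher_proto)
+    # Returns an argmax_matcher.ArgMaxMatcher built from the parsed config.
+    matcher = matcher_builder.build(matcher_proto)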
+ """ + if not isinstance(matcher_config, matcher_pb2.Matcher): + raise ValueError('matcher_config not of type matcher_pb2.Matcher.') + if matcher_config.WhichOneof('matcher_oneof') == 'argmax_matcher': + matcher = matcher_config.argmax_matcher + matched_threshold = unmatched_threshold = None + if not matcher.ignore_thresholds: + matched_threshold = matcher.matched_threshold + unmatched_threshold = matcher.unmatched_threshold + return argmax_matcher.ArgMaxMatcher( + matched_threshold=matched_threshold, + unmatched_threshold=unmatched_threshold, + negatives_lower_than_unmatched=matcher.negatives_lower_than_unmatched, + force_match_for_each_row=matcher.force_match_for_each_row, + use_matmul_gather=matcher.use_matmul_gather) + if matcher_config.WhichOneof('matcher_oneof') == 'bipartite_matcher': + if tf_version.is_tf2(): + raise ValueError('bipartite_matcher is not supported in TF 2.X') + matcher = matcher_config.bipartite_matcher + return bipartite_matcher.GreedyBipartiteMatcher(matcher.use_matmul_gather) + raise ValueError('Empty matcher.') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/matcher_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/matcher_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71f3a6cf351d5f7e6c0abf87867133ea022442dd Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/matcher_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/matcher_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/matcher_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..cfa55ff94fb7a12dbf78787ffbbf762d1890e3bc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/matcher_builder_test.py @@ -0,0 +1,105 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for matcher_builder.""" + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import matcher_builder +from object_detection.matchers import argmax_matcher +from object_detection.protos import matcher_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + +if tf_version.is_tf1(): + from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top + + +class MatcherBuilderTest(test_case.TestCase): + + def test_build_arg_max_matcher_with_defaults(self): + matcher_text_proto = """ + argmax_matcher { + } + """ + matcher_proto = matcher_pb2.Matcher() + text_format.Merge(matcher_text_proto, matcher_proto) + matcher_object = matcher_builder.build(matcher_proto) + self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher) + self.assertAlmostEqual(matcher_object._matched_threshold, 0.5) + self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.5) + self.assertTrue(matcher_object._negatives_lower_than_unmatched) + self.assertFalse(matcher_object._force_match_for_each_row) + + def test_build_arg_max_matcher_without_thresholds(self): + matcher_text_proto = """ + argmax_matcher { + ignore_thresholds: true + } + """ + matcher_proto = matcher_pb2.Matcher() + text_format.Merge(matcher_text_proto, matcher_proto) + matcher_object = matcher_builder.build(matcher_proto) + self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher) + self.assertEqual(matcher_object._matched_threshold, None) + self.assertEqual(matcher_object._unmatched_threshold, None) + self.assertTrue(matcher_object._negatives_lower_than_unmatched) + self.assertFalse(matcher_object._force_match_for_each_row) + + def test_build_arg_max_matcher_with_non_default_parameters(self): + matcher_text_proto = """ + argmax_matcher { + matched_threshold: 0.7 + unmatched_threshold: 0.3 + negatives_lower_than_unmatched: false + force_match_for_each_row: true + use_matmul_gather: true + } + """ + matcher_proto = matcher_pb2.Matcher() + text_format.Merge(matcher_text_proto, matcher_proto) + matcher_object = matcher_builder.build(matcher_proto) + self.assertIsInstance(matcher_object, argmax_matcher.ArgMaxMatcher) + self.assertAlmostEqual(matcher_object._matched_threshold, 0.7) + self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.3) + self.assertFalse(matcher_object._negatives_lower_than_unmatched) + self.assertTrue(matcher_object._force_match_for_each_row) + self.assertTrue(matcher_object._use_matmul_gather) + + def test_build_bipartite_matcher(self): + if tf_version.is_tf2(): + self.skipTest('BipartiteMatcher unsupported in TF 2.X. 
Skipping.') + matcher_text_proto = """ + bipartite_matcher { + } + """ + matcher_proto = matcher_pb2.Matcher() + text_format.Merge(matcher_text_proto, matcher_proto) + matcher_object = matcher_builder.build(matcher_proto) + self.assertIsInstance(matcher_object, + bipartite_matcher.GreedyBipartiteMatcher) + + def test_raise_error_on_empty_matcher(self): + matcher_text_proto = """ + """ + matcher_proto = matcher_pb2.Matcher() + text_format.Merge(matcher_text_proto, matcher_proto) + with self.assertRaises(ValueError): + matcher_builder.build(matcher_proto) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..969b635873e50932eb2fa10c8febdaa0cf8b3953 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder.py @@ -0,0 +1,1090 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A function to build a DetectionModel from configuration.""" + +import functools +import sys +from object_detection.builders import anchor_generator_builder +from object_detection.builders import box_coder_builder +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.builders import image_resizer_builder +from object_detection.builders import losses_builder +from object_detection.builders import matcher_builder +from object_detection.builders import post_processing_builder +from object_detection.builders import region_similarity_calculator_builder as sim_calc +from object_detection.core import balanced_positive_negative_sampler as sampler +from object_detection.core import post_processing +from object_detection.core import target_assigner +from object_detection.meta_architectures import center_net_meta_arch +from object_detection.meta_architectures import context_rcnn_meta_arch +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.meta_architectures import rfcn_meta_arch +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.predictors.heads import mask_head +from object_detection.protos import losses_pb2 +from object_detection.protos import model_pb2 +from object_detection.utils import label_map_util +from object_detection.utils import ops +from object_detection.utils import spatial_transform_ops as spatial_ops +from object_detection.utils import tf_version + +## Feature Extractors for TF +## This section conditionally imports different feature extractors based on the +## Tensorflow version. 
+## +# pylint: disable=g-import-not-at-top +if tf_version.is_tf2(): + from object_detection.models import center_net_hourglass_feature_extractor + from object_detection.models import center_net_mobilenet_v2_feature_extractor + from object_detection.models import center_net_mobilenet_v2_fpn_feature_extractor + from object_detection.models import center_net_resnet_feature_extractor + from object_detection.models import center_net_resnet_v1_fpn_feature_extractor + from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res_keras + from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_resnet_keras + from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as ssd_resnet_v1_fpn_keras + from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_resnet_fpn_keras + from object_detection.models.ssd_mobilenet_v1_fpn_keras_feature_extractor import SSDMobileNetV1FpnKerasFeatureExtractor + from object_detection.models.ssd_mobilenet_v1_keras_feature_extractor import SSDMobileNetV1KerasFeatureExtractor + from object_detection.models.ssd_mobilenet_v2_fpn_keras_feature_extractor import SSDMobileNetV2FpnKerasFeatureExtractor + from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor + from object_detection.predictors import rfcn_keras_box_predictor + if sys.version_info[0] >= 3: + from object_detection.models import ssd_efficientnet_bifpn_feature_extractor as ssd_efficientnet_bifpn + +if tf_version.is_tf1(): + from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res + from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2 + from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas + from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas + from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1 + from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn + from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn + from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor + from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor + from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor + from object_detection.models.ssd_mobilenet_v2_mnasfpn_feature_extractor import SSDMobileNetV2MnasFPNFeatureExtractor + from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor + from object_detection.models.ssd_mobilenet_edgetpu_feature_extractor import SSDMobileNetEdgeTPUFeatureExtractor + from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor + from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor + from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor + from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor + from object_detection.models.ssd_mobilenet_v3_feature_extractor import SSDMobileNetV3LargeFeatureExtractor + from object_detection.models.ssd_mobilenet_v3_feature_extractor import 
SSDMobileNetV3SmallFeatureExtractor + from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetCPUFeatureExtractor + from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetDSPFeatureExtractor + from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetEdgeTPUFeatureExtractor + from object_detection.models.ssd_mobiledet_feature_extractor import SSDMobileDetGPUFeatureExtractor + from object_detection.models.ssd_pnasnet_feature_extractor import SSDPNASNetFeatureExtractor + from object_detection.predictors import rfcn_box_predictor +# pylint: enable=g-import-not-at-top + +if tf_version.is_tf2(): + SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = { + 'ssd_mobilenet_v1_keras': SSDMobileNetV1KerasFeatureExtractor, + 'ssd_mobilenet_v1_fpn_keras': SSDMobileNetV1FpnKerasFeatureExtractor, + 'ssd_mobilenet_v2_keras': SSDMobileNetV2KerasFeatureExtractor, + 'ssd_mobilenet_v2_fpn_keras': SSDMobileNetV2FpnKerasFeatureExtractor, + 'ssd_resnet50_v1_fpn_keras': + ssd_resnet_v1_fpn_keras.SSDResNet50V1FpnKerasFeatureExtractor, + 'ssd_resnet101_v1_fpn_keras': + ssd_resnet_v1_fpn_keras.SSDResNet101V1FpnKerasFeatureExtractor, + 'ssd_resnet152_v1_fpn_keras': + ssd_resnet_v1_fpn_keras.SSDResNet152V1FpnKerasFeatureExtractor, + 'ssd_efficientnet-b0_bifpn_keras': + ssd_efficientnet_bifpn.SSDEfficientNetB0BiFPNKerasFeatureExtractor, + 'ssd_efficientnet-b1_bifpn_keras': + ssd_efficientnet_bifpn.SSDEfficientNetB1BiFPNKerasFeatureExtractor, + 'ssd_efficientnet-b2_bifpn_keras': + ssd_efficientnet_bifpn.SSDEfficientNetB2BiFPNKerasFeatureExtractor, + 'ssd_efficientnet-b3_bifpn_keras': + ssd_efficientnet_bifpn.SSDEfficientNetB3BiFPNKerasFeatureExtractor, + 'ssd_efficientnet-b4_bifpn_keras': + ssd_efficientnet_bifpn.SSDEfficientNetB4BiFPNKerasFeatureExtractor, + 'ssd_efficientnet-b5_bifpn_keras': + ssd_efficientnet_bifpn.SSDEfficientNetB5BiFPNKerasFeatureExtractor, + 'ssd_efficientnet-b6_bifpn_keras': + ssd_efficientnet_bifpn.SSDEfficientNetB6BiFPNKerasFeatureExtractor, + 'ssd_efficientnet-b7_bifpn_keras': + ssd_efficientnet_bifpn.SSDEfficientNetB7BiFPNKerasFeatureExtractor, + } + + FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = { + 'faster_rcnn_resnet50_keras': + frcnn_resnet_keras.FasterRCNNResnet50KerasFeatureExtractor, + 'faster_rcnn_resnet101_keras': + frcnn_resnet_keras.FasterRCNNResnet101KerasFeatureExtractor, + 'faster_rcnn_resnet152_keras': + frcnn_resnet_keras.FasterRCNNResnet152KerasFeatureExtractor, + 'faster_rcnn_inception_resnet_v2_keras': + frcnn_inc_res_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor, + 'faster_rcnn_resnet50_fpn_keras': + frcnn_resnet_fpn_keras.FasterRCNNResnet50FpnKerasFeatureExtractor, + 'faster_rcnn_resnet101_fpn_keras': + frcnn_resnet_fpn_keras.FasterRCNNResnet101FpnKerasFeatureExtractor, + 'faster_rcnn_resnet152_fpn_keras': + frcnn_resnet_fpn_keras.FasterRCNNResnet152FpnKerasFeatureExtractor, + } + + CENTER_NET_EXTRACTOR_FUNCTION_MAP = { + 'resnet_v2_50': + center_net_resnet_feature_extractor.resnet_v2_50, + 'resnet_v2_101': + center_net_resnet_feature_extractor.resnet_v2_101, + 'resnet_v1_18_fpn': + center_net_resnet_v1_fpn_feature_extractor.resnet_v1_18_fpn, + 'resnet_v1_34_fpn': + center_net_resnet_v1_fpn_feature_extractor.resnet_v1_34_fpn, + 'resnet_v1_50_fpn': + center_net_resnet_v1_fpn_feature_extractor.resnet_v1_50_fpn, + 'resnet_v1_101_fpn': + center_net_resnet_v1_fpn_feature_extractor.resnet_v1_101_fpn, + 'hourglass_104': + center_net_hourglass_feature_extractor.hourglass_104, + 'mobilenet_v2': + 
center_net_mobilenet_v2_feature_extractor.mobilenet_v2, + 'mobilenet_v2_fpn': + center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn, + } + + FEATURE_EXTRACTOR_MAPS = [ + CENTER_NET_EXTRACTOR_FUNCTION_MAP, + FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP, + SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP + ] + +if tf_version.is_tf1(): + SSD_FEATURE_EXTRACTOR_CLASS_MAP = { + 'ssd_inception_v2': + SSDInceptionV2FeatureExtractor, + 'ssd_inception_v3': + SSDInceptionV3FeatureExtractor, + 'ssd_mobilenet_v1': + SSDMobileNetV1FeatureExtractor, + 'ssd_mobilenet_v1_fpn': + SSDMobileNetV1FpnFeatureExtractor, + 'ssd_mobilenet_v1_ppn': + SSDMobileNetV1PpnFeatureExtractor, + 'ssd_mobilenet_v2': + SSDMobileNetV2FeatureExtractor, + 'ssd_mobilenet_v2_fpn': + SSDMobileNetV2FpnFeatureExtractor, + 'ssd_mobilenet_v2_mnasfpn': + SSDMobileNetV2MnasFPNFeatureExtractor, + 'ssd_mobilenet_v3_large': + SSDMobileNetV3LargeFeatureExtractor, + 'ssd_mobilenet_v3_small': + SSDMobileNetV3SmallFeatureExtractor, + 'ssd_mobilenet_edgetpu': + SSDMobileNetEdgeTPUFeatureExtractor, + 'ssd_resnet50_v1_fpn': + ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor, + 'ssd_resnet101_v1_fpn': + ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor, + 'ssd_resnet152_v1_fpn': + ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor, + 'ssd_resnet50_v1_ppn': + ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor, + 'ssd_resnet101_v1_ppn': + ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor, + 'ssd_resnet152_v1_ppn': + ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor, + 'embedded_ssd_mobilenet_v1': + EmbeddedSSDMobileNetV1FeatureExtractor, + 'ssd_pnasnet': + SSDPNASNetFeatureExtractor, + 'ssd_mobiledet_cpu': + SSDMobileDetCPUFeatureExtractor, + 'ssd_mobiledet_dsp': + SSDMobileDetDSPFeatureExtractor, + 'ssd_mobiledet_edgetpu': + SSDMobileDetEdgeTPUFeatureExtractor, + 'ssd_mobiledet_gpu': + SSDMobileDetGPUFeatureExtractor, + } + + FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP = { + 'faster_rcnn_nas': + frcnn_nas.FasterRCNNNASFeatureExtractor, + 'faster_rcnn_pnas': + frcnn_pnas.FasterRCNNPNASFeatureExtractor, + 'faster_rcnn_inception_resnet_v2': + frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor, + 'faster_rcnn_inception_v2': + frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor, + 'faster_rcnn_resnet50': + frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor, + 'faster_rcnn_resnet101': + frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor, + 'faster_rcnn_resnet152': + frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor, + } + + FEATURE_EXTRACTOR_MAPS = [ + SSD_FEATURE_EXTRACTOR_CLASS_MAP, + FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP + ] + + +def _check_feature_extractor_exists(feature_extractor_type): + feature_extractors = set().union(*FEATURE_EXTRACTOR_MAPS) + if feature_extractor_type not in feature_extractors: + raise ValueError('{} is not supported. See `model_builder.py` for features ' + 'extractors compatible with different versions of ' + 'Tensorflow'.format(feature_extractor_type)) + + +def _build_ssd_feature_extractor(feature_extractor_config, + is_training, + freeze_batchnorm, + reuse_weights=None): + """Builds a ssd_meta_arch.SSDFeatureExtractor based on config. + + Args: + feature_extractor_config: A SSDFeatureExtractor proto config from ssd.proto. + is_training: True if this feature extractor is being built for training. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 
1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + reuse_weights: if the feature extractor should reuse weights. + + Returns: + ssd_meta_arch.SSDFeatureExtractor based on config. + + Raises: + ValueError: On invalid feature extractor type. + """ + feature_type = feature_extractor_config.type + depth_multiplier = feature_extractor_config.depth_multiplier + min_depth = feature_extractor_config.min_depth + pad_to_multiple = feature_extractor_config.pad_to_multiple + use_explicit_padding = feature_extractor_config.use_explicit_padding + use_depthwise = feature_extractor_config.use_depthwise + + is_keras = tf_version.is_tf2() + if is_keras: + conv_hyperparams = hyperparams_builder.KerasLayerHyperparams( + feature_extractor_config.conv_hyperparams) + else: + conv_hyperparams = hyperparams_builder.build( + feature_extractor_config.conv_hyperparams, is_training) + override_base_feature_extractor_hyperparams = ( + feature_extractor_config.override_base_feature_extractor_hyperparams) + + if not is_keras and feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP: + raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type)) + + if is_keras: + feature_extractor_class = SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[ + feature_type] + else: + feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type] + kwargs = { + 'is_training': + is_training, + 'depth_multiplier': + depth_multiplier, + 'min_depth': + min_depth, + 'pad_to_multiple': + pad_to_multiple, + 'use_explicit_padding': + use_explicit_padding, + 'use_depthwise': + use_depthwise, + 'override_base_feature_extractor_hyperparams': + override_base_feature_extractor_hyperparams + } + + if feature_extractor_config.HasField('replace_preprocessor_with_placeholder'): + kwargs.update({ + 'replace_preprocessor_with_placeholder': + feature_extractor_config.replace_preprocessor_with_placeholder + }) + + if feature_extractor_config.HasField('num_layers'): + kwargs.update({'num_layers': feature_extractor_config.num_layers}) + + if is_keras: + kwargs.update({ + 'conv_hyperparams': conv_hyperparams, + 'inplace_batchnorm_update': False, + 'freeze_batchnorm': freeze_batchnorm + }) + else: + kwargs.update({ + 'conv_hyperparams_fn': conv_hyperparams, + 'reuse_weights': reuse_weights, + }) + + + if feature_extractor_config.HasField('fpn'): + kwargs.update({ + 'fpn_min_level': + feature_extractor_config.fpn.min_level, + 'fpn_max_level': + feature_extractor_config.fpn.max_level, + 'additional_layer_depth': + feature_extractor_config.fpn.additional_layer_depth, + }) + + if feature_extractor_config.HasField('bifpn'): + kwargs.update({ + 'bifpn_min_level': feature_extractor_config.bifpn.min_level, + 'bifpn_max_level': feature_extractor_config.bifpn.max_level, + 'bifpn_num_iterations': feature_extractor_config.bifpn.num_iterations, + 'bifpn_num_filters': feature_extractor_config.bifpn.num_filters, + 'bifpn_combine_method': feature_extractor_config.bifpn.combine_method, + }) + + return feature_extractor_class(**kwargs) + + +def _build_ssd_model(ssd_config, is_training, add_summaries): + """Builds an SSD detection model based on the model config. + + Args: + ssd_config: A ssd.proto object containing the config for the desired + SSDMetaArch. + is_training: True if this model is being built for training purposes. + add_summaries: Whether to add tf summaries in the model. + Returns: + SSDMetaArch based on the config. + + Raises: + ValueError: If ssd_config.type is not recognized (i.e. not registered in + model_class_map). 
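+
+  Example (an illustrative sketch; `ssd_model_text` is a hypothetical placeholder
+  for a complete SSD config in text-proto form, not defined in this module):
+    from google.protobuf import text_format
+    from object_detection.protos import model_pb2
+
+    model_proto = model_pb2.DetectionModel()
+    text_format.Merge(ssd_model_text, model_proto)  # ssd_model_text: assumed input
+    detection_model = _build_ssd_model(
+        model_proto.ssd, is_training=True, add_summaries=True)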
+ """ + num_classes = ssd_config.num_classes + _check_feature_extractor_exists(ssd_config.feature_extractor.type) + + # Feature extractor + feature_extractor = _build_ssd_feature_extractor( + feature_extractor_config=ssd_config.feature_extractor, + freeze_batchnorm=ssd_config.freeze_batchnorm, + is_training=is_training) + + box_coder = box_coder_builder.build(ssd_config.box_coder) + matcher = matcher_builder.build(ssd_config.matcher) + region_similarity_calculator = sim_calc.build( + ssd_config.similarity_calculator) + encode_background_as_zeros = ssd_config.encode_background_as_zeros + negative_class_weight = ssd_config.negative_class_weight + anchor_generator = anchor_generator_builder.build( + ssd_config.anchor_generator) + if feature_extractor.is_keras_model: + ssd_box_predictor = box_predictor_builder.build_keras( + hyperparams_fn=hyperparams_builder.KerasLayerHyperparams, + freeze_batchnorm=ssd_config.freeze_batchnorm, + inplace_batchnorm_update=False, + num_predictions_per_location_list=anchor_generator + .num_anchors_per_location(), + box_predictor_config=ssd_config.box_predictor, + is_training=is_training, + num_classes=num_classes, + add_background_class=ssd_config.add_background_class) + else: + ssd_box_predictor = box_predictor_builder.build( + hyperparams_builder.build, ssd_config.box_predictor, is_training, + num_classes, ssd_config.add_background_class) + image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer) + non_max_suppression_fn, score_conversion_fn = post_processing_builder.build( + ssd_config.post_processing) + (classification_loss, localization_loss, classification_weight, + localization_weight, hard_example_miner, random_example_sampler, + expected_loss_weights_fn) = losses_builder.build(ssd_config.loss) + normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches + normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize + + equalization_loss_config = ops.EqualizationLossConfig( + weight=ssd_config.loss.equalization_loss.weight, + exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes) + + target_assigner_instance = target_assigner.TargetAssigner( + region_similarity_calculator, + matcher, + box_coder, + negative_class_weight=negative_class_weight) + + ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch + kwargs = {} + + return ssd_meta_arch_fn( + is_training=is_training, + anchor_generator=anchor_generator, + box_predictor=ssd_box_predictor, + box_coder=box_coder, + feature_extractor=feature_extractor, + encode_background_as_zeros=encode_background_as_zeros, + image_resizer_fn=image_resizer_fn, + non_max_suppression_fn=non_max_suppression_fn, + score_conversion_fn=score_conversion_fn, + classification_loss=classification_loss, + localization_loss=localization_loss, + classification_loss_weight=classification_weight, + localization_loss_weight=localization_weight, + normalize_loss_by_num_matches=normalize_loss_by_num_matches, + hard_example_miner=hard_example_miner, + target_assigner_instance=target_assigner_instance, + add_summaries=add_summaries, + normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, + freeze_batchnorm=ssd_config.freeze_batchnorm, + inplace_batchnorm_update=ssd_config.inplace_batchnorm_update, + add_background_class=ssd_config.add_background_class, + explicit_background_class=ssd_config.explicit_background_class, + random_example_sampler=random_example_sampler, + expected_loss_weights_fn=expected_loss_weights_fn, + use_confidences_as_targets=ssd_config.use_confidences_as_targets, + 
implicit_example_weight=ssd_config.implicit_example_weight, + equalization_loss_config=equalization_loss_config, + return_raw_detections_during_predict=( + ssd_config.return_raw_detections_during_predict), + **kwargs) + + +def _build_faster_rcnn_feature_extractor( + feature_extractor_config, is_training, reuse_weights=True, + inplace_batchnorm_update=False): + """Builds a faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config. + + Args: + feature_extractor_config: A FasterRcnnFeatureExtractor proto config from + faster_rcnn.proto. + is_training: True if this feature extractor is being built for training. + reuse_weights: if the feature extractor should reuse weights. + inplace_batchnorm_update: Whether to update batch_norm inplace during + training. This is required for batch norm to work correctly on TPUs. When + this is false, user must add a control dependency on + tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch + norm moving average parameters. + + Returns: + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config. + + Raises: + ValueError: On invalid feature extractor type. + """ + if inplace_batchnorm_update: + raise ValueError('inplace batchnorm updates not supported.') + feature_type = feature_extractor_config.type + first_stage_features_stride = ( + feature_extractor_config.first_stage_features_stride) + batch_norm_trainable = feature_extractor_config.batch_norm_trainable + + if feature_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP: + raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format( + feature_type)) + feature_extractor_class = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[ + feature_type] + return feature_extractor_class( + is_training, first_stage_features_stride, + batch_norm_trainable, reuse_weights=reuse_weights) + + +def _build_faster_rcnn_keras_feature_extractor( + feature_extractor_config, is_training, + inplace_batchnorm_update=False): + """Builds a faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor from config. + + Args: + feature_extractor_config: A FasterRcnnFeatureExtractor proto config from + faster_rcnn.proto. + is_training: True if this feature extractor is being built for training. + inplace_batchnorm_update: Whether to update batch_norm inplace during + training. This is required for batch norm to work correctly on TPUs. When + this is false, user must add a control dependency on + tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch + norm moving average parameters. + + Returns: + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor based on config. + + Raises: + ValueError: On invalid feature extractor type. 
+ """ + if inplace_batchnorm_update: + raise ValueError('inplace batchnorm updates not supported.') + feature_type = feature_extractor_config.type + first_stage_features_stride = ( + feature_extractor_config.first_stage_features_stride) + batch_norm_trainable = feature_extractor_config.batch_norm_trainable + + if feature_type not in FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP: + raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format( + feature_type)) + feature_extractor_class = FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[ + feature_type] + + kwargs = {} + + if feature_extractor_config.HasField('conv_hyperparams'): + kwargs.update({ + 'conv_hyperparams': + hyperparams_builder.KerasLayerHyperparams( + feature_extractor_config.conv_hyperparams), + 'override_base_feature_extractor_hyperparams': + feature_extractor_config.override_base_feature_extractor_hyperparams + }) + + if feature_extractor_config.HasField('fpn'): + kwargs.update({ + 'fpn_min_level': + feature_extractor_config.fpn.min_level, + 'fpn_max_level': + feature_extractor_config.fpn.max_level, + 'additional_layer_depth': + feature_extractor_config.fpn.additional_layer_depth, + }) + + return feature_extractor_class( + is_training, first_stage_features_stride, + batch_norm_trainable, **kwargs) + + +def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries): + """Builds a Faster R-CNN or R-FCN detection model based on the model config. + + Builds R-FCN model if the second_stage_box_predictor in the config is of type + `rfcn_box_predictor` else builds a Faster R-CNN model. + + Args: + frcnn_config: A faster_rcnn.proto object containing the config for the + desired FasterRCNNMetaArch or RFCNMetaArch. + is_training: True if this model is being built for training purposes. + add_summaries: Whether to add tf summaries in the model. + + Returns: + FasterRCNNMetaArch based on the config. + + Raises: + ValueError: If frcnn_config.type is not recognized (i.e. not registered in + model_class_map). 
+ """ + num_classes = frcnn_config.num_classes + image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer) + _check_feature_extractor_exists(frcnn_config.feature_extractor.type) + is_keras = tf_version.is_tf2() + + if is_keras: + feature_extractor = _build_faster_rcnn_keras_feature_extractor( + frcnn_config.feature_extractor, is_training, + inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update) + else: + feature_extractor = _build_faster_rcnn_feature_extractor( + frcnn_config.feature_extractor, is_training, + inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update) + + number_of_stages = frcnn_config.number_of_stages + first_stage_anchor_generator = anchor_generator_builder.build( + frcnn_config.first_stage_anchor_generator) + + first_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', + 'proposal', + use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher) + first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate + if is_keras: + first_stage_box_predictor_arg_scope_fn = ( + hyperparams_builder.KerasLayerHyperparams( + frcnn_config.first_stage_box_predictor_conv_hyperparams)) + else: + first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build( + frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training) + first_stage_box_predictor_kernel_size = ( + frcnn_config.first_stage_box_predictor_kernel_size) + first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth + first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size + use_static_shapes = frcnn_config.use_static_shapes and ( + frcnn_config.use_static_shapes_for_eval or is_training) + first_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=frcnn_config.first_stage_positive_balance_fraction, + is_static=(frcnn_config.use_static_balanced_label_sampler and + use_static_shapes)) + first_stage_max_proposals = frcnn_config.first_stage_max_proposals + if (frcnn_config.first_stage_nms_iou_threshold < 0 or + frcnn_config.first_stage_nms_iou_threshold > 1.0): + raise ValueError('iou_threshold not in [0, 1.0].') + if (is_training and frcnn_config.second_stage_batch_size > + first_stage_max_proposals): + raise ValueError('second_stage_batch_size should be no greater than ' + 'first_stage_max_proposals.') + first_stage_non_max_suppression_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=frcnn_config.first_stage_nms_score_threshold, + iou_thresh=frcnn_config.first_stage_nms_iou_threshold, + max_size_per_class=frcnn_config.first_stage_max_proposals, + max_total_size=frcnn_config.first_stage_max_proposals, + use_static_shapes=use_static_shapes, + use_partitioned_nms=frcnn_config.use_partitioned_nms_in_first_stage, + use_combined_nms=frcnn_config.use_combined_nms_in_first_stage) + first_stage_loc_loss_weight = ( + frcnn_config.first_stage_localization_loss_weight) + first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight + + initial_crop_size = frcnn_config.initial_crop_size + maxpool_kernel_size = frcnn_config.maxpool_kernel_size + maxpool_stride = frcnn_config.maxpool_stride + + second_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', + 'detection', + use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher) + if is_keras: + second_stage_box_predictor = box_predictor_builder.build_keras( + hyperparams_builder.KerasLayerHyperparams, + freeze_batchnorm=False, + inplace_batchnorm_update=False, + 
num_predictions_per_location_list=[1], + box_predictor_config=frcnn_config.second_stage_box_predictor, + is_training=is_training, + num_classes=num_classes) + else: + second_stage_box_predictor = box_predictor_builder.build( + hyperparams_builder.build, + frcnn_config.second_stage_box_predictor, + is_training=is_training, + num_classes=num_classes) + second_stage_batch_size = frcnn_config.second_stage_batch_size + second_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=frcnn_config.second_stage_balance_fraction, + is_static=(frcnn_config.use_static_balanced_label_sampler and + use_static_shapes)) + (second_stage_non_max_suppression_fn, second_stage_score_conversion_fn + ) = post_processing_builder.build(frcnn_config.second_stage_post_processing) + second_stage_localization_loss_weight = ( + frcnn_config.second_stage_localization_loss_weight) + second_stage_classification_loss = ( + losses_builder.build_faster_rcnn_classification_loss( + frcnn_config.second_stage_classification_loss)) + second_stage_classification_loss_weight = ( + frcnn_config.second_stage_classification_loss_weight) + second_stage_mask_prediction_loss_weight = ( + frcnn_config.second_stage_mask_prediction_loss_weight) + + hard_example_miner = None + if frcnn_config.HasField('hard_example_miner'): + hard_example_miner = losses_builder.build_hard_example_miner( + frcnn_config.hard_example_miner, + second_stage_classification_loss_weight, + second_stage_localization_loss_weight) + + crop_and_resize_fn = ( + spatial_ops.multilevel_matmul_crop_and_resize + if frcnn_config.use_matmul_crop_and_resize + else spatial_ops.multilevel_native_crop_and_resize) + clip_anchors_to_image = ( + frcnn_config.clip_anchors_to_image) + + common_kwargs = { + 'is_training': + is_training, + 'num_classes': + num_classes, + 'image_resizer_fn': + image_resizer_fn, + 'feature_extractor': + feature_extractor, + 'number_of_stages': + number_of_stages, + 'first_stage_anchor_generator': + first_stage_anchor_generator, + 'first_stage_target_assigner': + first_stage_target_assigner, + 'first_stage_atrous_rate': + first_stage_atrous_rate, + 'first_stage_box_predictor_arg_scope_fn': + first_stage_box_predictor_arg_scope_fn, + 'first_stage_box_predictor_kernel_size': + first_stage_box_predictor_kernel_size, + 'first_stage_box_predictor_depth': + first_stage_box_predictor_depth, + 'first_stage_minibatch_size': + first_stage_minibatch_size, + 'first_stage_sampler': + first_stage_sampler, + 'first_stage_non_max_suppression_fn': + first_stage_non_max_suppression_fn, + 'first_stage_max_proposals': + first_stage_max_proposals, + 'first_stage_localization_loss_weight': + first_stage_loc_loss_weight, + 'first_stage_objectness_loss_weight': + first_stage_obj_loss_weight, + 'second_stage_target_assigner': + second_stage_target_assigner, + 'second_stage_batch_size': + second_stage_batch_size, + 'second_stage_sampler': + second_stage_sampler, + 'second_stage_non_max_suppression_fn': + second_stage_non_max_suppression_fn, + 'second_stage_score_conversion_fn': + second_stage_score_conversion_fn, + 'second_stage_localization_loss_weight': + second_stage_localization_loss_weight, + 'second_stage_classification_loss': + second_stage_classification_loss, + 'second_stage_classification_loss_weight': + second_stage_classification_loss_weight, + 'hard_example_miner': + hard_example_miner, + 'add_summaries': + add_summaries, + 'crop_and_resize_fn': + crop_and_resize_fn, + 'clip_anchors_to_image': + clip_anchors_to_image, + 'use_static_shapes': + 
use_static_shapes, + 'resize_masks': + frcnn_config.resize_masks, + 'return_raw_detections_during_predict': + frcnn_config.return_raw_detections_during_predict, + 'output_final_box_features': + frcnn_config.output_final_box_features + } + + if ((not is_keras and isinstance(second_stage_box_predictor, + rfcn_box_predictor.RfcnBoxPredictor)) or + (is_keras and + isinstance(second_stage_box_predictor, + rfcn_keras_box_predictor.RfcnKerasBoxPredictor))): + return rfcn_meta_arch.RFCNMetaArch( + second_stage_rfcn_box_predictor=second_stage_box_predictor, + **common_kwargs) + elif frcnn_config.HasField('context_config'): + context_config = frcnn_config.context_config + common_kwargs.update({ + 'attention_bottleneck_dimension': + context_config.attention_bottleneck_dimension, + 'attention_temperature': + context_config.attention_temperature + }) + return context_rcnn_meta_arch.ContextRCNNMetaArch( + initial_crop_size=initial_crop_size, + maxpool_kernel_size=maxpool_kernel_size, + maxpool_stride=maxpool_stride, + second_stage_mask_rcnn_box_predictor=second_stage_box_predictor, + second_stage_mask_prediction_loss_weight=( + second_stage_mask_prediction_loss_weight), + **common_kwargs) + else: + return faster_rcnn_meta_arch.FasterRCNNMetaArch( + initial_crop_size=initial_crop_size, + maxpool_kernel_size=maxpool_kernel_size, + maxpool_stride=maxpool_stride, + second_stage_mask_rcnn_box_predictor=second_stage_box_predictor, + second_stage_mask_prediction_loss_weight=( + second_stage_mask_prediction_loss_weight), + **common_kwargs) + +EXPERIMENTAL_META_ARCH_BUILDER_MAP = { +} + + +def _build_experimental_model(config, is_training, add_summaries=True): + return EXPERIMENTAL_META_ARCH_BUILDER_MAP[config.name]( + is_training, add_summaries) + + +# The class ID in the groundtruth/model architecture is usually 0-based while +# the ID in the label map is 1-based. The offset is used to convert between +# the two. 
+CLASS_ID_OFFSET = 1 +KEYPOINT_STD_DEV_DEFAULT = 1.0 + + +def keypoint_proto_to_params(kp_config, keypoint_map_dict): + """Converts CenterNet.KeypointEstimation proto to parameter namedtuple.""" + label_map_item = keypoint_map_dict[kp_config.keypoint_class_name] + + classification_loss, localization_loss, _, _, _, _, _ = ( + losses_builder.build(kp_config.loss)) + + keypoint_indices = [ + keypoint.id for keypoint in label_map_item.keypoints + ] + keypoint_labels = [ + keypoint.label for keypoint in label_map_item.keypoints + ] + keypoint_std_dev_dict = { + label: KEYPOINT_STD_DEV_DEFAULT for label in keypoint_labels + } + if kp_config.keypoint_label_to_std: + for label, value in kp_config.keypoint_label_to_std.items(): + keypoint_std_dev_dict[label] = value + keypoint_std_dev = [keypoint_std_dev_dict[label] for label in keypoint_labels] + return center_net_meta_arch.KeypointEstimationParams( + task_name=kp_config.task_name, + class_id=label_map_item.id - CLASS_ID_OFFSET, + keypoint_indices=keypoint_indices, + classification_loss=classification_loss, + localization_loss=localization_loss, + keypoint_labels=keypoint_labels, + keypoint_std_dev=keypoint_std_dev, + task_loss_weight=kp_config.task_loss_weight, + keypoint_regression_loss_weight=kp_config.keypoint_regression_loss_weight, + keypoint_heatmap_loss_weight=kp_config.keypoint_heatmap_loss_weight, + keypoint_offset_loss_weight=kp_config.keypoint_offset_loss_weight, + heatmap_bias_init=kp_config.heatmap_bias_init, + keypoint_candidate_score_threshold=( + kp_config.keypoint_candidate_score_threshold), + num_candidates_per_keypoint=kp_config.num_candidates_per_keypoint, + peak_max_pool_kernel_size=kp_config.peak_max_pool_kernel_size, + unmatched_keypoint_score=kp_config.unmatched_keypoint_score, + box_scale=kp_config.box_scale, + candidate_search_scale=kp_config.candidate_search_scale, + candidate_ranking_mode=kp_config.candidate_ranking_mode, + offset_peak_radius=kp_config.offset_peak_radius, + per_keypoint_offset=kp_config.per_keypoint_offset) + + +def object_detection_proto_to_params(od_config): + """Converts CenterNet.ObjectDetection proto to parameter namedtuple.""" + loss = losses_pb2.Loss() + # Add dummy classification loss to avoid the loss_builder throwing error. + # TODO(yuhuic): update the loss builder to take the classification loss + # directly. + loss.classification_loss.weighted_sigmoid.CopyFrom( + losses_pb2.WeightedSigmoidClassificationLoss()) + loss.localization_loss.CopyFrom(od_config.localization_loss) + _, localization_loss, _, _, _, _, _ = (losses_builder.build(loss)) + return center_net_meta_arch.ObjectDetectionParams( + localization_loss=localization_loss, + scale_loss_weight=od_config.scale_loss_weight, + offset_loss_weight=od_config.offset_loss_weight, + task_loss_weight=od_config.task_loss_weight) + + +def object_center_proto_to_params(oc_config): + """Converts CenterNet.ObjectCenter proto to parameter namedtuple.""" + loss = losses_pb2.Loss() + # Add dummy localization loss to avoid the loss_builder throwing error. + # TODO(yuhuic): update the loss builder to take the localization loss + # directly. 
+ loss.localization_loss.weighted_l2.CopyFrom( + losses_pb2.WeightedL2LocalizationLoss()) + loss.classification_loss.CopyFrom(oc_config.classification_loss) + classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss)) + return center_net_meta_arch.ObjectCenterParams( + classification_loss=classification_loss, + object_center_loss_weight=oc_config.object_center_loss_weight, + heatmap_bias_init=oc_config.heatmap_bias_init, + min_box_overlap_iou=oc_config.min_box_overlap_iou, + max_box_predictions=oc_config.max_box_predictions, + use_labeled_classes=oc_config.use_labeled_classes) + + +def mask_proto_to_params(mask_config): + """Converts CenterNet.MaskEstimation proto to parameter namedtuple.""" + loss = losses_pb2.Loss() + # Add dummy localization loss to avoid the loss_builder throwing error. + loss.localization_loss.weighted_l2.CopyFrom( + losses_pb2.WeightedL2LocalizationLoss()) + loss.classification_loss.CopyFrom(mask_config.classification_loss) + classification_loss, _, _, _, _, _, _ = (losses_builder.build(loss)) + return center_net_meta_arch.MaskParams( + classification_loss=classification_loss, + task_loss_weight=mask_config.task_loss_weight, + mask_height=mask_config.mask_height, + mask_width=mask_config.mask_width, + score_threshold=mask_config.score_threshold, + heatmap_bias_init=mask_config.heatmap_bias_init) + + +def densepose_proto_to_params(densepose_config): + """Converts CenterNet.DensePoseEstimation proto to parameter namedtuple.""" + classification_loss, localization_loss, _, _, _, _, _ = ( + losses_builder.build(densepose_config.loss)) + return center_net_meta_arch.DensePoseParams( + class_id=densepose_config.class_id, + classification_loss=classification_loss, + localization_loss=localization_loss, + part_loss_weight=densepose_config.part_loss_weight, + coordinate_loss_weight=densepose_config.coordinate_loss_weight, + num_parts=densepose_config.num_parts, + task_loss_weight=densepose_config.task_loss_weight, + upsample_to_input_res=densepose_config.upsample_to_input_res, + heatmap_bias_init=densepose_config.heatmap_bias_init) + + +def tracking_proto_to_params(tracking_config): + """Converts CenterNet.TrackEstimation proto to parameter namedtuple.""" + loss = losses_pb2.Loss() + # Add dummy localization loss to avoid the loss_builder throwing error. + # TODO(yuhuic): update the loss builder to take the localization loss + # directly. + loss.localization_loss.weighted_l2.CopyFrom( + losses_pb2.WeightedL2LocalizationLoss()) + loss.classification_loss.CopyFrom(tracking_config.classification_loss) + classification_loss, _, _, _, _, _, _ = losses_builder.build(loss) + return center_net_meta_arch.TrackParams( + num_track_ids=tracking_config.num_track_ids, + reid_embed_size=tracking_config.reid_embed_size, + classification_loss=classification_loss, + num_fc_layers=tracking_config.num_fc_layers, + task_loss_weight=tracking_config.task_loss_weight) + + +def temporal_offset_proto_to_params(temporal_offset_config): + """Converts CenterNet.TemporalOffsetEstimation proto to param-tuple.""" + loss = losses_pb2.Loss() + # Add dummy classification loss to avoid the loss_builder throwing error. + # TODO(yuhuic): update the loss builder to take the classification loss + # directly. 
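+  # As in the builders above, only the localization loss element of the
+  # returned tuple is used for the temporal offset task.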
+ loss.classification_loss.weighted_sigmoid.CopyFrom( + losses_pb2.WeightedSigmoidClassificationLoss()) + loss.localization_loss.CopyFrom(temporal_offset_config.localization_loss) + _, localization_loss, _, _, _, _, _ = losses_builder.build(loss) + return center_net_meta_arch.TemporalOffsetParams( + localization_loss=localization_loss, + task_loss_weight=temporal_offset_config.task_loss_weight) + + +def _build_center_net_model(center_net_config, is_training, add_summaries): + """Build a CenterNet detection model. + + Args: + center_net_config: A CenterNet proto object with model configuration. + is_training: True if this model is being built for training purposes. + add_summaries: Whether to add tf summaries in the model. + + Returns: + CenterNetMetaArch based on the config. + + """ + + image_resizer_fn = image_resizer_builder.build( + center_net_config.image_resizer) + _check_feature_extractor_exists(center_net_config.feature_extractor.type) + feature_extractor = _build_center_net_feature_extractor( + center_net_config.feature_extractor) + object_center_params = object_center_proto_to_params( + center_net_config.object_center_params) + + object_detection_params = None + if center_net_config.HasField('object_detection_task'): + object_detection_params = object_detection_proto_to_params( + center_net_config.object_detection_task) + + keypoint_params_dict = None + if center_net_config.keypoint_estimation_task: + label_map_proto = label_map_util.load_labelmap( + center_net_config.keypoint_label_map_path) + keypoint_map_dict = { + item.name: item for item in label_map_proto.item if item.keypoints + } + keypoint_params_dict = {} + keypoint_class_id_set = set() + all_keypoint_indices = [] + for task in center_net_config.keypoint_estimation_task: + kp_params = keypoint_proto_to_params(task, keypoint_map_dict) + keypoint_params_dict[task.task_name] = kp_params + all_keypoint_indices.extend(kp_params.keypoint_indices) + if kp_params.class_id in keypoint_class_id_set: + raise ValueError(('Multiple keypoint tasks map to the same class id is ' + 'not allowed: %d' % kp_params.class_id)) + else: + keypoint_class_id_set.add(kp_params.class_id) + if len(all_keypoint_indices) > len(set(all_keypoint_indices)): + raise ValueError('Some keypoint indices are used more than once.') + + mask_params = None + if center_net_config.HasField('mask_estimation_task'): + mask_params = mask_proto_to_params(center_net_config.mask_estimation_task) + + densepose_params = None + if center_net_config.HasField('densepose_estimation_task'): + densepose_params = densepose_proto_to_params( + center_net_config.densepose_estimation_task) + + track_params = None + if center_net_config.HasField('track_estimation_task'): + track_params = tracking_proto_to_params( + center_net_config.track_estimation_task) + + temporal_offset_params = None + if center_net_config.HasField('temporal_offset_task'): + temporal_offset_params = temporal_offset_proto_to_params( + center_net_config.temporal_offset_task) + + return center_net_meta_arch.CenterNetMetaArch( + is_training=is_training, + add_summaries=add_summaries, + num_classes=center_net_config.num_classes, + feature_extractor=feature_extractor, + image_resizer_fn=image_resizer_fn, + object_center_params=object_center_params, + object_detection_params=object_detection_params, + keypoint_params_dict=keypoint_params_dict, + mask_params=mask_params, + densepose_params=densepose_params, + track_params=track_params, + temporal_offset_params=temporal_offset_params, + 
use_depthwise=center_net_config.use_depthwise, + compute_heatmap_sparse=center_net_config.compute_heatmap_sparse) + + +def _build_center_net_feature_extractor( + feature_extractor_config): + """Build a CenterNet feature extractor from the given config.""" + + if feature_extractor_config.type not in CENTER_NET_EXTRACTOR_FUNCTION_MAP: + raise ValueError('\'{}\' is not a known CenterNet feature extractor type' + .format(feature_extractor_config.type)) + + return CENTER_NET_EXTRACTOR_FUNCTION_MAP[feature_extractor_config.type]( + channel_means=list(feature_extractor_config.channel_means), + channel_stds=list(feature_extractor_config.channel_stds), + bgr_ordering=feature_extractor_config.bgr_ordering + ) + + +META_ARCH_BUILDER_MAP = { + 'ssd': _build_ssd_model, + 'faster_rcnn': _build_faster_rcnn_model, + 'experimental_model': _build_experimental_model, + 'center_net': _build_center_net_model +} + + +def build(model_config, is_training, add_summaries=True): + """Builds a DetectionModel based on the model config. + + Args: + model_config: A model.proto object containing the config for the desired + DetectionModel. + is_training: True if this model is being built for training purposes. + add_summaries: Whether to add tensorflow summaries in the model graph. + Returns: + DetectionModel based on the config. + + Raises: + ValueError: On invalid meta architecture or model. + """ + if not isinstance(model_config, model_pb2.DetectionModel): + raise ValueError('model_config not of type model_pb2.DetectionModel.') + + meta_architecture = model_config.WhichOneof('model') + + if meta_architecture not in META_ARCH_BUILDER_MAP: + raise ValueError('Unknown meta architecture: {}'.format(meta_architecture)) + else: + build_func = META_ARCH_BUILDER_MAP[meta_architecture] + return build_func(getattr(model_config, meta_architecture), is_training, + add_summaries) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e992de3ff1ffee0ed8da62da7b4cc09cbc767326 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..34d3a06c3adc50f8dd8561d88cd2eb7b47902bba --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder_test.py @@ -0,0 +1,356 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object_detection.models.model_builder.""" + +from absl.testing import parameterized + +from google.protobuf import text_format +from object_detection.builders import model_builder +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.meta_architectures import rfcn_meta_arch +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.protos import hyperparams_pb2 +from object_detection.protos import losses_pb2 +from object_detection.protos import model_pb2 +from object_detection.utils import test_case + + +class ModelBuilderTest(test_case.TestCase, parameterized.TestCase): + + def default_ssd_feature_extractor(self): + raise NotImplementedError + + def default_faster_rcnn_feature_extractor(self): + raise NotImplementedError + + def ssd_feature_extractors(self): + raise NotImplementedError + + def get_override_base_feature_extractor_hyperparams(self, extractor_type): + raise NotImplementedError + + def faster_rcnn_feature_extractors(self): + raise NotImplementedError + + def create_model(self, model_config, is_training=True): + """Builds a DetectionModel based on the model config. + + Args: + model_config: A model.proto object containing the config for the desired + DetectionModel. + is_training: True if this model is being built for training purposes. + + Returns: + DetectionModel based on the config. + """ + return model_builder.build(model_config, is_training=is_training) + + def create_default_ssd_model_proto(self): + """Creates a DetectionModel proto with ssd model fields populated.""" + model_text_proto = """ + ssd { + feature_extractor { + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + box_coder { + faster_rcnn_box_coder { + } + } + matcher { + argmax_matcher { + } + } + similarity_calculator { + iou_similarity { + } + } + anchor_generator { + ssd_anchor_generator { + aspect_ratios: 1.0 + } + } + image_resizer { + fixed_shape_resizer { + height: 320 + width: 320 + } + } + box_predictor { + convolutional_box_predictor { + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + } + loss { + classification_loss { + weighted_softmax { + } + } + localization_loss { + weighted_smooth_l1 { + } + } + } + }""" + model_proto = model_pb2.DetectionModel() + text_format.Merge(model_text_proto, model_proto) + model_proto.ssd.feature_extractor.type = (self. 
+ default_ssd_feature_extractor()) + return model_proto + + def create_default_faster_rcnn_model_proto(self): + """Creates a DetectionModel proto with FasterRCNN model fields populated.""" + model_text_proto = """ + faster_rcnn { + inplace_batchnorm_update: false + num_classes: 3 + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 600 + max_dimension: 1024 + } + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + conv_hyperparams { + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + } + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.01 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 300 + } + score_converter: SOFTMAX + } + }""" + model_proto = model_pb2.DetectionModel() + text_format.Merge(model_text_proto, model_proto) + (model_proto.faster_rcnn.feature_extractor.type + ) = self.default_faster_rcnn_feature_extractor() + return model_proto + + def test_create_ssd_models_from_config(self): + model_proto = self.create_default_ssd_model_proto() + for extractor_type, extractor_class in self.ssd_feature_extractors().items( + ): + model_proto.ssd.feature_extractor.type = extractor_type + model_proto.ssd.feature_extractor.override_base_feature_extractor_hyperparams = ( + self.get_override_base_feature_extractor_hyperparams(extractor_type)) + model = model_builder.build(model_proto, is_training=True) + self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) + self.assertIsInstance(model._feature_extractor, extractor_class) + + def test_create_ssd_fpn_model_from_config(self): + model_proto = self.create_default_ssd_model_proto() + model_proto.ssd.feature_extractor.fpn.min_level = 3 + model_proto.ssd.feature_extractor.fpn.max_level = 7 + model = model_builder.build(model_proto, is_training=True) + self.assertEqual(model._feature_extractor._fpn_min_level, 3) + self.assertEqual(model._feature_extractor._fpn_max_level, 7) + + + @parameterized.named_parameters( + { + 'testcase_name': 'mask_rcnn_with_matmul', + 'use_matmul_crop_and_resize': False, + 'enable_mask_prediction': True + }, + { + 'testcase_name': 'mask_rcnn_without_matmul', + 'use_matmul_crop_and_resize': True, + 'enable_mask_prediction': True + }, + { + 'testcase_name': 'faster_rcnn_with_matmul', + 'use_matmul_crop_and_resize': False, + 'enable_mask_prediction': False + }, + { + 'testcase_name': 'faster_rcnn_without_matmul', + 'use_matmul_crop_and_resize': True, + 'enable_mask_prediction': False + }, + ) + def test_create_faster_rcnn_models_from_config(self, + use_matmul_crop_and_resize, + enable_mask_prediction): + model_proto = self.create_default_faster_rcnn_model_proto() + faster_rcnn_config = model_proto.faster_rcnn + faster_rcnn_config.use_matmul_crop_and_resize = use_matmul_crop_and_resize + if enable_mask_prediction: + faster_rcnn_config.second_stage_mask_prediction_loss_weight = 3.0 + mask_predictor_config = ( + 
faster_rcnn_config.second_stage_box_predictor.mask_rcnn_box_predictor) + mask_predictor_config.predict_instance_masks = True + + for extractor_type, extractor_class in ( + self.faster_rcnn_feature_extractors().items()): + faster_rcnn_config.feature_extractor.type = extractor_type + model = model_builder.build(model_proto, is_training=True) + self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch) + self.assertIsInstance(model._feature_extractor, extractor_class) + if enable_mask_prediction: + self.assertAlmostEqual(model._second_stage_mask_loss_weight, 3.0) + + def test_create_faster_rcnn_model_from_config_with_example_miner(self): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.hard_example_miner.num_hard_examples = 64 + model = model_builder.build(model_proto, is_training=True) + self.assertIsNotNone(model._hard_example_miner) + + def test_create_rfcn_model_from_config(self): + model_proto = self.create_default_faster_rcnn_model_proto() + rfcn_predictor_config = ( + model_proto.faster_rcnn.second_stage_box_predictor.rfcn_box_predictor) + rfcn_predictor_config.conv_hyperparams.op = hyperparams_pb2.Hyperparams.CONV + for extractor_type, extractor_class in ( + self.faster_rcnn_feature_extractors().items()): + model_proto.faster_rcnn.feature_extractor.type = extractor_type + model = model_builder.build(model_proto, is_training=True) + self.assertIsInstance(model, rfcn_meta_arch.RFCNMetaArch) + self.assertIsInstance(model._feature_extractor, extractor_class) + + @parameterized.parameters(True, False) + def test_create_faster_rcnn_from_config_with_crop_feature( + self, output_final_box_features): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.output_final_box_features = ( + output_final_box_features) + _ = model_builder.build(model_proto, is_training=True) + + def test_invalid_model_config_proto(self): + model_proto = '' + with self.assertRaisesRegex( + ValueError, 'model_config not of type model_pb2.DetectionModel.'): + model_builder.build(model_proto, is_training=True) + + def test_unknown_meta_architecture(self): + model_proto = model_pb2.DetectionModel() + with self.assertRaisesRegex(ValueError, 'Unknown meta architecture'): + model_builder.build(model_proto, is_training=True) + + def test_unknown_ssd_feature_extractor(self): + model_proto = self.create_default_ssd_model_proto() + model_proto.ssd.feature_extractor.type = 'unknown_feature_extractor' + with self.assertRaises(ValueError): + model_builder.build(model_proto, is_training=True) + + def test_unknown_faster_rcnn_feature_extractor(self): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.feature_extractor.type = 'unknown_feature_extractor' + with self.assertRaises(ValueError): + model_builder.build(model_proto, is_training=True) + + def test_invalid_first_stage_nms_iou_threshold(self): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.first_stage_nms_iou_threshold = 1.1 + with self.assertRaisesRegex(ValueError, + r'iou_threshold not in \[0, 1\.0\]'): + model_builder.build(model_proto, is_training=True) + model_proto.faster_rcnn.first_stage_nms_iou_threshold = -0.1 + with self.assertRaisesRegex(ValueError, + r'iou_threshold not in \[0, 1\.0\]'): + model_builder.build(model_proto, is_training=True) + + def test_invalid_second_stage_batch_size(self): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.first_stage_max_proposals = 1 + 
model_proto.faster_rcnn.second_stage_batch_size = 2 + with self.assertRaisesRegex( + ValueError, 'second_stage_batch_size should be no greater ' + 'than first_stage_max_proposals.'): + model_builder.build(model_proto, is_training=True) + + def test_invalid_faster_rcnn_batchnorm_update(self): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.inplace_batchnorm_update = True + with self.assertRaisesRegex(ValueError, + 'inplace batchnorm updates not supported'): + model_builder.build(model_proto, is_training=True) + + def test_create_experimental_model(self): + + model_text_proto = """ + experimental_model { + name: 'model42' + }""" + + build_func = lambda *args: 42 + model_builder.EXPERIMENTAL_META_ARCH_BUILDER_MAP['model42'] = build_func + model_proto = model_pb2.DetectionModel() + text_format.Merge(model_text_proto, model_proto) + + self.assertEqual(model_builder.build(model_proto, is_training=True), 42) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b8877a5f6e4e3d4a33fb9e3973384ef0685fc56d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder_tf1_test.py @@ -0,0 +1,58 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for model_builder under TensorFlow 1.X.""" +import unittest +from absl.testing import parameterized +import tensorflow.compat.v1 as tf + +from object_detection.builders import model_builder +from object_detection.builders import model_builder_test +from object_detection.meta_architectures import context_rcnn_meta_arch +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.protos import losses_pb2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ModelBuilderTF1Test(model_builder_test.ModelBuilderTest): + + def default_ssd_feature_extractor(self): + return 'ssd_resnet50_v1_fpn' + + def default_faster_rcnn_feature_extractor(self): + return 'faster_rcnn_resnet101' + + def ssd_feature_extractors(self): + return model_builder.SSD_FEATURE_EXTRACTOR_CLASS_MAP + + def get_override_base_feature_extractor_hyperparams(self, extractor_type): + return extractor_type in {'ssd_inception_v2', 'ssd_inception_v3'} + + def faster_rcnn_feature_extractors(self): + return model_builder.FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP + + + @parameterized.parameters(True, False) + def test_create_context_rcnn_from_config_with_params(self, is_training): + model_proto = self.create_default_faster_rcnn_model_proto() + model_proto.faster_rcnn.context_config.attention_bottleneck_dimension = 10 + model_proto.faster_rcnn.context_config.attention_temperature = 0.5 + model = model_builder.build(model_proto, is_training=is_training) + self.assertIsInstance(model, context_rcnn_meta_arch.ContextRCNNMetaArch) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9cbefdc0f1f598b380570d0b0ab140c29855d8d0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/model_builder_tf2_test.py @@ -0,0 +1,303 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for model_builder under TensorFlow 2.X.""" + +import os +import unittest + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import model_builder +from object_detection.builders import model_builder_test +from object_detection.core import losses +from object_detection.models import center_net_resnet_feature_extractor +from object_detection.protos import center_net_pb2 +from object_detection.protos import model_pb2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ModelBuilderTF2Test(model_builder_test.ModelBuilderTest): + + def default_ssd_feature_extractor(self): + return 'ssd_resnet50_v1_fpn_keras' + + def default_faster_rcnn_feature_extractor(self): + return 'faster_rcnn_resnet101_keras' + + def ssd_feature_extractors(self): + return model_builder.SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP + + def get_override_base_feature_extractor_hyperparams(self, extractor_type): + return extractor_type in {} + + def faster_rcnn_feature_extractors(self): + return model_builder.FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP + + def get_fake_label_map_file_path(self): + keypoint_spec_text = """ + item { + name: "/m/01g317" + id: 1 + display_name: "person" + keypoints { + id: 0 + label: 'nose' + } + keypoints { + id: 1 + label: 'left_shoulder' + } + keypoints { + id: 2 + label: 'right_shoulder' + } + keypoints { + id: 3 + label: 'hip' + } + } + """ + keypoint_label_map_path = os.path.join( + self.get_temp_dir(), 'keypoint_label_map') + with tf.gfile.Open(keypoint_label_map_path, 'wb') as f: + f.write(keypoint_spec_text) + return keypoint_label_map_path + + def get_fake_keypoint_proto(self): + task_proto_txt = """ + task_name: "human_pose" + task_loss_weight: 0.9 + keypoint_regression_loss_weight: 1.0 + keypoint_heatmap_loss_weight: 0.1 + keypoint_offset_loss_weight: 0.5 + heatmap_bias_init: 2.14 + keypoint_class_name: "/m/01g317" + loss { + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 3.0 + beta: 4.0 + } + } + localization_loss { + l1_localization_loss { + } + } + } + keypoint_label_to_std { + key: "nose" + value: 0.3 + } + keypoint_label_to_std { + key: "hip" + value: 0.0 + } + keypoint_candidate_score_threshold: 0.3 + num_candidates_per_keypoint: 12 + peak_max_pool_kernel_size: 5 + unmatched_keypoint_score: 0.05 + box_scale: 1.7 + candidate_search_scale: 0.2 + candidate_ranking_mode: "score_distance_ratio" + offset_peak_radius: 3 + per_keypoint_offset: true + """ + config = text_format.Merge(task_proto_txt, + center_net_pb2.CenterNet.KeypointEstimation()) + return config + + def get_fake_object_center_proto(self): + proto_txt = """ + object_center_loss_weight: 0.5 + heatmap_bias_init: 3.14 + min_box_overlap_iou: 0.2 + max_box_predictions: 15 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 3.0 + beta: 4.0 + } + } + """ + return text_format.Merge(proto_txt, + center_net_pb2.CenterNet.ObjectCenterParams()) + + def get_fake_object_detection_proto(self): + proto_txt = """ + task_loss_weight: 0.5 + offset_loss_weight: 0.1 + scale_loss_weight: 0.2 + localization_loss { + l1_localization_loss { + } + } + """ + return text_format.Merge(proto_txt, + center_net_pb2.CenterNet.ObjectDetection()) + + def get_fake_mask_proto(self): + proto_txt = """ + task_loss_weight: 0.7 + classification_loss { + weighted_softmax {} + } + mask_height: 8 + 
mask_width: 8 + score_threshold: 0.7 + heatmap_bias_init: -2.0 + """ + return text_format.Merge(proto_txt, + center_net_pb2.CenterNet.MaskEstimation()) + + def get_fake_densepose_proto(self): + proto_txt = """ + task_loss_weight: 0.5 + class_id: 0 + loss { + classification_loss { + weighted_softmax {} + } + localization_loss { + l1_localization_loss { + } + } + } + num_parts: 24 + part_loss_weight: 1.0 + coordinate_loss_weight: 2.0 + upsample_to_input_res: true + heatmap_bias_init: -2.0 + """ + return text_format.Merge(proto_txt, + center_net_pb2.CenterNet.DensePoseEstimation()) + + def test_create_center_net_model(self): + """Test building a CenterNet model from proto txt.""" + proto_txt = """ + center_net { + num_classes: 10 + feature_extractor { + type: "resnet_v2_101" + channel_stds: [4, 5, 6] + bgr_ordering: true + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + } + """ + # Set up the configuration proto. + config = text_format.Merge(proto_txt, model_pb2.DetectionModel()) + config.center_net.object_center_params.CopyFrom( + self.get_fake_object_center_proto()) + config.center_net.object_detection_task.CopyFrom( + self.get_fake_object_detection_proto()) + config.center_net.keypoint_estimation_task.append( + self.get_fake_keypoint_proto()) + config.center_net.keypoint_label_map_path = ( + self.get_fake_label_map_file_path()) + config.center_net.mask_estimation_task.CopyFrom( + self.get_fake_mask_proto()) + config.center_net.densepose_estimation_task.CopyFrom( + self.get_fake_densepose_proto()) + + # Build the model from the configuration. + model = model_builder.build(config, is_training=True) + + # Check object center related parameters. + self.assertEqual(model._num_classes, 10) + self.assertIsInstance(model._center_params.classification_loss, + losses.PenaltyReducedLogisticFocalLoss) + self.assertEqual(model._center_params.classification_loss._alpha, 3.0) + self.assertEqual(model._center_params.classification_loss._beta, 4.0) + self.assertAlmostEqual(model._center_params.min_box_overlap_iou, 0.2) + self.assertAlmostEqual( + model._center_params.heatmap_bias_init, 3.14, places=4) + self.assertEqual(model._center_params.max_box_predictions, 15) + + # Check object detection related parameters. + self.assertAlmostEqual(model._od_params.offset_loss_weight, 0.1) + self.assertAlmostEqual(model._od_params.scale_loss_weight, 0.2) + self.assertAlmostEqual(model._od_params.task_loss_weight, 0.5) + self.assertIsInstance(model._od_params.localization_loss, + losses.L1LocalizationLoss) + + # Check keypoint estimation related parameters. 
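+    # Keypoint std devs default to 1.0 and are only overridden per label via
+    # keypoint_label_to_std, hence the expected [0.3, 1.0, 1.0, 0.0] below.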
+ kp_params = model._kp_params_dict['human_pose'] + self.assertAlmostEqual(kp_params.task_loss_weight, 0.9) + self.assertAlmostEqual(kp_params.keypoint_regression_loss_weight, 1.0) + self.assertAlmostEqual(kp_params.keypoint_offset_loss_weight, 0.5) + self.assertAlmostEqual(kp_params.heatmap_bias_init, 2.14, places=4) + self.assertEqual(kp_params.classification_loss._alpha, 3.0) + self.assertEqual(kp_params.keypoint_indices, [0, 1, 2, 3]) + self.assertEqual(kp_params.keypoint_labels, + ['nose', 'left_shoulder', 'right_shoulder', 'hip']) + self.assertAllClose(kp_params.keypoint_std_dev, [0.3, 1.0, 1.0, 0.0]) + self.assertEqual(kp_params.classification_loss._beta, 4.0) + self.assertIsInstance(kp_params.localization_loss, + losses.L1LocalizationLoss) + self.assertAlmostEqual(kp_params.keypoint_candidate_score_threshold, 0.3) + self.assertEqual(kp_params.num_candidates_per_keypoint, 12) + self.assertEqual(kp_params.peak_max_pool_kernel_size, 5) + self.assertAlmostEqual(kp_params.unmatched_keypoint_score, 0.05) + self.assertAlmostEqual(kp_params.box_scale, 1.7) + self.assertAlmostEqual(kp_params.candidate_search_scale, 0.2) + self.assertEqual(kp_params.candidate_ranking_mode, 'score_distance_ratio') + self.assertEqual(kp_params.offset_peak_radius, 3) + self.assertEqual(kp_params.per_keypoint_offset, True) + + # Check mask related parameters. + self.assertAlmostEqual(model._mask_params.task_loss_weight, 0.7) + self.assertIsInstance(model._mask_params.classification_loss, + losses.WeightedSoftmaxClassificationLoss) + self.assertEqual(model._mask_params.mask_height, 8) + self.assertEqual(model._mask_params.mask_width, 8) + self.assertAlmostEqual(model._mask_params.score_threshold, 0.7) + self.assertAlmostEqual( + model._mask_params.heatmap_bias_init, -2.0, places=4) + + # Check DensePose related parameters. + self.assertEqual(model._densepose_params.class_id, 0) + self.assertIsInstance(model._densepose_params.classification_loss, + losses.WeightedSoftmaxClassificationLoss) + self.assertIsInstance(model._densepose_params.localization_loss, + losses.L1LocalizationLoss) + self.assertAlmostEqual(model._densepose_params.part_loss_weight, 1.0) + self.assertAlmostEqual(model._densepose_params.coordinate_loss_weight, 2.0) + self.assertEqual(model._densepose_params.num_parts, 24) + self.assertAlmostEqual(model._densepose_params.task_loss_weight, 0.5) + self.assertTrue(model._densepose_params.upsample_to_input_res) + self.assertEqual(model._densepose_params.upsample_method, 'bilinear') + self.assertAlmostEqual( + model._densepose_params.heatmap_bias_init, -2.0, places=4) + + # Check feature extractor parameters. + self.assertIsInstance( + model._feature_extractor, + center_net_resnet_feature_extractor.CenterNetResnetFeatureExtractor) + self.assertAllClose(model._feature_extractor._channel_means, [0, 0, 0]) + self.assertAllClose(model._feature_extractor._channel_stds, [4, 5, 6]) + self.assertTrue(model._feature_extractor._bgr_ordering) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..d602bad1292e222b5cbc532a873299dd918ef011 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder.py @@ -0,0 +1,205 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions to build DetectionModel training optimizers.""" + +import tensorflow.compat.v1 as tf + +from object_detection.utils import learning_schedules + +try: + from tensorflow.contrib import opt as tf_opt # pylint: disable=g-import-not-at-top +except: # pylint: disable=bare-except + pass + + +def build_optimizers_tf_v1(optimizer_config, global_step=None): + """Create a TF v1 compatible optimizer based on config. + + Args: + optimizer_config: A Optimizer proto message. + global_step: A variable representing the current step. + If None, defaults to tf.train.get_or_create_global_step() + + Returns: + An optimizer and a list of variables for summary. + + Raises: + ValueError: when using an unsupported input data type. + """ + optimizer_type = optimizer_config.WhichOneof('optimizer') + optimizer = None + + summary_vars = [] + if optimizer_type == 'rms_prop_optimizer': + config = optimizer_config.rms_prop_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.train.RMSPropOptimizer( + learning_rate, + decay=config.decay, + momentum=config.momentum_optimizer_value, + epsilon=config.epsilon) + + if optimizer_type == 'momentum_optimizer': + config = optimizer_config.momentum_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.train.MomentumOptimizer( + learning_rate, + momentum=config.momentum_optimizer_value) + + if optimizer_type == 'adam_optimizer': + config = optimizer_config.adam_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=config.epsilon) + + + if optimizer is None: + raise ValueError('Optimizer %s not supported.' % optimizer_type) + + if optimizer_config.use_moving_average: + optimizer = tf_opt.MovingAverageOptimizer( + optimizer, average_decay=optimizer_config.moving_average_decay) + + return optimizer, summary_vars + + +def build_optimizers_tf_v2(optimizer_config, global_step=None): + """Create a TF v2 compatible optimizer based on config. + + Args: + optimizer_config: A Optimizer proto message. + global_step: A variable representing the current step. + If None, defaults to tf.train.get_or_create_global_step() + + Returns: + An optimizer and a list of variables for summary. + + Raises: + ValueError: when using an unsupported input data type. 
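+
+  Example (illustrative; assumes a populated optimizer_pb2.Optimizer message):
+    optimizer, summary_vars = build_optimizers_tf_v2(optimizer_config)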
+ """ + optimizer_type = optimizer_config.WhichOneof('optimizer') + optimizer = None + + summary_vars = [] + if optimizer_type == 'rms_prop_optimizer': + config = optimizer_config.rms_prop_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.keras.optimizers.RMSprop( + learning_rate, + decay=config.decay, + momentum=config.momentum_optimizer_value, + epsilon=config.epsilon) + + if optimizer_type == 'momentum_optimizer': + config = optimizer_config.momentum_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.keras.optimizers.SGD( + learning_rate, + momentum=config.momentum_optimizer_value) + + if optimizer_type == 'adam_optimizer': + config = optimizer_config.adam_optimizer + learning_rate = _create_learning_rate(config.learning_rate, + global_step=global_step) + summary_vars.append(learning_rate) + optimizer = tf.keras.optimizers.Adam(learning_rate, epsilon=config.epsilon) + + if optimizer is None: + raise ValueError('Optimizer %s not supported.' % optimizer_type) + + if optimizer_config.use_moving_average: + raise ValueError('Moving average not supported in eager mode.') + + return optimizer, summary_vars + + +def build(config, global_step=None): + + if tf.executing_eagerly(): + return build_optimizers_tf_v2(config, global_step) + else: + return build_optimizers_tf_v1(config, global_step) + + +def _create_learning_rate(learning_rate_config, global_step=None): + """Create optimizer learning rate based on config. + + Args: + learning_rate_config: A LearningRate proto message. + global_step: A variable representing the current step. + If None, defaults to tf.train.get_or_create_global_step() + + Returns: + A learning rate. + + Raises: + ValueError: when using an unsupported input data type. 
+ """ + if global_step is None: + global_step = tf.train.get_or_create_global_step() + learning_rate = None + learning_rate_type = learning_rate_config.WhichOneof('learning_rate') + if learning_rate_type == 'constant_learning_rate': + config = learning_rate_config.constant_learning_rate + learning_rate = tf.constant(config.learning_rate, dtype=tf.float32, + name='learning_rate') + + if learning_rate_type == 'exponential_decay_learning_rate': + config = learning_rate_config.exponential_decay_learning_rate + learning_rate = learning_schedules.exponential_decay_with_burnin( + global_step, + config.initial_learning_rate, + config.decay_steps, + config.decay_factor, + burnin_learning_rate=config.burnin_learning_rate, + burnin_steps=config.burnin_steps, + min_learning_rate=config.min_learning_rate, + staircase=config.staircase) + + if learning_rate_type == 'manual_step_learning_rate': + config = learning_rate_config.manual_step_learning_rate + if not config.schedule: + raise ValueError('Empty learning rate schedule.') + learning_rate_step_boundaries = [x.step for x in config.schedule] + learning_rate_sequence = [config.initial_learning_rate] + learning_rate_sequence += [x.learning_rate for x in config.schedule] + learning_rate = learning_schedules.manual_stepping( + global_step, learning_rate_step_boundaries, + learning_rate_sequence, config.warmup) + + if learning_rate_type == 'cosine_decay_learning_rate': + config = learning_rate_config.cosine_decay_learning_rate + learning_rate = learning_schedules.cosine_decay_with_warmup( + global_step, + config.learning_rate_base, + config.total_steps, + config.warmup_learning_rate, + config.warmup_steps, + config.hold_base_rate_steps) + + if learning_rate is None: + raise ValueError('Learning_rate %s not supported.' % learning_rate_type) + + return learning_rate diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1aaa57ee039c11566981eb3dfe78fdd5165f1c4b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..350ecb84b11b3fbd87e584a5d8d23ae877089078 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder_tf1_test.py @@ -0,0 +1,224 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for optimizer_builder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import six +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import optimizer_builder +from object_detection.protos import optimizer_pb2 +from object_detection.utils import tf_version + +# pylint: disable=g-import-not-at-top +if tf_version.is_tf1(): + from tensorflow.contrib import opt as contrib_opt +# pylint: enable=g-import-not-at-top + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class LearningRateBuilderTest(tf.test.TestCase): + + def testBuildConstantLearningRate(self): + learning_rate_text_proto = """ + constant_learning_rate { + learning_rate: 0.004 + } + """ + learning_rate_proto = optimizer_pb2.LearningRate() + text_format.Merge(learning_rate_text_proto, learning_rate_proto) + learning_rate = optimizer_builder._create_learning_rate( + learning_rate_proto) + self.assertTrue( + six.ensure_str(learning_rate.op.name).endswith('learning_rate')) + with self.test_session(): + learning_rate_out = learning_rate.eval() + self.assertAlmostEqual(learning_rate_out, 0.004) + + def testBuildExponentialDecayLearningRate(self): + learning_rate_text_proto = """ + exponential_decay_learning_rate { + initial_learning_rate: 0.004 + decay_steps: 99999 + decay_factor: 0.85 + staircase: false + } + """ + learning_rate_proto = optimizer_pb2.LearningRate() + text_format.Merge(learning_rate_text_proto, learning_rate_proto) + learning_rate = optimizer_builder._create_learning_rate( + learning_rate_proto) + self.assertTrue( + six.ensure_str(learning_rate.op.name).endswith('learning_rate')) + self.assertIsInstance(learning_rate, tf.Tensor) + + def testBuildManualStepLearningRate(self): + learning_rate_text_proto = """ + manual_step_learning_rate { + initial_learning_rate: 0.002 + schedule { + step: 100 + learning_rate: 0.006 + } + schedule { + step: 90000 + learning_rate: 0.00006 + } + warmup: true + } + """ + learning_rate_proto = optimizer_pb2.LearningRate() + text_format.Merge(learning_rate_text_proto, learning_rate_proto) + learning_rate = optimizer_builder._create_learning_rate( + learning_rate_proto) + self.assertIsInstance(learning_rate, tf.Tensor) + + def testBuildCosineDecayLearningRate(self): + learning_rate_text_proto = """ + cosine_decay_learning_rate { + learning_rate_base: 0.002 + total_steps: 20000 + warmup_learning_rate: 0.0001 + warmup_steps: 1000 + hold_base_rate_steps: 20000 + } + """ + learning_rate_proto = optimizer_pb2.LearningRate() + text_format.Merge(learning_rate_text_proto, learning_rate_proto) + learning_rate = optimizer_builder._create_learning_rate( + learning_rate_proto) + self.assertIsInstance(learning_rate, tf.Tensor) + + def testRaiseErrorOnEmptyLearningRate(self): + learning_rate_text_proto = """ + """ + learning_rate_proto = optimizer_pb2.LearningRate() + text_format.Merge(learning_rate_text_proto, learning_rate_proto) + with self.assertRaises(ValueError): + optimizer_builder._create_learning_rate(learning_rate_proto) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class OptimizerBuilderTest(tf.test.TestCase): + + def testBuildRMSPropOptimizer(self): + optimizer_text_proto = """ + rms_prop_optimizer: { + learning_rate: { + exponential_decay_learning_rate { + initial_learning_rate: 0.004 + decay_steps: 800720 + 
decay_factor: 0.95 + } + } + momentum_optimizer_value: 0.9 + decay: 0.9 + epsilon: 1.0 + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.train.RMSPropOptimizer) + + def testBuildMomentumOptimizer(self): + optimizer_text_proto = """ + momentum_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.001 + } + } + momentum_optimizer_value: 0.99 + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.train.MomentumOptimizer) + + def testBuildAdamOptimizer(self): + optimizer_text_proto = """ + adam_optimizer: { + epsilon: 1e-6 + learning_rate: { + constant_learning_rate { + learning_rate: 0.002 + } + } + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.train.AdamOptimizer) + + def testBuildMovingAverageOptimizer(self): + optimizer_text_proto = """ + adam_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.002 + } + } + } + use_moving_average: True + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, contrib_opt.MovingAverageOptimizer) + + def testBuildMovingAverageOptimizerWithNonDefaultDecay(self): + optimizer_text_proto = """ + adam_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.002 + } + } + } + use_moving_average: True + moving_average_decay: 0.2 + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, contrib_opt.MovingAverageOptimizer) + # TODO(rathodv): Find a way to not depend on the private members. + self.assertAlmostEqual(optimizer._ema._decay, 0.2) + + def testBuildEmptyOptimizer(self): + optimizer_text_proto = """ + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + with self.assertRaises(ValueError): + optimizer_builder.build(optimizer_proto) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2c555f9a0f4c22b7c27955c92eaa3655c8fae5c6 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/optimizer_builder_tf2_test.py @@ -0,0 +1,104 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for optimizer_builder.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import optimizer_builder +from object_detection.protos import optimizer_pb2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class OptimizerBuilderV2Test(tf.test.TestCase): + """Test building optimizers in V2 mode.""" + + def testBuildRMSPropOptimizer(self): + optimizer_text_proto = """ + rms_prop_optimizer: { + learning_rate: { + exponential_decay_learning_rate { + initial_learning_rate: 0.004 + decay_steps: 800720 + decay_factor: 0.95 + } + } + momentum_optimizer_value: 0.9 + decay: 0.9 + epsilon: 1.0 + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.keras.optimizers.RMSprop) + + def testBuildMomentumOptimizer(self): + optimizer_text_proto = """ + momentum_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.001 + } + } + momentum_optimizer_value: 0.99 + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.keras.optimizers.SGD) + + def testBuildAdamOptimizer(self): + optimizer_text_proto = """ + adam_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.002 + } + } + } + use_moving_average: false + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + optimizer, _ = optimizer_builder.build(optimizer_proto) + self.assertIsInstance(optimizer, tf.keras.optimizers.Adam) + + def testMovingAverageOptimizerUnsupported(self): + optimizer_text_proto = """ + adam_optimizer: { + learning_rate: { + constant_learning_rate { + learning_rate: 0.002 + } + } + } + use_moving_average: True + """ + optimizer_proto = optimizer_pb2.Optimizer() + text_format.Merge(optimizer_text_proto, optimizer_proto) + with self.assertRaises(ValueError): + optimizer_builder.build(optimizer_proto) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/post_processing_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/post_processing_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..c61f6891e29eeced8ba5fbe3f78fe1c95eb60501 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/post_processing_builder.py @@ -0,0 +1,183 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Builder function for post processing operations.""" +import functools + +import tensorflow.compat.v1 as tf +from object_detection.builders import calibration_builder +from object_detection.core import post_processing +from object_detection.protos import post_processing_pb2 + + +def build(post_processing_config): + """Builds callables for post-processing operations. + + Builds callables for non-max suppression, score conversion, and (optionally) + calibration based on the configuration. + + Non-max suppression callable takes `boxes`, `scores`, and optionally + `clip_window`, `parallel_iterations` `masks, and `scope` as inputs. It returns + `nms_boxes`, `nms_scores`, `nms_classes` `nms_masks` and `num_detections`. See + post_processing.batch_multiclass_non_max_suppression for the type and shape + of these tensors. + + Score converter callable should be called with `input` tensor. The callable + returns the output from one of 3 tf operations based on the configuration - + tf.identity, tf.sigmoid or tf.nn.softmax. If a calibration config is provided, + score_converter also applies calibration transformations, as defined in + calibration_builder.py. See tensorflow documentation for argument and return + value descriptions. + + Args: + post_processing_config: post_processing.proto object containing the + parameters for the post-processing operations. + + Returns: + non_max_suppressor_fn: Callable for non-max suppression. + score_converter_fn: Callable for score conversion. + + Raises: + ValueError: if the post_processing_config is of incorrect type. + """ + if not isinstance(post_processing_config, post_processing_pb2.PostProcessing): + raise ValueError('post_processing_config not of type ' + 'post_processing_pb2.Postprocessing.') + non_max_suppressor_fn = _build_non_max_suppressor( + post_processing_config.batch_non_max_suppression) + score_converter_fn = _build_score_converter( + post_processing_config.score_converter, + post_processing_config.logit_scale) + if post_processing_config.HasField('calibration_config'): + score_converter_fn = _build_calibrated_score_converter( + score_converter_fn, + post_processing_config.calibration_config) + return non_max_suppressor_fn, score_converter_fn + + +def _build_non_max_suppressor(nms_config): + """Builds non-max suppresson based on the nms config. + + Args: + nms_config: post_processing_pb2.PostProcessing.BatchNonMaxSuppression proto. + + Returns: + non_max_suppressor_fn: Callable non-max suppressor. + + Raises: + ValueError: On incorrect iou_threshold or on incompatible values of + max_total_detections and max_detections_per_class or on negative + soft_nms_sigma. 
+ """ + if nms_config.iou_threshold < 0 or nms_config.iou_threshold > 1.0: + raise ValueError('iou_threshold not in [0, 1.0].') + if nms_config.max_detections_per_class > nms_config.max_total_detections: + raise ValueError('max_detections_per_class should be no greater than ' + 'max_total_detections.') + if nms_config.soft_nms_sigma < 0.0: + raise ValueError('soft_nms_sigma should be non-negative.') + if nms_config.use_combined_nms and nms_config.use_class_agnostic_nms: + raise ValueError('combined_nms does not support class_agnostic_nms.') + non_max_suppressor_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=nms_config.score_threshold, + iou_thresh=nms_config.iou_threshold, + max_size_per_class=nms_config.max_detections_per_class, + max_total_size=nms_config.max_total_detections, + use_static_shapes=nms_config.use_static_shapes, + use_class_agnostic_nms=nms_config.use_class_agnostic_nms, + max_classes_per_detection=nms_config.max_classes_per_detection, + soft_nms_sigma=nms_config.soft_nms_sigma, + use_partitioned_nms=nms_config.use_partitioned_nms, + use_combined_nms=nms_config.use_combined_nms, + change_coordinate_frame=nms_config.change_coordinate_frame, + use_hard_nms=nms_config.use_hard_nms, + use_cpu_nms=nms_config.use_cpu_nms) + + return non_max_suppressor_fn + + +def _score_converter_fn_with_logit_scale(tf_score_converter_fn, logit_scale): + """Create a function to scale logits then apply a Tensorflow function.""" + def score_converter_fn(logits): + scaled_logits = tf.multiply(logits, 1.0 / logit_scale, name='scale_logits') + return tf_score_converter_fn(scaled_logits, name='convert_scores') + score_converter_fn.__name__ = '%s_with_logit_scale' % ( + tf_score_converter_fn.__name__) + return score_converter_fn + + +def _build_score_converter(score_converter_config, logit_scale): + """Builds score converter based on the config. + + Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on + the config. + + Args: + score_converter_config: post_processing_pb2.PostProcessing.score_converter. + logit_scale: temperature to use for SOFTMAX score_converter. + + Returns: + Callable score converter op. + + Raises: + ValueError: On unknown score converter. + """ + if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY: + return _score_converter_fn_with_logit_scale(tf.identity, logit_scale) + if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID: + return _score_converter_fn_with_logit_scale(tf.sigmoid, logit_scale) + if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX: + return _score_converter_fn_with_logit_scale(tf.nn.softmax, logit_scale) + raise ValueError('Unknown score converter.') + + +def _build_calibrated_score_converter(score_converter_fn, calibration_config): + """Wraps a score_converter_fn, adding a calibration step. + + Builds a score converter function with a calibration transformation according + to calibration_builder.py. The score conversion function may be applied before + or after the calibration transformation, depending on the calibration method. + If the method is temperature scaling, the score conversion is + after the calibration transformation. Otherwise, the score conversion is + before the calibration transformation. Calibration applies positive monotonic + transformations to inputs (i.e. score ordering is strictly preserved or + adjacent scores are mapped to the same score). 
When calibration is
+  class-agnostic, the highest-scoring class remains unchanged, unless two
+  adjacent scores are mapped to the same value and one class is arbitrarily
+  selected to break the tie. In per-class calibration, it's possible (though
+  rare in practice) that the highest-scoring class will change, since positive
+  monotonicity is only required to hold within each class.
+
+  Args:
+    score_converter_fn: callable that takes logit scores as input.
+    calibration_config: post_processing_pb2.PostProcessing.calibration_config.
+
+  Returns:
+    Callable calibrated score converter op.
+  """
+  calibration_fn = calibration_builder.build(calibration_config)
+  def calibrated_score_converter_fn(logits):
+    if (calibration_config.WhichOneof('calibrator') ==
+        'temperature_scaling_calibration'):
+      calibrated_logits = calibration_fn(logits)
+      return score_converter_fn(calibrated_logits)
+    else:
+      converted_logits = score_converter_fn(logits)
+      return calibration_fn(converted_logits)
+
+  calibrated_score_converter_fn.__name__ = (
+      'calibrate_with_%s' % calibration_config.WhichOneof('calibrator'))
+  return calibrated_score_converter_fn
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/post_processing_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/post_processing_builder.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54046288901ce3a8e643a15818b7591cb4607ca6
Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/post_processing_builder.pyc differ
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/post_processing_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/post_processing_builder_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7383c92f99637ebf660d40a6074c65b03abd3c5
--- /dev/null
+++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/post_processing_builder_test.py
@@ -0,0 +1,185 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== + +"""Tests for post_processing_builder.""" + +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from object_detection.builders import post_processing_builder +from object_detection.protos import post_processing_pb2 +from object_detection.utils import test_case + + +class PostProcessingBuilderTest(test_case.TestCase): + + def test_build_non_max_suppressor_with_correct_parameters(self): + post_processing_text_proto = """ + batch_non_max_suppression { + score_threshold: 0.7 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 300 + soft_nms_sigma: 0.4 + } + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + non_max_suppressor, _ = post_processing_builder.build( + post_processing_config) + self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 100) + self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300) + self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7) + self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6) + self.assertAlmostEqual(non_max_suppressor.keywords['soft_nms_sigma'], 0.4) + + def test_build_non_max_suppressor_with_correct_parameters_classagnostic_nms( + self): + post_processing_text_proto = """ + batch_non_max_suppression { + score_threshold: 0.7 + iou_threshold: 0.6 + max_detections_per_class: 10 + max_total_detections: 300 + use_class_agnostic_nms: True + max_classes_per_detection: 1 + } + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + non_max_suppressor, _ = post_processing_builder.build( + post_processing_config) + self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 10) + self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300) + self.assertEqual(non_max_suppressor.keywords['max_classes_per_detection'], + 1) + self.assertEqual(non_max_suppressor.keywords['use_class_agnostic_nms'], + True) + self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7) + self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6) + + def test_build_identity_score_converter(self): + post_processing_text_proto = """ + score_converter: IDENTITY + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, score_converter = post_processing_builder.build( + post_processing_config) + self.assertEqual(score_converter.__name__, 'identity_with_logit_scale') + def graph_fn(): + inputs = tf.constant([1, 1], tf.float32) + outputs = score_converter(inputs) + return outputs + converted_scores = self.execute_cpu(graph_fn, []) + self.assertAllClose(converted_scores, [1, 1]) + + def test_build_identity_score_converter_with_logit_scale(self): + post_processing_text_proto = """ + score_converter: IDENTITY + logit_scale: 2.0 + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, score_converter = post_processing_builder.build(post_processing_config) + self.assertEqual(score_converter.__name__, 'identity_with_logit_scale') + + def graph_fn(): + inputs = tf.constant([1, 1], tf.float32) + outputs = score_converter(inputs) + return outputs + converted_scores = self.execute_cpu(graph_fn, []) + 
self.assertAllClose(converted_scores, [.5, .5]) + + def test_build_sigmoid_score_converter(self): + post_processing_text_proto = """ + score_converter: SIGMOID + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, score_converter = post_processing_builder.build(post_processing_config) + self.assertEqual(score_converter.__name__, 'sigmoid_with_logit_scale') + + def test_build_softmax_score_converter(self): + post_processing_text_proto = """ + score_converter: SOFTMAX + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, score_converter = post_processing_builder.build(post_processing_config) + self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale') + + def test_build_softmax_score_converter_with_temperature(self): + post_processing_text_proto = """ + score_converter: SOFTMAX + logit_scale: 2.0 + """ + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, score_converter = post_processing_builder.build(post_processing_config) + self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale') + + def test_build_calibrator_with_nonempty_config(self): + """Test that identity function used when no calibration_config specified.""" + # Calibration config maps all scores to 0.5. + post_processing_text_proto = """ + score_converter: SOFTMAX + calibration_config { + function_approximation { + x_y_pairs { + x_y_pair { + x: 0.0 + y: 0.5 + } + x_y_pair { + x: 1.0 + y: 0.5 + }}}}""" + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, calibrated_score_conversion_fn = post_processing_builder.build( + post_processing_config) + self.assertEqual(calibrated_score_conversion_fn.__name__, + 'calibrate_with_function_approximation') + + def graph_fn(): + input_scores = tf.constant([1, 1], tf.float32) + outputs = calibrated_score_conversion_fn(input_scores) + return outputs + calibrated_scores = self.execute_cpu(graph_fn, []) + self.assertAllClose(calibrated_scores, [0.5, 0.5]) + + def test_build_temperature_scaling_calibrator(self): + post_processing_text_proto = """ + score_converter: SOFTMAX + calibration_config { + temperature_scaling_calibration { + scaler: 2.0 + }}""" + post_processing_config = post_processing_pb2.PostProcessing() + text_format.Merge(post_processing_text_proto, post_processing_config) + _, calibrated_score_conversion_fn = post_processing_builder.build( + post_processing_config) + self.assertEqual(calibrated_score_conversion_fn.__name__, + 'calibrate_with_temperature_scaling_calibration') + + def graph_fn(): + input_scores = tf.constant([1, 1], tf.float32) + outputs = calibrated_score_conversion_fn(input_scores) + return outputs + calibrated_scores = self.execute_cpu(graph_fn, []) + self.assertAllClose(calibrated_scores, [0.5, 0.5]) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/preprocessor_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/preprocessor_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..b61239d2e1ec87c232fbfdb53d0cb3c39da26e3e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/preprocessor_builder.py @@ 
-0,0 +1,428 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Builder for preprocessing steps.""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import preprocessor +from object_detection.protos import preprocessor_pb2 + + +def _get_step_config_from_proto(preprocessor_step_config, step_name): + """Returns the value of a field named step_name from proto. + + Args: + preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object. + step_name: Name of the field to get value from. + + Returns: + result_dict: a sub proto message from preprocessor_step_config which will be + later converted to a dictionary. + + Raises: + ValueError: If field does not exist in proto. + """ + for field, value in preprocessor_step_config.ListFields(): + if field.name == step_name: + return value + + raise ValueError('Could not get field %s from proto!' % step_name) + + +def _get_dict_from_proto(config): + """Helper function to put all proto fields into a dictionary. + + For many preprocessing steps, there's an trivial 1-1 mapping from proto fields + to function arguments. This function automatically populates a dictionary with + the arguments from the proto. + + Protos that CANNOT be trivially populated include: + * nested messages. + * steps that check if an optional field is set (ie. where None != 0). + * protos that don't map 1-1 to arguments (ie. list should be reshaped). + * fields requiring additional validation (ie. repeated field has n elements). + + Args: + config: A protobuf object that does not violate the conditions above. + + Returns: + result_dict: |config| converted into a python dictionary. + """ + result_dict = {} + for field, value in config.ListFields(): + result_dict[field.name] = value + return result_dict + + +# A map from a PreprocessingStep proto config field name to the preprocessing +# function that should be used. The PreprocessingStep proto should be parsable +# with _get_dict_from_proto. 
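+#
+# For example, a step configured in the pipeline proto as
+#   random_adjust_brightness { max_delta: 0.2 }
+# is dispatched through this map, and _get_dict_from_proto converts its fields
+# into the kwargs {'max_delta': 0.2} for preprocessor.random_adjust_brightness.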
+PREPROCESSING_FUNCTION_MAP = { + 'normalize_image': + preprocessor.normalize_image, + 'random_pixel_value_scale': + preprocessor.random_pixel_value_scale, + 'random_image_scale': + preprocessor.random_image_scale, + 'random_rgb_to_gray': + preprocessor.random_rgb_to_gray, + 'random_adjust_brightness': + preprocessor.random_adjust_brightness, + 'random_adjust_contrast': + preprocessor.random_adjust_contrast, + 'random_adjust_hue': + preprocessor.random_adjust_hue, + 'random_adjust_saturation': + preprocessor.random_adjust_saturation, + 'random_distort_color': + preprocessor.random_distort_color, + 'random_jitter_boxes': + preprocessor.random_jitter_boxes, + 'random_crop_to_aspect_ratio': + preprocessor.random_crop_to_aspect_ratio, + 'random_black_patches': + preprocessor.random_black_patches, + 'random_jpeg_quality': + preprocessor.random_jpeg_quality, + 'random_downscale_to_target_pixels': + preprocessor.random_downscale_to_target_pixels, + 'random_patch_gaussian': + preprocessor.random_patch_gaussian, + 'rgb_to_gray': + preprocessor.rgb_to_gray, + 'scale_boxes_to_pixel_coordinates': ( + preprocessor.scale_boxes_to_pixel_coordinates), + 'subtract_channel_mean': + preprocessor.subtract_channel_mean, + 'convert_class_logits_to_softmax': + preprocessor.convert_class_logits_to_softmax, +} + + +# A map to convert from preprocessor_pb2.ResizeImage.Method enum to +# tf.image.ResizeMethod. +RESIZE_METHOD_MAP = { + preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA, + preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC, + preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR, + preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: ( + tf.image.ResizeMethod.NEAREST_NEIGHBOR), +} + + +def build(preprocessor_step_config): + """Builds preprocessing step based on the configuration. + + Args: + preprocessor_step_config: PreprocessingStep configuration proto. + + Returns: + function, argmap: A callable function and an argument map to call function + with. + + Raises: + ValueError: On invalid configuration. 
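+
+  For example, a `rgb_to_gray {}` step yields (preprocessor.rgb_to_gray, {}),
+  and a `resize_image` step maps its `method` enum to the corresponding
+  tf.image.ResizeMethod value.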
+ """ + step_type = preprocessor_step_config.WhichOneof('preprocessing_step') + + if step_type in PREPROCESSING_FUNCTION_MAP: + preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type] + step_config = _get_step_config_from_proto(preprocessor_step_config, + step_type) + function_args = _get_dict_from_proto(step_config) + return (preprocessing_function, function_args) + + if step_type == 'random_horizontal_flip': + config = preprocessor_step_config.random_horizontal_flip + return (preprocessor.random_horizontal_flip, + { + 'keypoint_flip_permutation': tuple( + config.keypoint_flip_permutation) or None, + 'probability': config.probability or None, + }) + + if step_type == 'random_vertical_flip': + config = preprocessor_step_config.random_vertical_flip + return (preprocessor.random_vertical_flip, + { + 'keypoint_flip_permutation': tuple( + config.keypoint_flip_permutation) or None, + 'probability': config.probability or None, + }) + + if step_type == 'random_rotation90': + config = preprocessor_step_config.random_rotation90 + return (preprocessor.random_rotation90, + { + 'keypoint_rot_permutation': tuple( + config.keypoint_rot_permutation) or None, + 'probability': config.probability or None, + }) + + if step_type == 'random_crop_image': + config = preprocessor_step_config.random_crop_image + return (preprocessor.random_crop_image, + { + 'min_object_covered': config.min_object_covered, + 'aspect_ratio_range': (config.min_aspect_ratio, + config.max_aspect_ratio), + 'area_range': (config.min_area, config.max_area), + 'overlap_thresh': config.overlap_thresh, + 'clip_boxes': config.clip_boxes, + 'random_coef': config.random_coef, + }) + + if step_type == 'random_pad_image': + config = preprocessor_step_config.random_pad_image + min_image_size = None + if (config.HasField('min_image_height') != + config.HasField('min_image_width')): + raise ValueError('min_image_height and min_image_width should be either ' + 'both set or both unset.') + if config.HasField('min_image_height'): + min_image_size = (config.min_image_height, config.min_image_width) + + max_image_size = None + if (config.HasField('max_image_height') != + config.HasField('max_image_width')): + raise ValueError('max_image_height and max_image_width should be either ' + 'both set or both unset.') + if config.HasField('max_image_height'): + max_image_size = (config.max_image_height, config.max_image_width) + + pad_color = config.pad_color or None + if pad_color: + if len(pad_color) != 3: + tf.logging.warn('pad_color should have 3 elements (RGB) if set!') + + pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) + return (preprocessor.random_pad_image, + { + 'min_image_size': min_image_size, + 'max_image_size': max_image_size, + 'pad_color': pad_color, + }) + + if step_type == 'random_absolute_pad_image': + config = preprocessor_step_config.random_absolute_pad_image + + max_height_padding = config.max_height_padding or 1 + max_width_padding = config.max_width_padding or 1 + + pad_color = config.pad_color or None + if pad_color: + if len(pad_color) != 3: + tf.logging.warn('pad_color should have 3 elements (RGB) if set!') + + pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) + + return (preprocessor.random_absolute_pad_image, + { + 'max_height_padding': max_height_padding, + 'max_width_padding': max_width_padding, + 'pad_color': pad_color, + }) + if step_type == 'random_crop_pad_image': + config = preprocessor_step_config.random_crop_pad_image + min_padded_size_ratio = config.min_padded_size_ratio + if 
min_padded_size_ratio and len(min_padded_size_ratio) != 2: + raise ValueError('min_padded_size_ratio should have 2 elements if set!') + max_padded_size_ratio = config.max_padded_size_ratio + if max_padded_size_ratio and len(max_padded_size_ratio) != 2: + raise ValueError('max_padded_size_ratio should have 2 elements if set!') + pad_color = config.pad_color or None + if pad_color: + if len(pad_color) != 3: + tf.logging.warn('pad_color should have 3 elements (RGB) if set!') + + pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32) + + kwargs = { + 'min_object_covered': config.min_object_covered, + 'aspect_ratio_range': (config.min_aspect_ratio, + config.max_aspect_ratio), + 'area_range': (config.min_area, config.max_area), + 'overlap_thresh': config.overlap_thresh, + 'clip_boxes': config.clip_boxes, + 'random_coef': config.random_coef, + 'pad_color': pad_color, + } + if min_padded_size_ratio: + kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio) + if max_padded_size_ratio: + kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio) + return (preprocessor.random_crop_pad_image, kwargs) + + if step_type == 'random_resize_method': + config = preprocessor_step_config.random_resize_method + return (preprocessor.random_resize_method, + { + 'target_size': [config.target_height, config.target_width], + }) + + if step_type == 'resize_image': + config = preprocessor_step_config.resize_image + method = RESIZE_METHOD_MAP[config.method] + return (preprocessor.resize_image, + { + 'new_height': config.new_height, + 'new_width': config.new_width, + 'method': method + }) + + if step_type == 'random_self_concat_image': + config = preprocessor_step_config.random_self_concat_image + return (preprocessor.random_self_concat_image, { + 'concat_vertical_probability': config.concat_vertical_probability, + 'concat_horizontal_probability': config.concat_horizontal_probability + }) + + if step_type == 'ssd_random_crop': + config = preprocessor_step_config.ssd_random_crop + if config.operations: + min_object_covered = [op.min_object_covered for op in config.operations] + aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio) + for op in config.operations] + area_range = [(op.min_area, op.max_area) for op in config.operations] + overlap_thresh = [op.overlap_thresh for op in config.operations] + clip_boxes = [op.clip_boxes for op in config.operations] + random_coef = [op.random_coef for op in config.operations] + return (preprocessor.ssd_random_crop, + { + 'min_object_covered': min_object_covered, + 'aspect_ratio_range': aspect_ratio_range, + 'area_range': area_range, + 'overlap_thresh': overlap_thresh, + 'clip_boxes': clip_boxes, + 'random_coef': random_coef, + }) + return (preprocessor.ssd_random_crop, {}) + + if step_type == 'autoaugment_image': + config = preprocessor_step_config.autoaugment_image + return (preprocessor.autoaugment_image, { + 'policy_name': config.policy_name, + }) + + if step_type == 'drop_label_probabilistically': + config = preprocessor_step_config.drop_label_probabilistically + return (preprocessor.drop_label_probabilistically, { + 'dropped_label': config.label, + 'drop_probability': config.drop_probability, + }) + + if step_type == 'remap_labels': + config = preprocessor_step_config.remap_labels + return (preprocessor.remap_labels, { + 'original_labels': config.original_labels, + 'new_label': config.new_label + }) + + if step_type == 'ssd_random_crop_pad': + config = preprocessor_step_config.ssd_random_crop_pad + if config.operations: + min_object_covered 
= [op.min_object_covered for op in config.operations] + aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio) + for op in config.operations] + area_range = [(op.min_area, op.max_area) for op in config.operations] + overlap_thresh = [op.overlap_thresh for op in config.operations] + clip_boxes = [op.clip_boxes for op in config.operations] + random_coef = [op.random_coef for op in config.operations] + min_padded_size_ratio = [tuple(op.min_padded_size_ratio) + for op in config.operations] + max_padded_size_ratio = [tuple(op.max_padded_size_ratio) + for op in config.operations] + pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b) + for op in config.operations] + return (preprocessor.ssd_random_crop_pad, + { + 'min_object_covered': min_object_covered, + 'aspect_ratio_range': aspect_ratio_range, + 'area_range': area_range, + 'overlap_thresh': overlap_thresh, + 'clip_boxes': clip_boxes, + 'random_coef': random_coef, + 'min_padded_size_ratio': min_padded_size_ratio, + 'max_padded_size_ratio': max_padded_size_ratio, + 'pad_color': pad_color, + }) + return (preprocessor.ssd_random_crop_pad, {}) + + if step_type == 'ssd_random_crop_fixed_aspect_ratio': + config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio + if config.operations: + min_object_covered = [op.min_object_covered for op in config.operations] + area_range = [(op.min_area, op.max_area) for op in config.operations] + overlap_thresh = [op.overlap_thresh for op in config.operations] + clip_boxes = [op.clip_boxes for op in config.operations] + random_coef = [op.random_coef for op in config.operations] + return (preprocessor.ssd_random_crop_fixed_aspect_ratio, + { + 'min_object_covered': min_object_covered, + 'aspect_ratio': config.aspect_ratio, + 'area_range': area_range, + 'overlap_thresh': overlap_thresh, + 'clip_boxes': clip_boxes, + 'random_coef': random_coef, + }) + return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {}) + + if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio': + config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio + kwargs = {} + aspect_ratio = config.aspect_ratio + if aspect_ratio: + kwargs['aspect_ratio'] = aspect_ratio + min_padded_size_ratio = config.min_padded_size_ratio + if min_padded_size_ratio: + if len(min_padded_size_ratio) != 2: + raise ValueError('min_padded_size_ratio should have 2 elements if set!') + kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio) + max_padded_size_ratio = config.max_padded_size_ratio + if max_padded_size_ratio: + if len(max_padded_size_ratio) != 2: + raise ValueError('max_padded_size_ratio should have 2 elements if set!') + kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio) + if config.operations: + kwargs['min_object_covered'] = [op.min_object_covered + for op in config.operations] + kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio, op.max_aspect_ratio) + for op in config.operations] + kwargs['area_range'] = [(op.min_area, op.max_area) + for op in config.operations] + kwargs['overlap_thresh'] = [op.overlap_thresh for op in config.operations] + kwargs['clip_boxes'] = [op.clip_boxes for op in config.operations] + kwargs['random_coef'] = [op.random_coef for op in config.operations] + return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs) + + if step_type == 'random_square_crop_by_scale': + config = preprocessor_step_config.random_square_crop_by_scale + return preprocessor.random_square_crop_by_scale, { + 'scale_min': config.scale_min, + 'scale_max': config.scale_max, + 
'max_border': config.max_border, + 'num_scales': config.num_scales + } + + if step_type == 'random_scale_crop_and_pad_to_square': + config = preprocessor_step_config.random_scale_crop_and_pad_to_square + return preprocessor.random_scale_crop_and_pad_to_square, { + 'scale_min': config.scale_min, + 'scale_max': config.scale_max, + 'output_size': config.output_size, + } + + raise ValueError('Unknown preprocessing step.') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/preprocessor_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/preprocessor_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7bb8dcc3bef014d8820c16855a382d55511e9a4 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/preprocessor_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/preprocessor_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/preprocessor_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9e90344d0478229fa95355b53ecfa5f876325936 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/preprocessor_builder_test.py @@ -0,0 +1,758 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for preprocessor_builder.""" + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import preprocessor_builder +from object_detection.core import preprocessor +from object_detection.protos import preprocessor_pb2 + + +class PreprocessorBuilderTest(tf.test.TestCase): + + def assert_dictionary_close(self, dict1, dict2): + """Helper to check if two dicts with floatst or integers are close.""" + self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys())) + for key in dict1: + value = dict1[key] + if isinstance(value, float): + self.assertAlmostEqual(value, dict2[key]) + else: + self.assertEqual(value, dict2[key]) + + def test_build_normalize_image(self): + preprocessor_text_proto = """ + normalize_image { + original_minval: 0.0 + original_maxval: 255.0 + target_minval: -1.0 + target_maxval: 1.0 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.normalize_image) + self.assertEqual(args, { + 'original_minval': 0.0, + 'original_maxval': 255.0, + 'target_minval': -1.0, + 'target_maxval': 1.0, + }) + + def test_build_random_horizontal_flip(self): + preprocessor_text_proto = """ + random_horizontal_flip { + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 4 + probability: 0.5 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_horizontal_flip) + self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4), + 'probability': 0.5}) + + def test_build_random_vertical_flip(self): + preprocessor_text_proto = """ + random_vertical_flip { + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 4 + probability: 0.5 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_vertical_flip) + self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4), + 'probability': 0.5}) + + def test_build_random_rotation90(self): + preprocessor_text_proto = """ + random_rotation90 { + keypoint_rot_permutation: 3 + keypoint_rot_permutation: 0 + keypoint_rot_permutation: 1 + keypoint_rot_permutation: 2 + probability: 0.5 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_rotation90) + self.assertEqual(args, {'keypoint_rot_permutation': (3, 0, 1, 2), + 'probability': 0.5}) + + def test_build_random_pixel_value_scale(self): + preprocessor_text_proto = """ + random_pixel_value_scale { + minval: 0.8 + maxval: 1.2 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, 
preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_pixel_value_scale) + self.assert_dictionary_close(args, {'minval': 0.8, 'maxval': 1.2}) + + def test_build_random_image_scale(self): + preprocessor_text_proto = """ + random_image_scale { + min_scale_ratio: 0.8 + max_scale_ratio: 2.2 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_image_scale) + self.assert_dictionary_close(args, {'min_scale_ratio': 0.8, + 'max_scale_ratio': 2.2}) + + def test_build_random_rgb_to_gray(self): + preprocessor_text_proto = """ + random_rgb_to_gray { + probability: 0.8 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_rgb_to_gray) + self.assert_dictionary_close(args, {'probability': 0.8}) + + def test_build_random_adjust_brightness(self): + preprocessor_text_proto = """ + random_adjust_brightness { + max_delta: 0.2 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_adjust_brightness) + self.assert_dictionary_close(args, {'max_delta': 0.2}) + + def test_build_random_adjust_contrast(self): + preprocessor_text_proto = """ + random_adjust_contrast { + min_delta: 0.7 + max_delta: 1.1 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_adjust_contrast) + self.assert_dictionary_close(args, {'min_delta': 0.7, 'max_delta': 1.1}) + + def test_build_random_adjust_hue(self): + preprocessor_text_proto = """ + random_adjust_hue { + max_delta: 0.01 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_adjust_hue) + self.assert_dictionary_close(args, {'max_delta': 0.01}) + + def test_build_random_adjust_saturation(self): + preprocessor_text_proto = """ + random_adjust_saturation { + min_delta: 0.75 + max_delta: 1.15 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_adjust_saturation) + self.assert_dictionary_close(args, {'min_delta': 0.75, 'max_delta': 1.15}) + + def test_build_random_distort_color(self): + preprocessor_text_proto = """ + random_distort_color { + color_ordering: 1 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_distort_color) + self.assertEqual(args, {'color_ordering': 1}) + + def test_build_random_jitter_boxes(self): + preprocessor_text_proto = """ + 
random_jitter_boxes { + ratio: 0.1 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_jitter_boxes) + self.assert_dictionary_close(args, {'ratio': 0.1}) + + def test_build_random_crop_image(self): + preprocessor_text_proto = """ + random_crop_image { + min_object_covered: 0.75 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.25 + max_area: 0.875 + overlap_thresh: 0.5 + clip_boxes: False + random_coef: 0.125 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_crop_image) + self.assertEqual(args, { + 'min_object_covered': 0.75, + 'aspect_ratio_range': (0.75, 1.5), + 'area_range': (0.25, 0.875), + 'overlap_thresh': 0.5, + 'clip_boxes': False, + 'random_coef': 0.125, + }) + + def test_build_random_pad_image(self): + preprocessor_text_proto = """ + random_pad_image { + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_pad_image) + self.assertEqual(args, { + 'min_image_size': None, + 'max_image_size': None, + 'pad_color': None, + }) + + def test_build_random_absolute_pad_image(self): + preprocessor_text_proto = """ + random_absolute_pad_image { + max_height_padding: 50 + max_width_padding: 100 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_absolute_pad_image) + self.assertEqual(args, { + 'max_height_padding': 50, + 'max_width_padding': 100, + 'pad_color': None, + }) + + def test_build_random_crop_pad_image(self): + preprocessor_text_proto = """ + random_crop_pad_image { + min_object_covered: 0.75 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.25 + max_area: 0.875 + overlap_thresh: 0.5 + clip_boxes: False + random_coef: 0.125 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_crop_pad_image) + self.assertEqual(args, { + 'min_object_covered': 0.75, + 'aspect_ratio_range': (0.75, 1.5), + 'area_range': (0.25, 0.875), + 'overlap_thresh': 0.5, + 'clip_boxes': False, + 'random_coef': 0.125, + 'pad_color': None, + }) + + def test_build_random_crop_pad_image_with_optional_parameters(self): + preprocessor_text_proto = """ + random_crop_pad_image { + min_object_covered: 0.75 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.25 + max_area: 0.875 + overlap_thresh: 0.5 + clip_boxes: False + random_coef: 0.125 + min_padded_size_ratio: 0.5 + min_padded_size_ratio: 0.75 + max_padded_size_ratio: 0.5 + max_padded_size_ratio: 0.75 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_crop_pad_image) + 
self.assertEqual(args, { + 'min_object_covered': 0.75, + 'aspect_ratio_range': (0.75, 1.5), + 'area_range': (0.25, 0.875), + 'overlap_thresh': 0.5, + 'clip_boxes': False, + 'random_coef': 0.125, + 'min_padded_size_ratio': (0.5, 0.75), + 'max_padded_size_ratio': (0.5, 0.75), + 'pad_color': None, + }) + + def test_build_random_crop_to_aspect_ratio(self): + preprocessor_text_proto = """ + random_crop_to_aspect_ratio { + aspect_ratio: 0.85 + overlap_thresh: 0.35 + clip_boxes: False + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_crop_to_aspect_ratio) + self.assert_dictionary_close(args, {'aspect_ratio': 0.85, + 'overlap_thresh': 0.35, + 'clip_boxes': False}) + + def test_build_random_black_patches(self): + preprocessor_text_proto = """ + random_black_patches { + max_black_patches: 20 + probability: 0.95 + size_to_image_ratio: 0.12 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_black_patches) + self.assert_dictionary_close(args, {'max_black_patches': 20, + 'probability': 0.95, + 'size_to_image_ratio': 0.12}) + + def test_build_random_jpeg_quality(self): + preprocessor_text_proto = """ + random_jpeg_quality { + random_coef: 0.5 + min_jpeg_quality: 40 + max_jpeg_quality: 90 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Parse(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_jpeg_quality) + self.assert_dictionary_close(args, {'random_coef': 0.5, + 'min_jpeg_quality': 40, + 'max_jpeg_quality': 90}) + + def test_build_random_downscale_to_target_pixels(self): + preprocessor_text_proto = """ + random_downscale_to_target_pixels { + random_coef: 0.5 + min_target_pixels: 200 + max_target_pixels: 900 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Parse(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_downscale_to_target_pixels) + self.assert_dictionary_close(args, { + 'random_coef': 0.5, + 'min_target_pixels': 200, + 'max_target_pixels': 900 + }) + + def test_build_random_patch_gaussian(self): + preprocessor_text_proto = """ + random_patch_gaussian { + random_coef: 0.5 + min_patch_size: 10 + max_patch_size: 300 + min_gaussian_stddev: 0.2 + max_gaussian_stddev: 1.5 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Parse(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_patch_gaussian) + self.assert_dictionary_close(args, { + 'random_coef': 0.5, + 'min_patch_size': 10, + 'max_patch_size': 300, + 'min_gaussian_stddev': 0.2, + 'max_gaussian_stddev': 1.5 + }) + + def test_auto_augment_image(self): + preprocessor_text_proto = """ + autoaugment_image { + policy_name: 'v0' + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + 
self.assertEqual(function, preprocessor.autoaugment_image) + self.assert_dictionary_close(args, {'policy_name': 'v0'}) + + def test_drop_label_probabilistically(self): + preprocessor_text_proto = """ + drop_label_probabilistically{ + label: 2 + drop_probability: 0.5 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.drop_label_probabilistically) + self.assert_dictionary_close(args, { + 'dropped_label': 2, + 'drop_probability': 0.5 + }) + + def test_remap_labels(self): + preprocessor_text_proto = """ + remap_labels{ + original_labels: 1 + original_labels: 2 + new_label: 3 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.remap_labels) + self.assert_dictionary_close(args, { + 'original_labels': [1, 2], + 'new_label': 3 + }) + + def test_build_random_resize_method(self): + preprocessor_text_proto = """ + random_resize_method { + target_height: 75 + target_width: 100 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_resize_method) + self.assert_dictionary_close(args, {'target_size': [75, 100]}) + + def test_build_scale_boxes_to_pixel_coordinates(self): + preprocessor_text_proto = """ + scale_boxes_to_pixel_coordinates {} + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.scale_boxes_to_pixel_coordinates) + self.assertEqual(args, {}) + + def test_build_resize_image(self): + preprocessor_text_proto = """ + resize_image { + new_height: 75 + new_width: 100 + method: BICUBIC + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.resize_image) + self.assertEqual(args, {'new_height': 75, + 'new_width': 100, + 'method': tf.image.ResizeMethod.BICUBIC}) + + def test_build_rgb_to_gray(self): + preprocessor_text_proto = """ + rgb_to_gray {} + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.rgb_to_gray) + self.assertEqual(args, {}) + + def test_build_subtract_channel_mean(self): + preprocessor_text_proto = """ + subtract_channel_mean { + means: [1.0, 2.0, 3.0] + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.subtract_channel_mean) + self.assertEqual(args, {'means': [1.0, 2.0, 3.0]}) + + def test_random_self_concat_image(self): + preprocessor_text_proto = """ + random_self_concat_image { + concat_vertical_probability: 0.5 + concat_horizontal_probability: 0.25 + } + """ + preprocessor_proto 
= preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_self_concat_image) + self.assertEqual(args, {'concat_vertical_probability': 0.5, + 'concat_horizontal_probability': 0.25}) + + def test_build_ssd_random_crop(self): + preprocessor_text_proto = """ + ssd_random_crop { + operations { + min_object_covered: 0.0 + min_aspect_ratio: 0.875 + max_aspect_ratio: 1.125 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.0 + clip_boxes: False + random_coef: 0.375 + } + operations { + min_object_covered: 0.25 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.25 + clip_boxes: True + random_coef: 0.375 + } + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.ssd_random_crop) + self.assertEqual(args, {'min_object_covered': [0.0, 0.25], + 'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)], + 'area_range': [(0.5, 1.0), (0.5, 1.0)], + 'overlap_thresh': [0.0, 0.25], + 'clip_boxes': [False, True], + 'random_coef': [0.375, 0.375]}) + + def test_build_ssd_random_crop_empty_operations(self): + preprocessor_text_proto = """ + ssd_random_crop { + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.ssd_random_crop) + self.assertEqual(args, {}) + + def test_build_ssd_random_crop_pad(self): + preprocessor_text_proto = """ + ssd_random_crop_pad { + operations { + min_object_covered: 0.0 + min_aspect_ratio: 0.875 + max_aspect_ratio: 1.125 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.0 + clip_boxes: False + random_coef: 0.375 + min_padded_size_ratio: [1.0, 1.0] + max_padded_size_ratio: [2.0, 2.0] + pad_color_r: 0.5 + pad_color_g: 0.5 + pad_color_b: 0.5 + } + operations { + min_object_covered: 0.25 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.25 + clip_boxes: True + random_coef: 0.375 + min_padded_size_ratio: [1.0, 1.0] + max_padded_size_ratio: [2.0, 2.0] + pad_color_r: 0.5 + pad_color_g: 0.5 + pad_color_b: 0.5 + } + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.ssd_random_crop_pad) + self.assertEqual(args, {'min_object_covered': [0.0, 0.25], + 'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)], + 'area_range': [(0.5, 1.0), (0.5, 1.0)], + 'overlap_thresh': [0.0, 0.25], + 'clip_boxes': [False, True], + 'random_coef': [0.375, 0.375], + 'min_padded_size_ratio': [(1.0, 1.0), (1.0, 1.0)], + 'max_padded_size_ratio': [(2.0, 2.0), (2.0, 2.0)], + 'pad_color': [(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)]}) + + def test_build_ssd_random_crop_fixed_aspect_ratio(self): + preprocessor_text_proto = """ + ssd_random_crop_fixed_aspect_ratio { + operations { + min_object_covered: 0.0 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.0 + clip_boxes: False + random_coef: 0.375 + } + operations { + min_object_covered: 0.25 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.25 + clip_boxes: True + 
random_coef: 0.375 + } + aspect_ratio: 0.875 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.ssd_random_crop_fixed_aspect_ratio) + self.assertEqual(args, {'min_object_covered': [0.0, 0.25], + 'aspect_ratio': 0.875, + 'area_range': [(0.5, 1.0), (0.5, 1.0)], + 'overlap_thresh': [0.0, 0.25], + 'clip_boxes': [False, True], + 'random_coef': [0.375, 0.375]}) + + def test_build_ssd_random_crop_pad_fixed_aspect_ratio(self): + preprocessor_text_proto = """ + ssd_random_crop_pad_fixed_aspect_ratio { + operations { + min_object_covered: 0.0 + min_aspect_ratio: 0.875 + max_aspect_ratio: 1.125 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.0 + clip_boxes: False + random_coef: 0.375 + } + operations { + min_object_covered: 0.25 + min_aspect_ratio: 0.75 + max_aspect_ratio: 1.5 + min_area: 0.5 + max_area: 1.0 + overlap_thresh: 0.25 + clip_boxes: True + random_coef: 0.375 + } + aspect_ratio: 0.875 + min_padded_size_ratio: [1.0, 1.0] + max_padded_size_ratio: [2.0, 2.0] + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, + preprocessor.ssd_random_crop_pad_fixed_aspect_ratio) + self.assertEqual(args, {'min_object_covered': [0.0, 0.25], + 'aspect_ratio': 0.875, + 'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)], + 'area_range': [(0.5, 1.0), (0.5, 1.0)], + 'overlap_thresh': [0.0, 0.25], + 'clip_boxes': [False, True], + 'random_coef': [0.375, 0.375], + 'min_padded_size_ratio': (1.0, 1.0), + 'max_padded_size_ratio': (2.0, 2.0)}) + + def test_build_normalize_image_convert_class_logits_to_softmax(self): + preprocessor_text_proto = """ + convert_class_logits_to_softmax { + temperature: 2 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.convert_class_logits_to_softmax) + self.assertEqual(args, {'temperature': 2}) + + def test_random_crop_by_scale(self): + preprocessor_text_proto = """ + random_square_crop_by_scale { + scale_min: 0.25 + scale_max: 2.0 + num_scales: 8 + } + """ + preprocessor_proto = preprocessor_pb2.PreprocessingStep() + text_format.Merge(preprocessor_text_proto, preprocessor_proto) + function, args = preprocessor_builder.build(preprocessor_proto) + self.assertEqual(function, preprocessor.random_square_crop_by_scale) + self.assertEqual(args, { + 'scale_min': 0.25, + 'scale_max': 2.0, + 'num_scales': 8, + 'max_border': 128 + }) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/region_similarity_calculator_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/region_similarity_calculator_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..8f35087ff40ed9e08e7c889803b704687ac3c770 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/region_similarity_calculator_builder.py @@ -0,0 +1,59 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Builder for region similarity calculators.""" + +from object_detection.core import region_similarity_calculator +from object_detection.protos import region_similarity_calculator_pb2 + + +def build(region_similarity_calculator_config): + """Builds region similarity calculator based on the configuration. + + Builds one of [IouSimilarity, IoaSimilarity, NegSqDistSimilarity] objects. See + core/region_similarity_calculator.proto for details. + + Args: + region_similarity_calculator_config: RegionSimilarityCalculator + configuration proto. + + Returns: + region_similarity_calculator: RegionSimilarityCalculator object. + + Raises: + ValueError: On unknown region similarity calculator. + """ + + if not isinstance( + region_similarity_calculator_config, + region_similarity_calculator_pb2.RegionSimilarityCalculator): + raise ValueError( + 'region_similarity_calculator_config not of type ' + 'region_similarity_calculator_pb2.RegionsSimilarityCalculator') + + similarity_calculator = region_similarity_calculator_config.WhichOneof( + 'region_similarity') + if similarity_calculator == 'iou_similarity': + return region_similarity_calculator.IouSimilarity() + if similarity_calculator == 'ioa_similarity': + return region_similarity_calculator.IoaSimilarity() + if similarity_calculator == 'neg_sq_dist_similarity': + return region_similarity_calculator.NegSqDistSimilarity() + if similarity_calculator == 'thresholded_iou_similarity': + return region_similarity_calculator.ThresholdedIouSimilarity( + region_similarity_calculator_config.thresholded_iou_similarity + .iou_threshold) + + raise ValueError('Unknown region similarity calculator.') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/region_similarity_calculator_builder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/region_similarity_calculator_builder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd9a71bd8824bcf4662d1412f5ca43870ad520cd Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/region_similarity_calculator_builder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/region_similarity_calculator_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/region_similarity_calculator_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..da72e7360ee47d142ced9c90787bdc56813901dc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/region_similarity_calculator_builder_test.py @@ -0,0 +1,67 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for region_similarity_calculator_builder.""" + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import region_similarity_calculator_builder +from object_detection.core import region_similarity_calculator +from object_detection.protos import region_similarity_calculator_pb2 as sim_calc_pb2 + + +class RegionSimilarityCalculatorBuilderTest(tf.test.TestCase): + + def testBuildIoaSimilarityCalculator(self): + similarity_calc_text_proto = """ + ioa_similarity { + } + """ + similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator() + text_format.Merge(similarity_calc_text_proto, similarity_calc_proto) + similarity_calc = region_similarity_calculator_builder.build( + similarity_calc_proto) + self.assertTrue(isinstance(similarity_calc, + region_similarity_calculator.IoaSimilarity)) + + def testBuildIouSimilarityCalculator(self): + similarity_calc_text_proto = """ + iou_similarity { + } + """ + similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator() + text_format.Merge(similarity_calc_text_proto, similarity_calc_proto) + similarity_calc = region_similarity_calculator_builder.build( + similarity_calc_proto) + self.assertTrue(isinstance(similarity_calc, + region_similarity_calculator.IouSimilarity)) + + def testBuildNegSqDistSimilarityCalculator(self): + similarity_calc_text_proto = """ + neg_sq_dist_similarity { + } + """ + similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator() + text_format.Merge(similarity_calc_text_proto, similarity_calc_proto) + similarity_calc = region_similarity_calculator_builder.build( + similarity_calc_proto) + self.assertTrue(isinstance(similarity_calc, + region_similarity_calculator. + NegSqDistSimilarity)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/target_assigner_builder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/target_assigner_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..f6434f653c8426733e90ff5ed04dc69b7d9e34af --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/target_assigner_builder.py @@ -0,0 +1,40 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""A function to build an object detection box coder from configuration.""" +from object_detection.builders import box_coder_builder +from object_detection.builders import matcher_builder +from object_detection.builders import region_similarity_calculator_builder +from object_detection.core import target_assigner + + +def build(target_assigner_config): + """Builds a TargetAssigner object based on the config. + + Args: + target_assigner_config: A target_assigner proto message containing config + for the desired target assigner. + + Returns: + TargetAssigner object based on the config. + """ + matcher_instance = matcher_builder.build(target_assigner_config.matcher) + similarity_calc_instance = region_similarity_calculator_builder.build( + target_assigner_config.similarity_calculator) + box_coder = box_coder_builder.build(target_assigner_config.box_coder) + return target_assigner.TargetAssigner( + matcher=matcher_instance, + similarity_calc=similarity_calc_instance, + box_coder_instance=box_coder) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/target_assigner_builder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/target_assigner_builder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..279600214844c617056453890cc0d9d471ec5e82 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/builders/target_assigner_builder_test.py @@ -0,0 +1,50 @@ +"""Tests for google3.third_party.tensorflow_models.object_detection.builders.target_assigner_builder.""" +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
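The test that follows drives `target_assigner_builder.build()` from a text proto; the same object can also be assembled by setting the config fields programmatically. A rough sketch, with field names assumed from the test's text proto rather than verified against target_assigner.proto:

```python
# Sketch only: programmatic equivalent of the text-proto config used in the test below.
from object_detection.builders import target_assigner_builder
from object_detection.protos import target_assigner_pb2

config = target_assigner_pb2.TargetAssigner()
config.matcher.argmax_matcher.matched_threshold = 0.5      # argmax matcher, as in the test
config.similarity_calculator.iou_similarity.SetInParent()  # select the iou_similarity oneof
config.box_coder.faster_rcnn_box_coder.SetInParent()       # Faster R-CNN box coder defaults
target_assigner_instance = target_assigner_builder.build(config)
```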
+# ============================================================================== + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + + +from object_detection.builders import target_assigner_builder +from object_detection.core import target_assigner +from object_detection.protos import target_assigner_pb2 + + +class TargetAssignerBuilderTest(tf.test.TestCase): + + def test_build_a_target_assigner(self): + target_assigner_text_proto = """ + matcher { + argmax_matcher {matched_threshold: 0.5} + } + similarity_calculator { + iou_similarity {} + } + box_coder { + faster_rcnn_box_coder {} + } + """ + target_assigner_proto = target_assigner_pb2.TargetAssigner() + text_format.Merge(target_assigner_text_proto, target_assigner_proto) + target_assigner_instance = target_assigner_builder.build( + target_assigner_proto) + self.assertIsInstance(target_assigner_instance, + target_assigner.TargetAssigner) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/.ipynb_checkpoints/object_detection_tutorial-checkpoint.ipynb b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/.ipynb_checkpoints/object_detection_tutorial-checkpoint.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..75a81a19ec0a0eba7538167c46718d0393d2dd35 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/.ipynb_checkpoints/object_detection_tutorial-checkpoint.ipynb @@ -0,0 +1,783 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "V8-yl-s-WKMG" + }, + "source": [ + "# Object Detection API Demo\n", + "\n", + "
\n", + " \n", + " Run in Google Colab\n", + " \n", + "\n", + " \n", + " View source on GitHub\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "3cIrseUv6WKz" + }, + "source": [ + "Welcome to the [Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection). This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "VrJaG0cYN9yh" + }, + "source": [ + "> **Important**: This tutorial is to help you through the first step towards using [Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection) to build models. If you just just need an off the shelf model that does the job, see the [TFHub object detection example](https://colab.sandbox.google.com/github/tensorflow/hub/blob/master/examples/colab/object_detection.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "kFSqkTCdWKMI" + }, + "source": [ + "# Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "awjrpqy-6MaQ" + }, + "source": [ + "Important: If you're running on a local machine, be sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). This notebook includes only what's necessary to run in Colab." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "p3UGXxUii5Ym" + }, + "source": [ + "### Install" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hGL97-GXjSUw" + }, + "outputs": [], + "source": [ + "!pip install -U --pre tensorflow==\"2.*\"\n", + "!pip install tf_slim" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "n_ap_s9ajTHH" + }, + "source": [ + "Make sure you have `pycocotools` installed" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Bg8ZyA47i3pY" + }, + "outputs": [], + "source": [ + "!pip install pycocotools" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "-vsOL3QR6kqs" + }, + "source": [ + "Get `tensorflow/models` or `cd` to parent directory of the repository." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ykA0c-om51s1" + }, + "outputs": [], + "source": [ + "import os\n", + "import pathlib\n", + "\n", + "\n", + "if \"models\" in pathlib.Path.cwd().parts:\n", + " while \"models\" in pathlib.Path.cwd().parts:\n", + " os.chdir('..')\n", + "elif not pathlib.Path('models').exists():\n", + " !git clone --depth 1 https://github.com/tensorflow/models" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "O219m6yWAj9l" + }, + "source": [ + "Compile protobufs and install the object_detection package" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "PY41vdYYNlXc" + }, + "outputs": [], + "source": [ + "%%bash\n", + "cd models/research/\n", + "protoc object_detection/protos/*.proto --python_out=." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "s62yJyQUcYbp" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing /home/job/models/research\n", + "Collecting Pillow>=1.0 (from object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/12/ad/61f8dfba88c4e56196bf6d056cdbba64dc9c5dfdfbc97d02e6472feed913/Pillow-6.2.2-cp27-cp27mu-manylinux1_x86_64.whl\n", + "Collecting Matplotlib>=2.1 (from object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/9d/40/5ba7d4a3f80d39d409f21899972596bf62c8606f1406a825029649eaa439/matplotlib-2.2.5-cp27-cp27mu-manylinux1_x86_64.whl\n", + "Collecting Cython>=0.28.1 (from object-detection==0.1)\n", + " Downloading https://files.pythonhosted.org/packages/59/c1/0b69d125ab9819869cffff2f416158acf2684bdb4bf54eccf887717e2cbd/Cython-0.29.21-cp27-cp27mu-manylinux1_x86_64.whl (1.9MB)\n", + "Collecting cycler>=0.10 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/f7/d2/e07d3ebb2bd7af696440ce7e754c59dd546ffe1bbe732c8ab68b9c834e61/cycler-0.10.0-py2.py3-none-any.whl\n", + "Collecting numpy>=1.7.1 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/3a/5f/47e578b3ae79e2624e205445ab77a1848acdaa2929a00eeef6b16eaaeb20/numpy-1.16.6-cp27-cp27mu-manylinux1_x86_64.whl\n", + "Collecting backports.functools-lru-cache (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/da/d1/080d2bb13773803648281a49e3918f65b31b7beebf009887a529357fd44a/backports.functools_lru_cache-1.6.1-py2.py3-none-any.whl\n", + "Collecting subprocess32 (from Matplotlib>=2.1->object-detection==0.1)\n", + "Collecting kiwisolver>=1.0.1 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/3d/78/cb9248b2289ec31e301137cedbe4ca503a74ca87f88cdbfd2f8be52323bf/kiwisolver-1.1.0-cp27-cp27mu-manylinux1_x86_64.whl\n", + "Collecting pytz (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/4f/a4/879454d49688e2fad93e59d7d4efda580b783c745fd2ec2a3adf87b0808d/pytz-2020.1-py2.py3-none-any.whl\n", + "Collecting six>=1.10 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/ee/ff/48bde5c0f013094d729fe4b0316ba2a24774b3ff1c52d924a8a4cb04078a/six-1.15.0-py2.py3-none-any.whl\n", + "Collecting python-dateutil>=2.1 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/d4/70/d60450c3dd48ef87586924207ae8907090de0b306af2bce5d134d78615cb/python_dateutil-2.8.1-py2.py3-none-any.whl\n", + "Collecting pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/8a/bb/488841f56197b13700afd5658fc279a2025a39e22449b7cf29864669b15d/pyparsing-2.4.7-py2.py3-none-any.whl\n", + "Collecting setuptools (from kiwisolver>=1.0.1->Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/e1/b7/182161210a13158cd3ccc41ee19aadef54496b74f2817cc147006ec932b4/setuptools-44.1.1-py2.py3-none-any.whl\n", + "Installing collected packages: Pillow, six, cycler, numpy, backports.functools-lru-cache, subprocess32, setuptools, kiwisolver, pytz, python-dateutil, pyparsing, Matplotlib, Cython, object-detection\n", + 
" Running setup.py install for object-detection: started\n", + " Running setup.py install for object-detection: finished with status 'done'\n", + "Successfully installed Cython-0.29.21 Matplotlib-2.2.5 Pillow-6.2.2 backports.functools-lru-cache-1.6.1 cycler-0.10.0 kiwisolver-1.1.0 numpy-1.16.6 object-detection-0.1 pyparsing-2.4.7 python-dateutil-2.8.1 pytz-2020.1 setuptools-44.1.1 six-1.15.0 subprocess32-3.5.4\n" + ] + } + ], + "source": [ + "%%bash \n", + "cd models/research\n", + "pip install ." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "LBdjK2G5ywuc" + }, + "source": [ + "### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hV4P5gyTWKMI" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a 
synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "import os\n", + "import six.moves.urllib as urllib\n", + "import sys\n", + "import tarfile\n", + "import tensorflow as tf\n", + "import zipfile\n", + "\n", + "from collections import defaultdict\n", + "from io import StringIO\n", + "from matplotlib import pyplot as plt\n", + "from PIL import Image\n", + "from IPython.display import display" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "r5FNuiRPWKMN" + }, + "source": [ + "Import the object detection module." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "4-IMl4b6BdGO" + }, + "outputs": [], + "source": [ + "from object_detection.utils import ops as utils_ops\n", + "from object_detection.utils import label_map_util\n", + "from object_detection.utils import visualization_utils as vis_util" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RYPCiag2iz_q" + }, + "source": [ + "Patches:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "mF-YlMl8c_bM" + }, + "outputs": [], + "source": [ + "# patch tf1 into `utils.ops`\n", + "utils_ops.tf = tf.compat.v1\n", + "\n", + "# Patch the location of gfile\n", + "tf.gfile = tf.io.gfile" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "cfn_tRFOWKMO" + }, + "source": [ + "# Model preparation " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "X_sEBLpVWKMQ" + }, + "source": [ + "## Variables\n", + "\n", + "Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing the path.\n", + "\n", + "By default we use an \"SSD with Mobilenet\" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies." 
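If a model was exported locally rather than downloaded from the zoo, the download helper defined below is not needed; `tf.saved_model.load` can be pointed at the exported saved_model directory directly. A minimal sketch (the local path is a placeholder, not taken from this repository):

```python
import tensorflow as tf

# Placeholder path to a model exported with export_inference_graph.py.
exported_dir = 'exported_model/saved_model'
detection_model = tf.saved_model.load(exported_dir)
print(detection_model.signatures['serving_default'].inputs)
```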
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7ai8pLZZWKMS" + }, + "source": [ + "## Loader" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "zm8xp-0eoItE" + }, + "outputs": [], + "source": [ + "def load_model(model_name):\n", + " base_url = 'http://download.tensorflow.org/models/object_detection/'\n", + " model_file = model_name + '.tar.gz'\n", + " model_dir = tf.keras.utils.get_file(\n", + " fname=model_name, \n", + " origin=base_url + model_file,\n", + " untar=True)\n", + "\n", + " model_dir = pathlib.Path(model_dir)/\"saved_model\"\n", + "\n", + " model = tf.saved_model.load(str(model_dir))\n", + "\n", + " return model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "_1MVVTcLWKMW" + }, + "source": [ + "## Loading label map\n", + "Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hDbpHkiWWKMX" + }, + "outputs": [], + "source": [ + "# List of the strings that is used to add correct label for each box.\n", + "PATH_TO_LABELS = 'models/annotations/label_map.pbtxt'\n", + "category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "oVU3U_J6IJVb" + }, + "source": [ + "For the sake of simplicity we will test on 2 images:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "jG-zn5ykWKMd" + }, + "outputs": [], + "source": [ + "# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.\n", + "PATH_TO_TEST_IMAGES_DIR = pathlib.Path('models/research/object_detection/test_images')\n", + "TEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob(\"*.jpg\")))\n", + "TEST_IMAGE_PATHS" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "H0_1AGhrWKMc" + }, + "source": [ + "# Detection" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "f7aOtOlebK7h" + }, + "source": [ + "Load an object detection model:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "1XNT0wxybKR6" + }, + "outputs": [], + "source": [ + "model_name = 'ssd_mobilenet_v1_coco_2017_11_17'\n", + "detection_model = load_model(model_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "yN1AYfAEJIGp" + }, + "source": [ + "Check the model's input signature, it expects a batch of 3-color images of type uint8:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "CK4cnry6wsHY" + }, + "outputs": [], + "source": [ + "print(detection_model.signatures['serving_default'].inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Q8u3BjpMJXZF" + }, + "source": [ + "And returns several outputs:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + 
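For orientation, the `category_index` built above is a plain dict keyed by integer class id, each value holding at least an 'id' and a 'name'; the class names shown here are illustrative only, not read from the label map:

```python
# Illustrative structure only, e.g.:
#   {1: {'id': 1, 'name': 'person'}, 2: {'id': 2, 'name': 'bicycle'}, ...}
print(category_index[1]['name'])  # label string drawn for class id 1
```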
"colab_type": "code", + "id": "oLSZpfaYwuSk" + }, + "outputs": [], + "source": [ + "detection_model.signatures['serving_default'].output_dtypes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "FZyKUJeuxvpT" + }, + "outputs": [], + "source": [ + "detection_model.signatures['serving_default'].output_shapes" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JP5qZ7sXJpwG" + }, + "source": [ + "Add a wrapper function to call the model, and cleanup the outputs:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ajmR_exWyN76" + }, + "outputs": [], + "source": [ + "def run_inference_for_single_image(model, image):\n", + " image = np.asarray(image)\n", + " # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.\n", + " input_tensor = tf.convert_to_tensor(image)\n", + " # The model expects a batch of images, so add an axis with `tf.newaxis`.\n", + " input_tensor = input_tensor[tf.newaxis,...]\n", + "\n", + " # Run inference\n", + " model_fn = model.signatures['serving_default']\n", + " output_dict = model_fn(input_tensor)\n", + "\n", + " # All outputs are batches tensors.\n", + " # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n", + " # We're only interested in the first num_detections.\n", + " num_detections = int(output_dict.pop('num_detections'))\n", + " output_dict = {key:value[0, :num_detections].numpy() \n", + " for key,value in output_dict.items()}\n", + " output_dict['num_detections'] = num_detections\n", + "\n", + " # detection_classes should be ints.\n", + " output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n", + " \n", + " # Handle models with masks:\n", + " if 'detection_masks' in output_dict:\n", + " # Reframe the the bbox mask to the image size.\n", + " detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n", + " output_dict['detection_masks'], output_dict['detection_boxes'],\n", + " image.shape[0], image.shape[1]) \n", + " detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,\n", + " tf.uint8)\n", + " output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()\n", + " \n", + " return output_dict" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "z1wq0LVyMRR_" + }, + "source": [ + "Run it on each test image and show the results:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "DWh_1zz6aqxs" + }, + "outputs": [], + "source": [ + "def show_inference(model, image_path):\n", + " # the array based representation of the image will be used later in order to prepare the\n", + " # result image with boxes and labels on it.\n", + " image_np = np.array(Image.open(image_path))\n", + " # Actual detection.\n", + " output_dict = run_inference_for_single_image(model, image_np)\n", + " # Visualization of the results of a detection.\n", + " vis_util.visualize_boxes_and_labels_on_image_array(\n", + " image_np,\n", + " output_dict['detection_boxes'],\n", + " output_dict['detection_classes'],\n", + " output_dict['detection_scores'],\n", + " category_index,\n", + " instance_masks=output_dict.get('detection_masks_reframed', None),\n", + " use_normalized_coordinates=True,\n", + " line_thickness=8)\n", + "\n", + " display(Image.fromarray(image_np))" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "3a5wMHN8WKMh" + }, + "outputs": [], + "source": [ + "for image_path in TEST_IMAGE_PATHS:\n", + " show_inference(detection_model, image_path)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "DsspMPX3Cssg" + }, + "source": [ + "## Instance Segmentation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "CzkVv_n2MxKC" + }, + "outputs": [], + "source": [ + "model_name = \"mask_rcnn_inception_resnet_v2_atrous_coco_2018_01_28\"\n", + "masking_model = load_model(model_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0S7aZi8ZOhVV" + }, + "source": [ + "The instance segmentation model includes a `detection_masks` output:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "vQ2Sj2VIOZLA" + }, + "outputs": [], + "source": [ + "masking_model.output_shapes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "AS57rZlnNL7W" + }, + "outputs": [], + "source": [ + "for image_path in TEST_IMAGE_PATHS:\n", + " show_inference(masking_model, image_path)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "last_runtime": { + "build_target": "//learning/brain/python/client:colab_notebook", + "kind": "private" + }, + "name": "object_detection_tutorial.ipynb", + "private_outputs": true, + "provenance": [ + { + "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/colab_tutorials/object_detection_tutorial.ipynb", + "timestamp": 1594335690840 + }, + { + "file_id": "1LNYL6Zsn9Xlil2CVNOTsgDZQSBKeOjCh", + "timestamp": 1566498233247 + }, + { + "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1566488313397 + }, + { + "file_id": "/piper/depot/google3/third_party/py/tensorflow_docs/g3doc/en/r2/tutorials/generative/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1566145894046 + }, + { + "file_id": "1nBPoWynOV0auSIy40eQcBIk9C6YRSkI8", + "timestamp": 1566145841085 + }, + { + "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1556295408037 + }, + { + "file_id": "1layerger-51XwWOwYMY_5zHaCavCeQkO", + "timestamp": 1556214267924 + }, + { + "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1556207836484 + }, + { + "file_id": "1w6mqQiNV3liPIX70NOgitOlDF1_4sRMw", + "timestamp": 1556154824101 + }, + { + "file_id": "https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb", + "timestamp": 1556150293326 + } + ] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + 
"nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/context_rcnn_tutorial.ipynb b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/context_rcnn_tutorial.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..b735cfbcea0e2c5b7e7c44e706e68a59d98b68ec --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/context_rcnn_tutorial.ipynb @@ -0,0 +1,1500 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "context_rcnn_tutorial.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "jZc1kMel3sZP", + "colab_type": "text" + }, + "source": [ + "# Context R-CNN Demo\n", + "\n", + "
\n", + " \n", + " Run in Google Colab\n", + " \n", + "\n", + " \n", + " View source on GitHub\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XuHWvdag3_b9", + "colab_type": "text" + }, + "source": [ + " This notebook will walk you step by step through the process of using a pre-trained model to build up a contextual memory bank for a set of images, and then detect objects in those images+context using [Context R-CNN](https://arxiv.org/abs/1912.03538)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "u0e-OOtn4hQ8", + "colab_type": "text" + }, + "source": [ + "# Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "w-UrhxBw4iLA", + "colab_type": "text" + }, + "source": [ + "Important: If you're running on a local machine, be sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). This notebook includes only what's necessary to run in Colab." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SAqMxS4V4lqS", + "colab_type": "text" + }, + "source": [ + "### Install" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BPkovrxF4o8n", + "colab_type": "code", + "outputId": "e1b8debc-ab73-4b3e-9e44-c86446c7cda1", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 785 + } + }, + "source": [ + "!pip install -U --pre tensorflow==\"2.*\"\n", + "!pip install tf_slim" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Requirement already up-to-date: tensorflow==2.* in /usr/local/lib/python3.6/dist-packages (2.2.0)\n", + "Requirement already satisfied, skipping upgrade: scipy==1.4.1; python_version >= \"3\" in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.4.1)\n", + "Requirement already satisfied, skipping upgrade: protobuf>=3.8.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (3.10.0)\n", + "Requirement already satisfied, skipping upgrade: h5py<2.11.0,>=2.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (2.10.0)\n", + "Requirement already satisfied, skipping upgrade: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (3.2.1)\n", + "Requirement already satisfied, skipping upgrade: numpy<2.0,>=1.16.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.18.5)\n", + "Requirement already satisfied, skipping upgrade: wheel>=0.26; python_version >= \"3\" in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (0.34.2)\n", + "Requirement already satisfied, skipping upgrade: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (0.9.0)\n", + "Requirement already satisfied, skipping upgrade: tensorflow-estimator<2.3.0,>=2.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (2.2.0)\n", + "Requirement already satisfied, skipping upgrade: google-pasta>=0.1.8 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (0.2.0)\n", + "Requirement already satisfied, skipping upgrade: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.29.0)\n", + "Requirement already satisfied, skipping upgrade: tensorboard<2.3.0,>=2.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (2.2.2)\n", + "Requirement already satisfied, skipping upgrade: gast==0.3.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (0.3.3)\n", + "Requirement already satisfied, skipping upgrade: astunparse==1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.6.3)\n", + "Requirement already 
satisfied, skipping upgrade: keras-preprocessing>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.1.2)\n", + "Requirement already satisfied, skipping upgrade: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.1.0)\n", + "Requirement already satisfied, skipping upgrade: six>=1.12.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.12.0)\n", + "Requirement already satisfied, skipping upgrade: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow==2.*) (1.12.1)\n", + "Requirement already satisfied, skipping upgrade: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.8.0->tensorflow==2.*) (47.1.1)\n", + "Requirement already satisfied, skipping upgrade: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.7.2)\n", + "Requirement already satisfied, skipping upgrade: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.0.1)\n", + "Requirement already satisfied, skipping upgrade: requests<3,>=2.21.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (2.23.0)\n", + "Requirement already satisfied, skipping upgrade: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (0.4.1)\n", + "Requirement already satisfied, skipping upgrade: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (3.2.2)\n", + "Requirement already satisfied, skipping upgrade: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.6.0.post3)\n", + "Requirement already satisfied, skipping upgrade: cachetools<3.2,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (3.1.1)\n", + "Requirement already satisfied, skipping upgrade: rsa<4.1,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (4.0)\n", + "Requirement already satisfied, skipping upgrade: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (0.2.8)\n", + "Requirement already satisfied, skipping upgrade: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (2.9)\n", + "Requirement already satisfied, skipping upgrade: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (3.0.4)\n", + "Requirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (2020.4.5.1)\n", + "Requirement already satisfied, skipping upgrade: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.24.3)\n", + "Requirement already satisfied, skipping upgrade: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.3.0)\n", + "Requirement already satisfied, skipping upgrade: importlib-metadata; python_version < \"3.8\" in 
/usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (1.6.0)\n", + "Requirement already satisfied, skipping upgrade: pyasn1>=0.1.3 in /usr/local/lib/python3.6/dist-packages (from rsa<4.1,>=3.1.4->google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (0.4.8)\n", + "Requirement already satisfied, skipping upgrade: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (3.1.0)\n", + "Requirement already satisfied, skipping upgrade: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow==2.*) (3.1.0)\n", + "Collecting tf_slim\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/02/97/b0f4a64df018ca018cc035d44f2ef08f91e2e8aa67271f6f19633a015ff7/tf_slim-1.1.0-py2.py3-none-any.whl (352kB)\n", + "\u001b[K |████████████████████████████████| 358kB 2.8MB/s \n", + "\u001b[?25hRequirement already satisfied: absl-py>=0.2.2 in /usr/local/lib/python3.6/dist-packages (from tf_slim) (0.9.0)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from absl-py>=0.2.2->tf_slim) (1.12.0)\n", + "Installing collected packages: tf-slim\n", + "Successfully installed tf-slim-1.1.0\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zpKF8a2x4tec", + "colab_type": "text" + }, + "source": [ + "Make sure you have `pycocotools` installed" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "owcrp0AW4uCg", + "colab_type": "code", + "outputId": "001148a8-b0a8-43a1-f6df-225d86d90b8f", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "!pip install pycocotools" + ], + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Requirement already satisfied: pycocotools in /usr/local/lib/python3.6/dist-packages (2.0.0)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wHFSRVaO4wuq", + "colab_type": "text" + }, + "source": [ + "Get `tensorflow/models` or `cd` to parent directory of the repository." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "E0ZuGKoi4wTn", + "colab_type": "code", + "outputId": "2b5d93cb-3548-4347-9b76-ce12bea44a56", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 136 + } + }, + "source": [ + "import os\n", + "import pathlib\n", + "\n", + "\n", + "if \"models\" in pathlib.Path.cwd().parts:\n", + " while \"models\" in pathlib.Path.cwd().parts:\n", + " os.chdir('..')\n", + "elif not pathlib.Path('models').exists():\n", + " !git clone --depth 1 https://github.com/tensorflow/models" + ], + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Cloning into 'models'...\n", + "remote: Enumerating objects: 2694, done.\u001b[K\n", + "remote: Counting objects: 100% (2694/2694), done.\u001b[K\n", + "remote: Compressing objects: 100% (2370/2370), done.\u001b[K\n", + "remote: Total 2694 (delta 520), reused 1332 (delta 290), pack-reused 0\u001b[K\n", + "Receiving objects: 100% (2694/2694), 34.10 MiB | 29.32 MiB/s, done.\n", + "Resolving deltas: 100% (520/520), done.\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GkqRm-WY47MR", + "colab_type": "text" + }, + "source": [ + "Compile protobufs and install the object_detection package" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "62Dn1_YU45O2", + "colab_type": "code", + "outputId": "439166dd-6202-4ff9-897d-100a35ae5af5", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 54 + } + }, + "source": [ + "%%bash\n", + "cd models/research/\n", + "protoc object_detection/protos/*.proto --python_out=." + ], + "execution_count": 4, + "outputs": [ + { + "output_type": "stream", + "text": [ + "object_detection/protos/input_reader.proto: warning: Import object_detection/protos/image_resizer.proto but not used.\n" + ], + "name": "stderr" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "83kNiD-24-ZB", + "colab_type": "code", + "outputId": "aa148939-7dcc-4fbd-ea48-41236523712c", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 343 + } + }, + "source": [ + "%%bash \n", + "cd models/research\n", + "pip install ." 
+ ], + "execution_count": 5, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Processing /content/models/research\n", + "Requirement already satisfied: Pillow>=1.0 in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (7.0.0)\n", + "Requirement already satisfied: Matplotlib>=2.1 in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (3.2.1)\n", + "Requirement already satisfied: Cython>=0.28.1 in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (0.29.19)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from Matplotlib>=2.1->object-detection==0.1) (0.10.0)\n", + "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from Matplotlib>=2.1->object-detection==0.1) (2.4.7)\n", + "Requirement already satisfied: numpy>=1.11 in /usr/local/lib/python3.6/dist-packages (from Matplotlib>=2.1->object-detection==0.1) (1.18.5)\n", + "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from Matplotlib>=2.1->object-detection==0.1) (2.8.1)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from Matplotlib>=2.1->object-detection==0.1) (1.2.0)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from cycler>=0.10->Matplotlib>=2.1->object-detection==0.1) (1.12.0)\n", + "Building wheels for collected packages: object-detection\n", + " Building wheel for object-detection (setup.py): started\n", + " Building wheel for object-detection (setup.py): finished with status 'done'\n", + " Created wheel for object-detection: filename=object_detection-0.1-cp36-none-any.whl size=1141324 sha256=1dff68de415a4ccc3af0e20b8f409a73d147d79720a713dcdc30f9bc8d4ab3a2\n", + " Stored in directory: /tmp/pip-ephem-wheel-cache-rlyj8yrw/wheels/94/49/4b/39b051683087a22ef7e80ec52152a27249d1a644ccf4e442ea\n", + "Successfully built object-detection\n", + "Installing collected packages: object-detection\n", + "Successfully installed object-detection-0.1\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "LBdjK2G5ywuc" + }, + "source": [ + "### Imports" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "hV4P5gyTWKMI", + "colab": {} + }, + "source": [ + "import numpy as np\n", + "import os\n", + "import six\n", + "import six.moves.urllib as urllib\n", + "import sys\n", + "import tarfile\n", + "import tensorflow as tf\n", + "import zipfile\n", + "import pathlib\n", + "import json\n", + "import datetime\n", + "import matplotlib.pyplot as plt\n", + "\n", + "from collections import defaultdict\n", + "from io import StringIO\n", + "from matplotlib import pyplot as plt\n", + "from PIL import Image\n", + "from IPython.display import display" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "r5FNuiRPWKMN" + }, + "source": [ + "Import the object detection module." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "4-IMl4b6BdGO", + "colab": {} + }, + "source": [ + "from object_detection.utils import ops as utils_ops\n", + "from object_detection.utils import label_map_util\n", + "from object_detection.utils import visualization_utils as vis_utils" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RYPCiag2iz_q" + }, + "source": [ + "Patches:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "mF-YlMl8c_bM", + "colab": {} + }, + "source": [ + "# patch tf1 into `utils.ops`\n", + "utils_ops.tf = tf.compat.v1\n", + "\n", + "# Patch the location of gfile\n", + "tf.gfile = tf.io.gfile" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "cfn_tRFOWKMO" + }, + "source": [ + "# Model preparation " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7ai8pLZZWKMS" + }, + "source": [ + "## Loader" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "zm8xp-0eoItE", + "colab": {} + }, + "source": [ + "def load_model(model_name):\n", + " base_url = 'http://download.tensorflow.org/models/object_detection/'\n", + " model_file = model_name + '.tar.gz'\n", + " model_dir = tf.keras.utils.get_file(\n", + " fname=model_name,\n", + " origin=base_url + model_file,\n", + " untar=True)\n", + "\n", + " model_dir = pathlib.Path(model_dir)/\"saved_model\"\n", + " model = tf.saved_model.load(str(model_dir))\n", + " model = model.signatures['serving_default']\n", + "\n", + " return model" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "_1MVVTcLWKMW" + }, + "source": [ + "## Loading label map\n", + "Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `zebra`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "hDbpHkiWWKMX", + "colab": {} + }, + "source": [ + "# List of the strings that is used to add correct label for each box.\n", + "PATH_TO_LABELS = 'models/research/object_detection/data/snapshot_serengeti_label_map.pbtxt'\n", + "category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=False)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "oVU3U_J6IJVb" + }, + "source": [ + "We will test on a context group of images from one month at one camera from the Snapshot Serengeti val split defined on [LILA.science](http://lila.science/datasets/snapshot-serengeti), which was not seen during model training:\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "jG-zn5ykWKMd", + "outputId": "c7bbbb2f-0f6e-4380-fd92-c88c088bd766", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 85 + } + }, + "source": [ + "# If you want to test the code with your images, just add path to the images to\n", + "# the TEST_IMAGE_PATHS.\n", + "PATH_TO_TEST_IMAGES_DIR = pathlib.Path('models/research/object_detection/test_images/snapshot_serengeti')\n", + "TEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob(\"*.jpeg\")))\n", + "TEST_IMAGE_PATHS" + ], + "execution_count": 11, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[PosixPath('models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0038.jpeg'),\n", + " PosixPath('models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0039.jpeg'),\n", + " PosixPath('models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0040.jpeg'),\n", + " PosixPath('models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0041.jpeg')]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 11 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oBcQzptnQ-x6", + "colab_type": "text" + }, + "source": [ + "Load the metadata for each image" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ZLLINOHcQ-An", + "colab_type": "code", + "colab": {} + }, + "source": [ + "test_data_json = 'models/research/object_detection/test_images/snapshot_serengeti/context_rcnn_demo_metadata.json'\n", + "with open(test_data_json, 'r') as f:\n", + " test_metadata = json.load(f)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "BgGTPHhkOAel", + "colab_type": "code", + "outputId": "1421a32a-c208-498f-931f-1bfeb25d6488", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 105 + } + }, + "source": [ + "image_id_to_datetime = {im['id']:im['date_captured'] for im in test_metadata['images']}\n", + "image_path_to_id = {im['file_name']: im['id'] \n", + " for im in test_metadata['images']}\n", + "image_path_to_id" + ], + "execution_count": 13, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0038.jpeg': 'S1/E03/E03_R3/S1_E03_R3_PICT0038',\n", + " 'models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0039.jpeg': 'S1/E03/E03_R3/S1_E03_R3_PICT0039',\n", + " 
'models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0040.jpeg': 'S1/E03/E03_R3/S1_E03_R3_PICT0040',\n", + " 'models/research/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0041.jpeg': 'S1/E03/E03_R3/S1_E03_R3_PICT0041'}" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 13 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "H0_1AGhrWKMc" + }, + "source": [ + "# Generate Context Features for each image" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kt3_pPQOj7ii", + "colab_type": "code", + "outputId": "fc72e978-f576-43f4-bcf1-3eb49fef5726", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 88 + } + }, + "source": [ + "faster_rcnn_model_name = 'faster_rcnn_resnet101_snapshot_serengeti_2020_06_10'\n", + "faster_rcnn_model = load_model(faster_rcnn_model_name)" + ], + "execution_count": 14, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Downloading data from http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_snapshot_serengeti_2020_06_10.tar.gz\n", + "588832768/588829839 [==============================] - 3s 0us/step\n", + "INFO:tensorflow:Saver not created because there are no variables in the graph to restore\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "k6Clkv_mBo_U", + "colab_type": "text" + }, + "source": [ + "Check the model's input signature, it expects a batch of 3-color images of type uint8." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "H1qNlFESBsTR", + "colab_type": "code", + "outputId": "9b8b84e0-d7a8-4ec9-d6e0-22d574cb6209", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "faster_rcnn_model.inputs" + ], + "execution_count": 15, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 15 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eYS8KpRCBtBH", + "colab_type": "text" + }, + "source": [ + "And it returns several outputs. Note this model has been exported with additional output 'detection_features' which will be used to build the contextual memory bank." 
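The memory bank itself is assembled later in the notebook; conceptually it is just the per-image embeddings (shape [1, 2057], produced by `get_context_feature_embedding` defined further down) stacked along the first axis. A sketch with dummy data standing in for real model outputs:

```python
import numpy as np

# Dummy embeddings standing in for get_context_feature_embedding outputs,
# one [1, 2057] row per image in the context group.
embeddings = [np.zeros((1, 2057), dtype=np.float32) for _ in range(4)]
context_features = np.concatenate(embeddings, axis=0)
print(context_features.shape)  # (4, 2057)
```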
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "5M-1yxgfkmQl", + "colab_type": "code", + "outputId": "1da98c3b-79c5-4d19-d64c-3e9dbadc97c0", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 153 + } + }, + "source": [ + "faster_rcnn_model.output_dtypes" + ], + "execution_count": 16, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'detection_boxes': tf.float32,\n", + " 'detection_classes': tf.float32,\n", + " 'detection_features': tf.float32,\n", + " 'detection_multiclass_scores': tf.float32,\n", + " 'detection_scores': tf.float32,\n", + " 'num_detections': tf.float32,\n", + " 'raw_detection_boxes': tf.float32,\n", + " 'raw_detection_scores': tf.float32}" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 16 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "zVjNFFNIDCst", + "colab_type": "code", + "outputId": "edb46db0-05fb-4952-bc88-db09d7811b01", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 153 + } + }, + "source": [ + "faster_rcnn_model.output_shapes" + ], + "execution_count": 17, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'detection_boxes': TensorShape([None, 300, 4]),\n", + " 'detection_classes': TensorShape([None, 300]),\n", + " 'detection_features': TensorShape([None, None, None, None, None]),\n", + " 'detection_multiclass_scores': TensorShape([None, 300, 49]),\n", + " 'detection_scores': TensorShape([None, 300]),\n", + " 'num_detections': TensorShape([None]),\n", + " 'raw_detection_boxes': TensorShape([None, 300, 4]),\n", + " 'raw_detection_scores': TensorShape([None, 300, 49])}" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 17 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JP5qZ7sXJpwG" + }, + "source": [ + "Add a wrapper function to call the model, and cleanup the outputs:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "ajmR_exWyN76", + "colab": {} + }, + "source": [ + "def run_inference_for_single_image(model, image):\n", + " '''Run single image through tensorflow object detection saved_model.\n", + "\n", + " This function runs a saved_model on a (single) provided image and returns\n", + " inference results in numpy arrays.\n", + "\n", + " Args:\n", + " model: tensorflow saved_model. 
This model can be obtained using \n", + " export_inference_graph.py.\n", + " image: uint8 numpy array with shape (img_height, img_width, 3)\n", + "\n", + " Returns:\n", + " output_dict: a dictionary holding the following entries:\n", + " `num_detections`: an integer\n", + " `detection_boxes`: a numpy (float32) array of shape [N, 4]\n", + " `detection_classes`: a numpy (uint8) array of shape [N]\n", + " `detection_scores`: a numpy (float32) array of shape [N]\n", + " `detection_features`: a numpy (float32) array of shape [N, 7, 7, 2048]\n", + " '''\n", + " image = np.asarray(image)\n", + " # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.\n", + " input_tensor = tf.convert_to_tensor(image)\n", + " # The model expects a batch of images, so add an axis with `tf.newaxis`.\n", + " input_tensor = input_tensor[tf.newaxis,...]\n", + "\n", + " # Run inference\n", + " output_dict = model(input_tensor)\n", + " # All outputs are batches tensors.\n", + " # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n", + " # We're only interested in the first num_detections.\n", + " num_dets = output_dict.pop('num_detections')\n", + " num_detections = int(num_dets)\n", + " for key,value in output_dict.items():\n", + " output_dict[key] = value[0, :num_detections].numpy() \n", + " output_dict['num_detections'] = num_detections\n", + "\n", + " # detection_classes should be ints.\n", + " output_dict['detection_classes'] = output_dict['detection_classes'].astype(\n", + " np.int64)\n", + " return output_dict" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "un5SXxIxMaaV", + "colab_type": "text" + }, + "source": [ + "Functions for embedding context features" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "qvtvAZFDMoTM", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def embed_date_captured(date_captured):\n", + " \"\"\"Encodes the datetime of the image.\n", + "\n", + " Takes a datetime object and encodes it into a normalized embedding of shape \n", + " [5], using hard-coded normalization factors for year, month, day, hour,\n", + " minute.\n", + "\n", + " Args:\n", + " date_captured: A datetime object.\n", + "\n", + " Returns:\n", + " A numpy float32 embedding of shape [5].\n", + " \"\"\"\n", + " embedded_date_captured = []\n", + " month_max = 12.0\n", + " day_max = 31.0\n", + " hour_max = 24.0\n", + " minute_max = 60.0\n", + " min_year = 1990.0\n", + " max_year = 2030.0\n", + "\n", + " year = (date_captured.year-min_year)/float(max_year-min_year)\n", + " embedded_date_captured.append(year)\n", + "\n", + " month = (date_captured.month-1)/month_max\n", + " embedded_date_captured.append(month)\n", + "\n", + " day = (date_captured.day-1)/day_max\n", + " embedded_date_captured.append(day)\n", + "\n", + " hour = date_captured.hour/hour_max\n", + " embedded_date_captured.append(hour)\n", + "\n", + " minute = date_captured.minute/minute_max\n", + " embedded_date_captured.append(minute)\n", + "\n", + " return np.asarray(embedded_date_captured)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "xN8k5daOOA7b", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def embed_position_and_size(box):\n", + " \"\"\"Encodes the bounding box of the object of interest.\n", + "\n", + " Takes a bounding box and encodes it into a normalized embedding of shape \n", + " [4] - the center point (x,y) and width and height of the box.\n", + "\n", + " Args:\n", 
+ " box: A bounding box, formatted as [ymin, xmin, ymax, xmax].\n", + "\n", + " Returns:\n", + " A numpy float32 embedding of shape [4].\n", + " \"\"\"\n", + " ymin = box[0]\n", + " xmin = box[1]\n", + " ymax = box[2]\n", + " xmax = box[3]\n", + " w = xmax - xmin\n", + " h = ymax - ymin\n", + " x = xmin + w / 2.0\n", + " y = ymin + h / 2.0\n", + " return np.asarray([x, y, w, h])" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "lJe2qy8HPc6Z", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def get_context_feature_embedding(date_captured, detection_boxes,\n", + " detection_features, detection_scores):\n", + " \"\"\"Extracts representative feature embedding for a given input image.\n", + "\n", + " Takes outputs of a detection model and focuses on the highest-confidence\n", + " detected object. Starts with detection_features and uses average pooling to\n", + " remove the spatial dimensions, then appends an embedding of the box position\n", + " and size, and an embedding of the date and time the image was captured,\n", + " returning a one-dimensional representation of the object.\n", + "\n", + " Args:\n", + " date_captured: A datetime string of format '%Y-%m-%d %H:%M:%S'.\n", + " detection_features: A numpy (float32) array of shape [N, 7, 7, 2048].\n", + " detection_boxes: A numpy (float32) array of shape [N, 4].\n", + " detection_scores: A numpy (float32) array of shape [N].\n", + "\n", + " Returns:\n", + " A numpy float32 embedding of shape [2057].\n", + " \"\"\"\n", + " date_captured = datetime.datetime.strptime(date_captured,'%Y-%m-%d %H:%M:%S')\n", + " temporal_embedding = embed_date_captured(date_captured)\n", + " embedding = detection_features[0]\n", + " pooled_embedding = np.mean(np.mean(embedding, axis=1), axis=0)\n", + " box = detection_boxes[0]\n", + " position_embedding = embed_position_and_size(box)\n", + " bb_embedding = np.concatenate((pooled_embedding, position_embedding))\n", + " embedding = np.expand_dims(np.concatenate((bb_embedding,temporal_embedding)),\n", + " axis=0)\n", + " score = detection_scores[0]\n", + " return embedding, score" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "z1wq0LVyMRR_" + }, + "source": [ + "Run it on each test image and use the output detection features and metadata to build up a context feature bank:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "DWh_1zz6aqxs", + "colab": {} + }, + "source": [ + "def run_inference(model, image_path, date_captured, resize_image=True):\n", + " \"\"\"Runs inference over a single input image and extracts contextual features.\n", + "\n", + " Args:\n", + " model: A tensorflow saved_model object.\n", + " image_path: Absolute path to the input image.\n", + " date_captured: A datetime string of format '%Y-%m-%d %H:%M:%S'.\n", + " resize_image: Whether to resize the input image before running inference.\n", + "\n", + " Returns:\n", + " context_feature: A numpy float32 array of shape [2057].\n", + " score: A numpy float32 object score for the embedded object.\n", + " output_dict: The saved_model output dictionary for the image.\n", + " \"\"\"\n", + " with open(image_path,'rb') as f:\n", + " image = Image.open(f)\n", + " if resize_image:\n", + " image.thumbnail((640,640),Image.ANTIALIAS)\n", + " image_np = np.array(image)\n", + "\n", + " # Actual detection.\n", + " output_dict = run_inference_for_single_image(model, image_np)\n", + "\n", + 
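For reference, the 2057-dimensional context embedding built by get_context_feature_embedding() above is simply the concatenation of the three pieces defined in this section. A minimal sketch with dummy data (shapes taken from the docstrings; illustrative only, not part of the notebook diff):

import numpy as np

# Dummy stand-ins with the shapes documented above.
pooled_features = np.zeros(2048, dtype=np.float32)  # detection_features[0] averaged over its 7x7 spatial grid
box_embedding   = np.zeros(4, dtype=np.float32)     # embed_position_and_size(): [x_center, y_center, width, height]
date_embedding  = np.zeros(5, dtype=np.float32)     # embed_date_captured(): [year, month, day, hour, minute]

context_feature = np.concatenate([pooled_features, box_embedding, date_embedding])
assert context_feature.shape == (2057,)  # 2048 + 4 + 5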
" context_feature, score = get_context_feature_embedding(\n", + " date_captured, output_dict['detection_boxes'],\n", + " output_dict['detection_features'], output_dict['detection_scores'])\n", + " return context_feature, score, output_dict" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "3a5wMHN8WKMh", + "colab": {} + }, + "source": [ + "context_features = []\n", + "scores = []\n", + "faster_rcnn_results = {}\n", + "for image_path in TEST_IMAGE_PATHS:\n", + " image_id = image_path_to_id[str(image_path)]\n", + " date_captured = image_id_to_datetime[image_id]\n", + " context_feature, score, results = run_inference(\n", + " faster_rcnn_model, image_path, date_captured)\n", + " faster_rcnn_results[image_id] = results\n", + " context_features.append(context_feature)\n", + " scores.append(score)\n", + "\n", + "# Concatenate all extracted context embeddings into a contextual memory bank.\n", + "context_features_matrix = np.concatenate(context_features, axis=0)\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "DsspMPX3Cssg" + }, + "source": [ + "## Run Detection With Context" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "f7aOtOlebK7h" + }, + "source": [ + "Load a context r-cnn object detection model:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "1XNT0wxybKR6", + "outputId": "cc5b0677-cf16-46c2-9ae5-32681725f856", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 88 + } + }, + "source": [ + "context_rcnn_model_name = 'context_rcnn_resnet101_snapshot_serengeti_2020_06_10'\n", + "context_rcnn_model = load_model(context_rcnn_model_name)\n" + ], + "execution_count": 24, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Downloading data from http://download.tensorflow.org/models/object_detection/context_rcnn_resnet101_snapshot_serengeti_2020_06_10.tar.gz\n", + "724664320/724658931 [==============================] - 3s 0us/step\n", + "INFO:tensorflow:Saver not created because there are no variables in the graph to restore\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "G6IGGtGqBH6y", + "colab_type": "text" + }, + "source": [ + "We need to define the expected context padding size for the\n", + "model, this must match the definition in the model config (max_num_context_features)." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "4oh9XNLBjkTL", + "colab_type": "code", + "colab": {} + }, + "source": [ + "context_padding_size = 2000" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "yN1AYfAEJIGp" + }, + "source": [ + "Check the model's input signature, it expects a batch of 3-color images of type uint8, plus context_features padded to the maximum context feature size for this model (2000) and valid_context_size to represent the non-padded context features: " + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "CK4cnry6wsHY", + "outputId": "d77af014-769f-4e20-b4ac-bfdd40502128", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 68 + } + }, + "source": [ + "context_rcnn_model.inputs" + ], + "execution_count": 26, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[,\n", + " ,\n", + " ]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 26 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Q8u3BjpMJXZF" + }, + "source": [ + "And returns several outputs:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "oLSZpfaYwuSk", + "outputId": "63a3903f-529b-41f9-b742-9b81c4c5e096", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 136 + } + }, + "source": [ + "context_rcnn_model.output_dtypes" + ], + "execution_count": 27, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'detection_boxes': tf.float32,\n", + " 'detection_classes': tf.float32,\n", + " 'detection_multiclass_scores': tf.float32,\n", + " 'detection_scores': tf.float32,\n", + " 'num_detections': tf.float32,\n", + " 'raw_detection_boxes': tf.float32,\n", + " 'raw_detection_scores': tf.float32}" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 27 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "FZyKUJeuxvpT", + "outputId": "d2feeaba-2bb2-4779-a96a-94a8a0aff362", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 136 + } + }, + "source": [ + "context_rcnn_model.output_shapes" + ], + "execution_count": 28, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'detection_boxes': TensorShape([1, 300, 4]),\n", + " 'detection_classes': TensorShape([1, 300]),\n", + " 'detection_multiclass_scores': TensorShape([1, 300, 49]),\n", + " 'detection_scores': TensorShape([1, 300]),\n", + " 'num_detections': TensorShape([1]),\n", + " 'raw_detection_boxes': TensorShape([1, 300, 4]),\n", + " 'raw_detection_scores': TensorShape([1, 300, 49])}" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 28 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "CzkVv_n2MxKC", + "colab": {} + }, + "source": [ + "def run_context_rcnn_inference_for_single_image(\n", + " model, image, context_features, context_padding_size):\n", + " '''Run single image through a Context R-CNN saved_model.\n", + "\n", + " This function runs a saved_model on a (single) provided image and provided \n", + " contextual features and returns inference results in numpy arrays.\n", + "\n", + " Args:\n", + " model: tensorflow Context R-CNN saved_model. This model can be obtained\n", + " using export_inference_graph.py and setting side_input fields. 
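The tensor specifications in the context_rcnn_model.inputs listing above appear to have been stripped during text extraction (only the list brackets survived). Based on the surrounding markdown and the export flags shown below, the input signature is presumably the following (a hedged reconstruction, not the verbatim notebook output):

# context_rcnn_model.inputs (reconstructed, approximate):
#   image_tensor        uint8    shape [1, None, None, 3]
#   context_features    float32  shape [1, 2000, 2057]
#   valid_context_size  int32    shape [1]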
\n", + " Example export call - \n", + " python export_inference_graph.py \\\n", + " --input_type image_tensor \\\n", + " --pipeline_config_path /path/to/context_rcnn_model.config \\\n", + " --trained_checkpoint_prefix /path/to/context_rcnn_model.ckpt \\\n", + " --output_directory /path/to/output_dir \\\n", + " --use_side_inputs True \\\n", + " --side_input_shapes 1,2000,2057/1 \\\n", + " --side_input_names context_features,valid_context_size \\\n", + " --side_input_types float,int \\\n", + " --input_shape 1,-1,-1,3\n", + "\n", + " image: uint8 numpy array with shape (img_height, img_width, 3)\n", + " context_features: A numpy float32 contextual memory bank of shape \n", + " [num_context_examples, 2057]\n", + " context_padding_size: The amount of expected padding in the contextual\n", + " memory bank, defined in the Context R-CNN config as \n", + " max_num_context_features.\n", + "\n", + " Returns:\n", + " output_dict: a dictionary holding the following entries:\n", + " `num_detections`: an integer\n", + " `detection_boxes`: a numpy (float32) array of shape [N, 4]\n", + " `detection_classes`: a numpy (uint8) array of shape [N]\n", + " `detection_scores`: a numpy (float32) array of shape [N]\n", + " '''\n", + " image = np.asarray(image)\n", + " # The input image needs to be a tensor, convert it using \n", + " # `tf.convert_to_tensor`.\n", + " image_tensor = tf.convert_to_tensor(\n", + " image, name='image_tensor')[tf.newaxis,...]\n", + "\n", + " context_features = np.asarray(context_features)\n", + " valid_context_size = context_features.shape[0]\n", + " valid_context_size_tensor = tf.convert_to_tensor(\n", + " valid_context_size, name='valid_context_size')[tf.newaxis,...]\n", + " padded_context_features = np.pad(\n", + " context_features,\n", + " ((0,context_padding_size-valid_context_size),(0,0)), mode='constant')\n", + " padded_context_features_tensor = tf.convert_to_tensor(\n", + " padded_context_features,\n", + " name='context_features',\n", + " dtype=tf.float32)[tf.newaxis,...]\n", + "\n", + " # Run inference\n", + " output_dict = model(\n", + " inputs=image_tensor,\n", + " context_features=padded_context_features_tensor,\n", + " valid_context_size=valid_context_size_tensor)\n", + " # All outputs are batches tensors.\n", + " # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n", + " # We're only interested in the first num_detections.\n", + " num_dets = output_dict.pop('num_detections')\n", + " num_detections = int(num_dets)\n", + " for key,value in output_dict.items():\n", + " output_dict[key] = value[0, :num_detections].numpy() \n", + " output_dict['num_detections'] = num_detections\n", + "\n", + " # detection_classes should be ints.\n", + " output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n", + " return output_dict" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "0FqVkR3Agc6U", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def show_context_rcnn_inference(\n", + " model, image_path, context_features, faster_rcnn_output_dict,\n", + " context_padding_size, resize_image=True):\n", + " \"\"\"Runs inference over a single input image and visualizes Faster R-CNN vs. 
\n", + " Context R-CNN results.\n", + "\n", + " Args:\n", + " model: A tensorflow saved_model object.\n", + " image_path: Absolute path to the input image.\n", + " context_features: A numpy float32 contextual memory bank of shape \n", + " [num_context_examples, 2057]\n", + " faster_rcnn_output_dict: The output_dict corresponding to this input image\n", + " from the single-frame Faster R-CNN model, which was previously used to\n", + " build the memory bank.\n", + " context_padding_size: The amount of expected padding in the contextual\n", + " memory bank, defined in the Context R-CNN config as \n", + " max_num_context_features.\n", + " resize_image: Whether to resize the input image before running inference.\n", + "\n", + " Returns:\n", + " context_rcnn_image_np: Numpy image array showing Context R-CNN Results.\n", + " faster_rcnn_image_np: Numpy image array showing Faster R-CNN Results.\n", + " \"\"\"\n", + "\n", + " # the array based representation of the image will be used later in order to prepare the\n", + " # result image with boxes and labels on it.\n", + " with open(image_path,'rb') as f:\n", + " image = Image.open(f)\n", + " if resize_image:\n", + " image.thumbnail((640,640),Image.ANTIALIAS)\n", + " image_np = np.array(image)\n", + " image.thumbnail((400,400),Image.ANTIALIAS)\n", + " context_rcnn_image_np = np.array(image)\n", + " \n", + " faster_rcnn_image_np = np.copy(context_rcnn_image_np)\n", + "\n", + " # Actual detection.\n", + " output_dict = run_context_rcnn_inference_for_single_image(\n", + " model, image_np, context_features, context_padding_size)\n", + "\n", + " # Visualization of the results of a context_rcnn detection.\n", + " vis_utils.visualize_boxes_and_labels_on_image_array(\n", + " context_rcnn_image_np,\n", + " output_dict['detection_boxes'],\n", + " output_dict['detection_classes'],\n", + " output_dict['detection_scores'],\n", + " category_index,\n", + " use_normalized_coordinates=True,\n", + " line_thickness=2)\n", + " \n", + " # Visualization of the results of a faster_rcnn detection.\n", + " vis_utils.visualize_boxes_and_labels_on_image_array(\n", + " faster_rcnn_image_np,\n", + " faster_rcnn_output_dict['detection_boxes'],\n", + " faster_rcnn_output_dict['detection_classes'],\n", + " faster_rcnn_output_dict['detection_scores'],\n", + " category_index,\n", + " use_normalized_coordinates=True,\n", + " line_thickness=2)\n", + " return context_rcnn_image_np, faster_rcnn_image_np" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3cYa2B8uAYx0", + "colab_type": "text" + }, + "source": [ + "Define Matplotlib parameters for pretty visualizations" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "9F8okR1uAQ0T", + "colab_type": "code", + "colab": {} + }, + "source": [ + "%matplotlib inline\n", + "plt.rcParams['axes.grid'] = False\n", + "plt.rcParams['xtick.labelsize'] = False\n", + "plt.rcParams['ytick.labelsize'] = False\n", + "plt.rcParams['xtick.top'] = False\n", + "plt.rcParams['xtick.bottom'] = False\n", + "plt.rcParams['ytick.left'] = False\n", + "plt.rcParams['ytick.right'] = False\n", + "plt.rcParams['figure.figsize'] = [15,10]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YGj7nXXQAaQ7", + "colab_type": "text" + }, + "source": [ + "Run Context R-CNN inference and compare results to Faster R-CNN" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "vQ2Sj2VIOZLA", + "outputId": 
"1c043894-09e5-4c9f-a99d-ae21d6e72d0c", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + } + }, + "source": [ + "for image_path in TEST_IMAGE_PATHS:\n", + " image_id = image_path_to_id[str(image_path)]\n", + " faster_rcnn_output_dict = faster_rcnn_results[image_id]\n", + " context_rcnn_image, faster_rcnn_image = show_context_rcnn_inference(\n", + " context_rcnn_model, image_path, context_features_matrix,\n", + " faster_rcnn_output_dict, context_padding_size)\n", + " plt.subplot(1,2,1)\n", + " plt.imshow(faster_rcnn_image)\n", + " plt.title('Faster R-CNN')\n", + " plt.subplot(1,2,2)\n", + " plt.imshow(context_rcnn_image)\n", + " plt.title('Context R-CNN')\n", + " plt.show()" + ], + "execution_count": 32, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1wAAAFDCAYAAAAu+g+jAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nOx9d5xsWVnt2tXVfe8dYJhAGEYFRAxk8JFBER9ZUBAFFRQkCKiAJAcQRMkgUQUlKAxZFCQHA2EMAyoITwUe8BjCEGeGGSbduV3Vtd8fp77qVavWPtV9b0+4t7/v9+tfV52zw5fXPt/Z51SptSIpKSkpKSkpKSkpKSlp52lwaTOQlJSUlJSUlJSUlJR0pFJecCUlJSUlJSUlJSUlJV1MlBdcSUlJSUlJSUlJSUlJFxPlBVdSUlJSUlJSUlJSUtLFRHnBlZSUlJSUlJSUlJSUdDFRXnAlJSUlJSUlJSUlJSVdTJQXXElJSUlJSUlJSUlJSRcT5QVX0mWaSilfLqXsL6WcT38nHuRYDyyl/PMO8/fAUsrGlK9zSymfLqXcfUmfo0spLymlfHXa7/9Nv19pev7LpZTvlFIuR30eUkr5CH2vpZT/KqUM6NgzSymv3Un5kpKSkpIuG1RK+ZVSyn9MceObpZT3l1JuuwPjvraU8swd4nHpWFP8umAqx9dLKS8qpaws6dOUvZTyB9Mx70Pth9Nj1yS+ainl5tTm2qWU/DHapEuE8oIr6XCge9RaL09/37g0mCilDBunTq21Xh7AMQBeDuAtpZRjGmOsAfhHANcDcBcARwO4FYCzANycmq4AePQSlk4E8EtbFiApKSkp6bCkUspjAbwEwLMBXBXA1dHhzc9dmnwdAt1oipu3A3BfAA9qNdyi7N8F8IdLLty+C2BHLiyTkrZLecGVdNhRKeXYUsp7SilnlFLOnn7+fjr/wFLKl0op55VSTiul3K+Uch0Afw7gVtMK2TnTtntKKS+Y3m36dinlz0sp+6bnfqqUcnop5aRSyrcAvKaPr1rrBMDrAVwOwA83mv0aOrC4V631M7XWSa31O7XWZ9Ra30ft/gjA41sXblN6PjqAaV0IJiUlJSUd5lRKuSKApwP4rVrr22utF9RaR7XWd9danzBts2e6U+Ib07+XlFL2TM8Flj1uunvim6WUX5+e+w0A9wPwu1NsfPf0+ImllLdNcfa0UsqjpsePm451j+n3y5dSvlhK+bXWWH1Ua/0igH8BcOODlX1KHwCwDuD+PdOdDOCGpZTbLeMrKWmnKS+4kg5HGqC7+LkGuouX/QD+FACm2/D+GMBda61XAHBrAJ+qtX4WwMMxvRtVa40LmecC+BF0yf7aAL4PwO/TXCcAOG4612/0MTWtrP06gBGArzSa3QHAB2qt5y+R8T8AfATA43vavB3AuQAeuGSspKSkpKTDl24FYC+Av+1p83sAbokOy26EbsfEU+j8CQCuiA7jHgzgZaWUY2utrwTwRgDPn2LjPaZb1d8N4NPT9v8bwO+UUu5ca/0uurtRryqlXAXAi9Fh7OvcWMsEK6X8GICfAPDFQ5AdACqApwJ4WilltdHmQnR3yZ61jK+kpJ2mvOBKOhzoHaWUc6Z/76i1nlVrfVut9cJa63nokidXrCYArl9K2Vdr/Wat9X/coKWUgu4i6jG11u9Ox3o25rfpTQA8rdZ6oNa6v8HfLad3zC4C8AIA96+1fqfR9ngA39yi3L8P4JGllCs3zgfAPHW6VTEpKSkp6cij4wGcWWsd97S5H4CnT3dMnAHgDwH8Kp0fTc+Pprspzgfwo42xbgbgyrXWp9da12utXwLwKkyxsdb6dwD+Gt32+LsBeNhByPTJUsoFAD6Lrrj48ka7rciOKV/vAnAGgIf0NHsFgKuXUu66PXaTkg6N8oIr6XCge9Zaj5n+3bOUclQp5RWllK+UUs4FcAqAY0opK7XWC9DtB384gG+WUt47raA5ujKAowB8Ii7o0G1L4AucM2qtFy3h72PTO2bHAngXumodSilXL/Syj2nbswBcbStC11r/G8B7ADyxp837AJyOgwO8pKSkpKTLPp0F4EpLto+fiPmdFV+ZHpuNIRctFwK4fGOsawA4kQqd5wB4Mrrnp4JeCeD6AF5baz1ri3Iw/fh0/vsCuAW6rfiYvgwjcPN+2JrsTE9Bd7dvrztZaz0A4BnTv6SkS4zygivpcKTHoavM3aLWejSAn5weLwBQa/1grfWO6C5sPoeuMgd0d4SYzkS3HfF6dEF3xemDvGj0adJ0m+AjAPxqKeUmtdav8ss+ps3+AcCdC72BcAk9DcBD0W3raNHvoQPDo7bKa1JSUlLSYUOnAjgA4J49bb6B7kIp6OrTY1shxbmvATiNcPGYWusVaq13A2bb518J4HUAfrOUcu2esdqTdvRWdPL9/vTYXQk334ityc5j/j267Ym/2dPsNehecvXzW+U1KelQKS+4kg5HugK6C6VzSinHobsoAQCUUq5aSvm56QXNAXTbJibT098G8P2x/W76kotXAXjxdC86SinfV0q588EyNt3f/mrMPwfG9Hp0YPa2UsqPlVIGpZTjSylPLqXczYz3RQB/BeBRPXN+BMB/A3jAwfKdlJSUlHTZpFrr99BhystKKbHLY7WUctdSyvOnzd4M4CmllCuX7idGfh/AG7Y4xbcBXIu+/xuA86YvjNpXSlkppVy/lHKz6fkno7uwehC6Fz
[base64-encoded PNG cell output: side-by-side 'Faster R-CNN' vs. 'Context R-CNN' detection visualizations for the test images]
/PYHQv+MKV/YoSqHG4eVC0SVuJT305OVMrrLKTfut0Rf6wXa1vDsEcOc7wKbvC/E68E7MeWmahveXUABvrldyaJqm1Y5GAFVGAG+/3xdZt6OuOmaVs+a9h/QD0a8SCHUSPfOrjHzWZxW8zfnIDXWp7NApZI2OVI5N0wQ/pE3Ko0mcPQYWiMj7HPJUgPUg6FdMvIz1XP4qyfOd9mPRbZURxBMDZP3ux+j177FDJ6/+Gu2/nh+zdb776KbqXcdDnd5fVI/at9gYqddH+bwMvE668vml48hvz5E6kdrtdk/myMbM0iy1RCZ3HUdeHkcadiI2G8N+35+OIzuOfLYbLgxDIxaqNB2ICt9Hr3QJNkYsMXCNGaqvy1+vDumVoX1RA9K3v3ti9ADsiYw2tM8qp5gs1RgfOdlhqFXV3omKB0LVEPnsyVejXSo/8qj5Lc9zMzNbrVahHX1w1sxCzjO7KdEHyAMgY5cl2tc+etl5clEApJ8+3QaA9vaDXTLOWP66AjL90JU/LeiN49i+viBW/5Efrf6BfSowUrf2U0FQ++rJiUmZtoOdEqVDJ3Vdh1z2PM9ts9m0bF7HXNd1eIYrFXv3kyvv39hbjERivh2zffQNlui4YgTj6/ZkRTux9n1RbIqRCG17fDlFiNq21umfl9FImvZf24xNCPw5qhetJxat89fw3f/mb8i78nml48jvwJGH4ndr/BqOLOChJLX0cIPUceTlcqSZhc1TOKYY3HHk5XLkObk9yw2XGsupu3FvKLHOmrVBg+OnjM8L0B/T+nXJ95QB+M8xZz1nPP6YjoFzFPh8e754cmqRZsP1j+WhhOflGTtXnVGjR1xfFEUASQU35Kp/aY9InW5jC/grIXh56U2gyoE2qFsJOLa9MtFEvRYS1IgcTuUjJb5Or2fVh0bFfB55zMn5rrLWdvQFmB6INVqpgED/NXqn8tVnR0hR2Ww2geB5AaROpmL+pfpS0vD91N9Vdj4C7H1HJ0Y68YtNxrQfugWv4o3Kx0e/PAh7kNXrz/mpH5/XtydbPvv+6G86eUAGp/LMPaZou9gDvuzx1BOeJ02Py135utJx5LfnSP9Zx/7FHFnvP5dVab267jgyUuelcCQtNfbYl9SPO460Vl0dRz7TLoUYUyyCRge1Y77DalAMWuvzdXqiwDBijqnn+/r0fB8Z8f0GmDyhsNStgKP9onjDUTLw7XnjVwJ8RAr2eOnVy0nHoX3Sh3m1b9qeAlpd163nCRSwAVR9CSP552mahpxp1bcSiBqv15vmlHMM+dC2khDt851jp5zap+poGxrB4btGjryDQiSnrkXuKj8/9pg98BdQAUCIQvpIIvW3cs9lxyXkSkSa39H3I/K0tg/HiEbHEItkqu7PXU/xESZt+5RNozcFZHSiMtF6qMM/3K51nvJrvp+aJMeKxy/VrScxre9UxE3r9LajKSWxumP90vM1kv05Y+vK49Jx5PflSK2PcXs5fYoj2TSjqY+rkh1HXiZH1sjmRB9iNttx5OVw5LnyLLsUeoPh+CmwjBmDFzAAYPY4fzOmTBWWAquCY6zvMcHG+uKVcAq8zxkDhq5AdC4a58s5I/DjiRG1gpXKTOunXxohIxKVZZkVRfEoQgRRaISu1+uFlzZSH0TDWAFYjfx5olJSUPIws9bDrQoiSia+Xi9nrtXoiKZI+LaVIDTKqQSjNqD5555kYoBCWxpV9BMt1bMSh+8j1/vJXsxXNbedc1sRTTE7T6Te9vyk6JStxgA7Ri6eyHykNea//vM5IlI5x3DC/+YnPz7qp3qM9e/U+GL+7Y+pjGKRzpi8PL748zxm+QmSJ6qufFnpOPL7caTvy9dyZGJmKp2OIy+XI8NxZzexvx1HXh5HnuPJZ0sp1EY8SMY674sfbGyQMcONERFFI03njCrWF21HJ5+xtrXEDNw7mjeyGPh7Y1FZq4OmTr4eeLSvGg2MyUT7PJlMbD6fh9+oj2iPgrZ/yNd/b5qm9e4QNXCiT4Csj7j5NAz6SF3Ih3YgIG2DumLkq7I+1a7+jQElRKHApBOpNE0fveNKyUjB3vfLzFrypg7fH7UJBWnq9/3S8TJJ4BxvCxiRFmIAACAASURBVHVdh5Qa/1usaA5+DAxjAH4KqLQ/Hku831BiE0ElAD1+inhix9WnPJ55EtXjsYml1q02HZvs+Yix1uPHfYqMPiUnf40SJ8XbVVc+v3QceSzfiiM9RjyFI/24zTqOvHSO1KL+feoc317HkZfHkc+6LXwM/FSICkxe4LEtK/31p4Db/37qszqSjxxoW6eOK0H6h/jUQWL9568uP6qivWJj5BxTvjfsmOGeIzJ+j0VIe72e7Xa7cG1RFCE6Z2ZhuV3TFSARfd8FY/AAppEs9OLJhnYUFLxuvONpDnesTi8HwJy2fFTKT45ielY5+2Vvs+PuTPqCPcgvZqM6fm8Pqj/65glUU1xC7vmhXSZZmr9PXUo6/uF3bfuUvcYmSnqOt8VTIEtdyEcji57Q1Gb95DPm0zF/V6KIyfkUwehvXo++nAJ0D/gqo1Oy9sfPYZ/XifedU330Y4jJpyufVzqO/D4cqWN4CkfG2u048jI5sj42eHKDmI4jL5cjz20q9Sw3XNrRc4CsHVRQiAEn15xSrv9dAUmBnfM+tVTor+U8vaP20TBfPmcy4kHCjyXWz7jCGy58JEcPhp8CJe8IOK9Z26EVwHjYN0mSELHL89yGw2ErV52i4AWAe3A+lTahjqX53HVdt9pQIlBy8nV42+A756VpGohTx+xtWGXo88wV5DX1gXr0AVbVA/Wfszev0xhZel/zhOmjjSoLtSfkQUqhnwT5v97OToG6jl3BEx/QiUbsulOkoPJRMNWot/ZRJyF6/BRQ+wmNH7dvx197yhdV536MMVlrHbEJViyCG5uQesLyY4pNJLrydaXjyG/NkY8nV1/LkfCsJcd+dBx5oRzp7Mzzhx7vOLLjSC3PsmmGVy7CxDAoKij/ux/UqTYoXqAK+vzuI3TeYPz3T6Vr+Hb1blaP6+49MbL0MgOMABit7zRBJeF/ztF3XOi4PDCpTGPtJEli6/Xarq6urGmaEMFDr1mWWZ7nNplMwvs4+v2+5XkeXuSYJEkgG/Sjy/YakVMiV2D0cjVrR37Rn0ahYhMFrlEb8Y7p/0JSHqBpL9afpmk/VHoOaHX83kf8A6tKBFo0qhsDVM051z4o0cZs3EeFkyQ5bpphj/3CT160eJ/09qZ9ixGQYopixanIX8x/ud4f9zKJEZSv2/uzH4vqJkYgvk6VnZ4fk5OXl++nv0bHrPLS6LKWGNHG7LcrX1Y6jvw+HOnxgna/liO1dBx5uRxJLY3Uo9fGbLfjyI4jzZ45pdDssXB8J72D651nDMxjbZwCwNh5KEgf8ozV6RXpHcaPBTBQElBQw+A9mOsydkx5p4qOr2masGtSXR8Nh774cezPe7z7VMwwubau69Zb7rmGyB3AnOe5DQaD8I+cdH+eyt1HWJFjTH/eWZCh5r5rFM8DZMypvY1iG7RHHTpRUNnEiE4nD/64rzcWbSHy6FNyPDCoDT
E+P6lR+/LkpN95jmC327VsS/WkYzAza+rHDz57O/UPF2vU6BRgqo2o78SKJ2uVUawd5B5bFYhNLGLtcszbiteX2vE5/46RsNpVDHv8uPQ6f55fidAo/Lni6z23ktKVzy8dR357jtT6kPvXcmTi2ug4suPIxE5wZMS/vJ12HPlflyPPXf8suxTGDEqXZs3iZIAj00nAyud+c64Cha+La2JkFVOG7wffvWH54iNlMaPxbauzaBRGyc1HZ7yR6+fWeGRb+LIsrdfrtQhFndRHSmMRQD5DTHVdW57nliT7/HR2PVIHJk9dyYStbrUPSn5m7QdVY85MX/wyt4K91oujA8xqN3Vdt873Tu/TN9CxP6ZjUN3QBu3qBANSgPSa5vgOEuqKrSopKHu5MZ4kOT5U7VNJvPMjMw9WkD999uTh6/ETIQUqve7chOnUJE314Z9Z8bbvgwbet/0YfL/U9nzxmKFtavF2qe3pd+0Px/RZhViU1ROv76/qWn3E+5SmPiHbmL/FZOf70ZUvLx1Hfh+O9P7/FI4Mqxn7nMKOIy+YIz1b+DpiNt9x5L5cOkc+2wqXF5wng1Pn0Xn9zRuWmT0ClBjw81lBk6ICVyVoH7witc4YyajReIP0TqyK9O3GDDdGJK2xHz6nYhwqPyXqmOOfkrPZHkQnk0mr71m2f/kfueiDwcBGo1FIkdAIHufQjkaPPGjGJiIqC6JTfnKi50IeGh1TkDez1m/kzut7JxTAff98JMTrXX+jXk9WHshjdqqTAiVrJT8lTa2j3++3CNODsY5R5ai7lJ0CGL0uRpyceyod5VNg7MlSf/O+orbAtfzu++T1hhz89R609RjXaPH1aXuxSWzMzmN1KXb6cWuhbiUMfw56jeEncvVErnLXz94muvLlpePIb8+ROnFUn9HfP5cjfek4st2/S+LINHls397+abfjyI4jtTzLDZcKG8Cg83qHrefw3ed3++i6CjfmxDpgPebz1fkeU9I5Yfrj3kEUnD2xeKfyY/PFAwjH/PF9HY/7TrSN64jmER1Qg4+1S93k1/PZzEJkjjzz0Whkw+HQBoOBjcdjy/M8kIw+mEsfkD1b5tIP3hiPfgF0dQL+KiEwBv1ddaE68//UGb3jxyJRftekmL0rQPnJBfUhV28zlKIowvmqR4qCpZdPzB/8JCIWIVS/ybLMdrvdI5BJksQkp6ZlL2matvRLORWB17b1uLdLiFdJVEGfemKgj5z8BE5J3o8vNmHT/p2ajGrfz6UjxOpXDKQO/CXWfkx2KoMYOcReXBzzfz/pjJXYDUJXPq90HPntOTLmD1/LkaF920+4O468XI7cunP0L587jrxcjjx3zrPccMXAP9ZpL1AVkApDz1EBqSOfG5Qul7NsHRM05/goTiz6lyTt/FMlBr1jj8nFF0+QumLAbwr+MTmQ5FCLoccIHAJYrVahXh27gqqOR5f1NWIHePAQMORByoTmjUMK1IcuID21B3VECJB/fhKhION1QJ1KaEro3iHUEX1dClIeeDwQIFvGqITiU4IU3PS49lWfpyCax5iwQ03D8AAHeejYYyDEhCHLskBmKhc/+UnEfnlZpT+naRrr9/tWFEXUJ2Lg5mXMbx4ztO963BOJnufTLvxkjetbL7V07atteXtCN34C6K+N2V0MQzzp0T8dd2zSeiqSGMNiX3Ryovjj5fYpwulKvHQc+e050vvjUziSPDLq6DjygjkSu7LHwQ9vvx1HXh5HnivPvmmGfldD8AM/NzhvVP54DFhV2doO5+lv3kB95CBmwNTnDc5HcnxbnOPrMzuS3na7DQrM87xl4D5qoM5rZpalacuRAHAfkVIy9jLTY/xbLpc2m81CtE3JAcLQuqmHc/h9NBoF4Oz3+wEYiYhpv0hj0PpUtirjmH4hITOLRitjEwrVOf9ikTWKgr5eqxMDioKYRs/0XHXaU+ktFAVJ+kk93CxpXxiDTiL0en6PRbRV7/sTj9fGcsf1s05oPMnrOQpgsUmj9k1l5CO3vg+0i6/GCM2TkX+HiSeB2KRD6/P1xnQZsyc9R2Xg7cmPWXXs6+H7Kdw5V7Rej69fWldXjqXjyPY534IjY/jytRzJCn9THyfvHUdeKEfShj3mLpV3x5Ht+ny9l8iRz/YeLrPHd6R6N+mPafHCPFU356qzK1CogerdMMfVMNWYYmTj+81nfUhT+3/OobyRNs3xre9ExDabzaNlUurWXFPdTlf7forkttutDQaDlgzVUdM0tel0akVRhAhfmqY2HA5tOBy2ABqiUIfJ87wVKSPy4AnazMKb2mlXZaHRP9oiokQEsWmakPIWs7HdbhcijQoESnScS/3IUPuqETgvU42KKKl5EinLMkwM/FbGfktb9Ir80JO+VBNyVjtTgNFJhD+P4qOICu6etIi4BvJMkUPSsgXthwdelVUMCzhPH9RWX9U6vdx1TEoc9EcnVDFiiIGjnyj4yZhO6ugbv6v+1f5VxhT1Q188Keqx2CRIj8cwTicKPprtxxqbGFP8eLrydaXjSGud+9+TI9XXzexJHEmp6sr6B5l1HHmZHKk+0OJIZ5cdR3Yc6cuz3nB5B/a/xb574XmQ1siDrwMBeLLgXH7HYPV4zKgUPGLC1j6ZPU730Bx4DEoVyAOz2+3WzPago+AbIyTqpZ2Q9xsM+die2dGQR6NR2M5UncQ7qdmecHq9nr148cKqqrLtdmvX19eWpmmI3uk7NzQ3XYEb4lHZeQLTZ4SQDwTAP16miP407171i4EzdgVVZKHHkQ1yoI8aiVL5+5sQ72CxiJ0CqYK2Tl4Gg0GI5imIq9ywe0iG+pRUFIypi89+LIxZyaosSyuKopXWoPJV0PH1aVGgiQGkykrPUTJELzE/0Ikc/kD095R+lEBi/h0bi8euU0TqCUWxJIZhftxKtNio5pH79um71ql/tZ1YdN/3Xc/Xnet0khebCJ8is658fuk48hty5KHgR0/hSKR6c3Njyf2040jrONIcR3qc19Jx5OVw5DmefLYbLm1EheePnxK0ClKdQUEpZoj6u48QeAJQo/S708TGoIrQO3cFKwU5lKFbkKZp2iKR7XYb+kOkajQaWa/Xs/V6bWVZ2mg0ejRergnfD/0a5INWVE3Hk2WZvXjxwlarla3X65aB0Feum0wmlqapPTw8hC1ukRn90eiXgh7RN823JlIXczaAj/SIXq9nw+HQkuSYigHI+7fN0z5Aq/LxEwpPEoxBJwTIiwipytw7sIKmymK320UBEvDWZXvtEzn76tgApu7CpP3VPGhkjowgErVLBfmyLIOOfP5xkiQ2HA5ts9m0nu/wK6pN086jRq+Qnvov49MJBjKj/2pnalPIhAdkfcnzPHxWQPYRJiUnH0nUiZnHmVPE5nWI7PirY1RAVxnqQ/raT61DccqTh8dDJSePZb7/WnQizjXaVz/+c0TSlU+XjiO/LUfSpzTdp5M9hSMpg/7ANoe0xo4jL5MjV27MuqJqdtz9ruPIY+k48jCus79+ZjlFGAxOB+qJxndY69PlXX8edcaASs/RPqlRcTwGkHqtlnP1aFtmF
lINAJP5fB4AljFSF2kHaZraarUKoKI53I/6cvjLA7n0BQfNsizkvUNmEAppCEyuX758aZvNxtbrtU2nUzPbO9p4PG6NiX4rMHAMcoQEFBBj5KUPD/u8dz+58G3q74w9lsrhZadEoOc0TdPaqjZGGFpfbFLideSjaCovBfQYyVAfNkA/dOw+WuZTBLRO9RXsgjQUnQCgL/0Xop7JgWAlOqoPedOOL/4YcvTRWPqtxKG6YjwKgE3Tzl9XGSrpcr6fEKrPet9WWeskxU+I/bWeMH2dagN6Xuwv+kK//KaErrrTnbC8j6oeFJdUB1yL/Gg3pseufFnpOPLbcyTyzPO8tVLyNRyZpqlVZjZfzO3msB18x5GXy5GHhoJcdZxsP2/WcWTs2kvmyGdPKaR4wI0dV4VrZIR6GKwHIk9S1KuftQ2KpjNonaqoT92hal16vjeCoijC28npFw/EqrzSNLX7+3vL89yqqrLJZGJ1vd/6EsDd7XatyEzTNGGFq9/r2Ww2a+Whl2UZ6iFqMxwOw2d1JracffHiRThnOByG6BiyIrrGOFRf9M2nfmC0+kJdM2vt2kR+O3n6TdOEyAzL+TiGj75QJ06ETJUkFEg0+qkApGDgAVj16a/1kbpTgKRy12VxTXNQm0BvOja+a1SOf17GGplDZ/zVfigw+5UsJWiVm26JjA6LomhFrgF+PyFQ4vKTN/7y4lC1UQ9w9A9dq561bi0qV+TgyVfb1Do8Lp0iCOpBJv5cPvuIpmKbPzfWDvLRyYR/fkH7qpMBJijoykeJfR3djdbzlo4jvx1H+hcNP4UjN72eFWY2m80sX+cdR0bs8VI4si+po6w0ohd4pePIjiNj5ck3XDiVEoMCNb9rp2Kd1u8qaH+XrefzWdv1d/V6DvUBclyjhtI0TWuZ0t+50z+N9HAt36uqCuCjqQSqWDV+jYgURWFZloWXKvJ7WZYhkpNlmaUHxxrkuY3HY5tOp7Zer22z2dh4PA511nUdgBoQJ2WDNpbLZSAyxpLneWunJVI7GINGCpIkaZGkWXsiwDj0gWfSJJRYYsaL/P3yvgKt6iIWDeYYfY1NAPy/wWDQcjpPTrEJj/8es0ONSAOOpDFwnY7Xg61vi/ErUDMm7YMHdx42VvnQN42gKbkeOhLsgfaUzOmrRnI5prrx4K/ypf/qe/q7XqNRTA/yMf9XH1bMUjl4Pccmuh7bfH904uvxKjYR5hy1/3MTXN9Xs2M0FV0PBoPwfIq3G9pRe4z5iL9B+NzJdleOpePI78OR/X7fxuNxqOspHEkZjUbhpq3jyMvkyFJuKtVPkIEGDzqOvDyOPFeefMOFgyBcFZRG2bSD2jFPMnqH7g1E2zA7Oh51K6AooWHUapTathoPv2l0DiNRo1HwUwfGCb0haOQQR9SicgL4AQhNpwA4ksMaV3mInHAuIAhZLBaL8J4RgIeoIFGlyWRiTbPPKdddl4bDYSAzQB+yQf4qN00RUBugTcYIMfnvGDQRBY2qqqHrMSUKrtfIFN8VFLATtRWveyYDRFI1ouSdWG0pSZKwa1IAaImYcq4HLe2vjisma/SmfsAxjeopICowA6R5nofI2znQ09I7XMf5XBsbp58M+r6of2ibTA4UAxR8Fdz1uPdl1YG3E8hOwV/twsxak4jYzYZOCHR8flzoSNvBzrxt67WKZ9QRa8fbuUby9FkPoq4qP198ak5s3F35stJx5PfhSDOz9Xod2nsKR9LHsihtJKsUHUdeHkcyImxc/VNXkzqO7DjSl2d7hgvBKlCfIgz/mQH6ZW41Rm+kXK8Gr8qNObvWG7t717tpPzYAW+96vXK1be2D76/KREENg4AYhsNhOJ9c8yDLw9CSg/PxEr3RaBSih4PBIEThFKTMjjmqWZaF9AjdXpZIwN/93d/Zzz//bEVR2Hq9tvV6HVbE9IWZRBfVuDXyR8QPfQ0GA0uSpBUVBEi8kXtn53ydICBTwEf1Th0edFTnPm3C25Ae807mr/EgcGoMCiKMB3kpOaufMCZsV5+70rF7svK2TyHKi/w9idHO0e6O0T+1I+rysvXkof2K+YCPbPlrNGLuiQq5aB9imKGTBp0UAraa507bOh50oIR6CnhV5tq29kn7Sd3eX7Uu7Ia+q54YE/Vrm36CoeNT3Z+y8a58fek48ttzpN4QPpUjg1zNOo68cI7041Uf0To7juw40pdnffExzq2DP3WuGrj+hmJ9FC5GUhQfCYgpL6asGLjEwIPiU0IUAPldIxiqfO1PXdfhYVu2ZKc+QHy73Yax93q94PRE1bL0GAEgT137NBwObbfb2WAweAR0fuxmx0iFpjSYmd3e3trPP/9sw+EwbIP78PBgv/zyi/3pT3+y9+/f22azCedr5Ix66T+kQr/RhdoCzul1ii1AYlzP7keQlDoLdalDaxQFmXhZeNtSQPHg7O1HnR156DVapwdY7Y+OQcEP2ejEA/noRARZqCxVjloHn73tqvzD9cnR/9RuPShhBwr21K0TBj/JYxLiwUx1C4GpfNTmaF8j+LEILalL9EEj2VpvDNhVTopJGin2duPtR4u3qVN697pR+TFWcvzxER2j2qpiV2zy4W1T2+7Kl5WOI78DRx4mW6QHPpUjzSysZHUc2batS+PIw0AfTcy1D+i248jL4shz5Vk3zaBxT8wx59PB40BeKDGC1/PUALXt2J2y9k//xiaWMSDSSJfWmyTthxJjkQf/GSXmeR5yhGNRx7qurSgK2263lud5SFlIkiSscKUiJxxEgcID5ykHT9M0vG9CU8b+8R//0f7pn/7Jrq6u7Obmxn7/+9/bH/7wB/vbv/1b++Mf/2j/9m//Zv/8z/9s79+/b8medki9APD1hi42MfB6ZVzq6EqajIuUBmSr0U2v/xjIKtkoaMTsxgOcBx1IW6NASoaehJSwPHB5AjKzVlqE+onKXknNA59GXbUfPs9fo4fBX9J2vjZ/Y6Dr/Vd9QwnNpxyYxf1Bz9FrNPVBz9Gceu2fRr00f/6UryIbjxEqG7UJ/irRezs/RfgeyNVfT2GrRjxjtqPnqa6V1P0kRvWnOvsUoXTl06XjyG/DkVmWhWufypFBtoeVsY4jL5cjE3dT8Ygjk44jOf8SOfJcebYVLgSmd9FavAN5A411mvpU4b5dPa7EpGDif9fjMdJB6D6CFCMes+PuJyhUDUwdS0FQ3wav/fUGpO0VRREcJ7QtOzEBuqQI4jTqUGbH5Xja1HxWTXvQt9Lvdjv7+PGj3d/f27/+67/a1dWV/fVf/7W9fPkyvC1eSQ07IIqgaS2n5KmA6SOsOgZy5JGFgqhf3vf2kiTJo4iF/qY68CCttqQ2r/32bamTe8flbyyK53P0lVw9Ceo5atMeMCjqU2qvmo+t4Kt2nMpn3m0TI1c/IWNiwQRJZUx92Ig+XI6NMmafMhNLDfARRR23nyAqbiErP2aVh9qrtkXdijvn0qd83XqeTqCQifqrgr72y5+ndXvCiZFHDE8VN3Ry1JUvKx1HfnuOBFvhxqdwJH+pu+PIy+XIWnTE+NCpn/B3HNlxpJZnffHxqU75
z96x+F1zT/WOWyd/vj7fZuxc388YcMT6FQMU//I//cwb5+mHkpL2T3/Xfnrlq8zINdcd5MzMGnEOgLZpGttut48iSHqeB3b6rzrAqQECIhh1Xdt8Prd/+Zd/eSRP5AWJePmpAatzKaHoEj7pEXqsrusQqfPRQeSo0SEfdfA6V9ulaH0qN99f/T0G8vpdbU7/Me7YZMqfT0EX+psHCk9EdV0HWRIB9CkCqn99gDTNjs9wAUoqK3Si6Rr0PZZHrfJTW9dVNi8vvxOWBgp8nafk7nFKJ2ZKnip/LWq7jEXtgnMYR2ySEeuXEp0nZ7UHb3candU2Yr6lffff9Xra8+PyeNWVLysdR35bjvQ+8BSOpJ3+oN9a3eo48jI50myfZKQcCe6rnlRWHUdeBkeeK8/64uMYMMQIhUGoE6rR6vlqACpYflcD8NG2GHDSngfyU5MJDzTc5bP0T33eIRWA9KFJNcRerxfy01Ve/h/X4Uzkrh96GK5VR/TpAgoGvV7P8jwPgAJwhsm1fAfQ2VCDdgaDQWunKc03J+JH5EEjEErQCqTeydUuaJN/1EX/yX/X9zroX3VGnaxgN+q0qju1PY1I+gkHMtKJjLbF9fqbjySprSkQaL1KNpwTi86hJ4qSCr8hKyYg3g4YbyC5QxOawqq2ouPhO3aiulDfQa86sarrOuhS9aK2o+Skfqp2SF98HrvKX21NSU8ngYyL8ai+dPLiJ7p6nvZP8dFPnjlPx+ntCHl5+9PrFeO83Wsb6Fl/V1uk6Ng/RShdiZeOI78PR7JJAdd+LUeG0TXWwr2OIy+PIxPs5hBA0Bs3728dR14eR54rz7bC5SMVFC/EGGBnWRaWUP0Svnc0JQbflgKVCpeHbDEWv8zLtTGQaJr27ksc7/V6NplMArBj8DgJMlFQpW5A0Gy/5E+6AQ+3ah/0Rkg/0/c0S1sOpIahzqH18U+NlMijlydGxMPFPjqj9dEvXtTIP0hQ5a1OrKCogEc7mgqC7tAnevEPPapTeQdUB1bH94RHIULoJzUqK3VoxqSg539TgEfWZu13lCiIe9noOQpA1KFEzHX6LhOdwAwGg/ASUvRHn7BVlSuTGD+x0920+v2+rdfrAFia5qCTKSLECmoK4HrzxzMIHqwVeH2EEd/gPK9j3TpYf2f8XF9VVZCZ3nx4/dC24oCfJHnc8ViGTsAqj0u+v2pXHPeTJD+B0rZVXsjck4rW1ZWvKx1HfluOpE7146dw5EFYJydaHUdeBkcyIla4Yhyp11I6jrwMjjxXnj2lUJWsoOMFq0rlO5/5rp/1LhWBqIB8BEbb1N/I3dZIFvUBShptou5erxeADJDY7XbBgTabTWusvFPDExuEw/fZbGb9fj/spKTKph+0w00R4GZmllj8+Rs1LIoHd3RkZq2cYAU2BWWAmTGqoytREU1T4PHGTVEC9KkmnO91q2Sz2WxCNAo50LaXRyyCoXqJHde61Ca9/HBCdc6YYyq48bJOjWgRKeU4kw+iWaoj1TvAqKkl/FNfwIawYYiW9r0f+iiw7uxjZq13saD/sizDTRtgTX95p0lMHh4Y+QcRKUDHQFzlDVGpT/mJKfryabpqI9oXjcByPf1CNt7fkD3X0ieNrsVWjzQ9CYxClkpwKku1PSUmHTu/6e9+3H5Cq89zfIpQunK6dBz5bTkS+epN4ddypE6yVfYdR14eR1bYi/CCmYUbXP8OuY4jL4sjz5VnTSnksypZlaGd9ESDIaqAFCBVAB4Q1DAQtObK4jxme+Pm7dIedFG4dzSz4zKvkgEPbabpcRtaVgnMLOyYtFqtwnclP2QDqBDFpO8qEwXtXq9nKf1Ok0BsOBtyUwDiOiVglqV9rrNGxRRYcQQiKRg6x3TSrvrxEzUdP/rXqAv1aFSQOnUljgek1+u1bbfbkALAeHXsCvrqXLqqpxE1Pc/bn9q6n+B4kGNc6tRJkthwOAykkWVZePZgs9lY0+xfzjmZTGw+n9tqtQqpNR40tA+azqLEpn7ix6IPc2dZ1vIN7EkBB30AhkR0IYnxeBzARx9MBwjRn6Yx0F99R4najU5adrvdI6xAFoAudq+TLsUZHS+yY0yKYdgLNqKyRM7YGXXoZAlfpB21P/UH/xyA2pCOnXoUt3T8Kjcdu07YtI+Kz1q0H568lfy78vml48hvz5HIwm9w8TUcqaXjyMvmyJ6kACpHZllmo9Eo2HjHkZfJkf5cLc+6SyGDM2vfmXsH08iY/o4R0mEcjd8VFGLOrf1QYzGzkI+tkZKY42vkToFTt1Tlr+6IBAHhQDou3lauTq136kpUvOcDoKFO2glgwJhlQsxYe71ey+ExajVET/Aa+SBapFEN1RkGrOBFUbnruRp5hvP5yQAAIABJREFUoy1NG8FZAJRYRKWu60DWOvHjOoBPnUBtxU9ukI8u22+32xB9Qhbk2+u7TbB1BTxAUu2K8eo2swAqkcckSWw+n4cXZ+p7atI0tclkYrPZzMqytNVq1Yqg+ufwNG1ACYWJg+oEnerEQEld8/3ZCne73YU28RUiypCgJ3V0pECkUSH6zTnUrRFI5DkcDlsYgBx0wxeddGJrWqf2CZLzN5e6sqckpO36iY73L/rCJFHloeerLaFHjSwydsVC+qATVMUwrY9x0L4SEL8plp7yF/rZlS8vHUd+e46s6zp6s2T25RyZWHsS1XHk5XLkjvmW2JfKmz53HHmZHHmuPNsKl94ZIhyUqSSgg/JkgHI9wfOZpVraImpAaoIqGyOhLp1MqpPpcjH9Q/lE/NUI2eEIAOOzLifTN1IcMFIUTztEEc0sPDhJ38hP1UgW44MoKPSBdjQPmePIXIlMScBHAWKRbCVa2qqqKuhMJwMAi6ZAAKhch17UKVUPGDO501xDpIi21YmpXx1SbQ/5qKMBskrGRMrQqYKdAgjyihGrghN9xjYeHh7C70VR2GazCe9B45rhcGi///3vg21MJhO7urqysizt/v4+RKBpV4kUu1ZQ0Jtr9IkfrFarR5M8BUQLx/bHR6NRsCGuS9PUNpuNjcfjMFaibQrK6J1JGvKjbxpt11QCzkPPnKOTQ32WQImcCRV6QQbj8ThsP037noST5PjyVNrTvHa1A09amkqh4OwfXgcbsFeupW+eSPis7Wo/GA+RTPUPLT466QmTwm/dDdfXlY4jvz1HchOAfT+JIw+uoxPHjiMvkyPJLjJ309A0Teu9bx1HXiZH+uNanvUZrlhR8FSwUsH7O0lPJihCo2L8w8n1GSQ1PgABw1WjVkD2Y0F4GM1kMnlkyD7dwN8Fa0QFZWIUAMRwOGwRVFVVtl6vw3eVjY8OmJntDlFBooZ3d3eWZVmIcmDUeZ6H/uFo6IbziGDoGDTyoeNTQvJREY3QqRH7CIPKjL4o0CuQaX9wsO12a5vNxjabTdgS2JOnj4bweblc2mazsfv7e1sul5amqY1GIxuPx5bneYgiQiTYigKI6gdbUUBXO0Xeu93OlstlSKFRwmHyQfvYCqkUjDtNU7u6ugppIjpJUp9TnUDK2IK/sfJEgz/hewG4D/n/TKSIbmp6j0a
dlVC8/yNDbwd8JnLF5ClJkhBZpg/4jk5e1VfwNZ4viUUIPf7QL8UYJoXab7/rl+bEM16izWon6JpJnz53om00TdOajOrEXP8xbsVaZIjtaoSVa7Qt7aMfP7bEuLrydaXjyG/LkUySdrudDYfDJ3Fk6G+WPhpDx5GXyZHWHJ8NpP/YFLjeceTlceQpnDd7phsuGkaRasR0lO90iGiXFw4D1+s96CmxmFkLnCgKZpoL6wGZ6BF1cG6/3w8RIwyv3+8Hp6b+0Whk2+02jI0onbapjkR/AH/u7olEonwFDhSohs7n8Whkd+/ubL1eW7/ft/F43DIg2sPIaUuNT+WqUQ/aR7+a345xaaRIZV3XtW2323B9mqYhMqUPlapRewP2kwcmDByHeCBmQAtHZQwaMWJ8gNBoNLI8z225XNp8Pre//OUvwbmHw6GZmY1GowDy/NMHvj3hIzdSL9DtbrdrRcDMLNRH3waDQejbbrezxWJhWZbZZDIJYFMUha1WK5tMJnZ7e2t/+ctfAqihd78JCjIFiAFgbICXZ9NfxsZY+oN+qEv1DoFoZNPbqurVT2YUA6jfbD/JImKGHnq9/VbN/oWoCqZ+MqoTSvTAzmn4/na7DePk+RVNp0JnmkKjeKV90LQkiAIi84Snz5l4v1I8VfzgXL1OiVPJVH+H9FXGYKaXPQW/RJaxiXdXPq90HPntOVJT3bbb7ZM4Uid4HUdeNkeu+kcuhBN0ou/13nHkZXHkufLkGy6NCJ363QsHB+ez1mN2jLogWE11UIDVO28PvGYWQF2jdHoO0ZTxeNxSclEUwakxeKJlHnD1zhrHUaMGTLQPfhx83+12YecafTiSv610hPogqyxrOZg6r/aFKChF7/T5rsbOeFSHntQ4h/4DNrrETVmv1y1dmh23Otblaq8bxg4JUedoNAoOpLnf/kFljYxxTq/XCw+3TiaTACir1SpE8haLhb148cI+fPhgd3d3ttlsLEmOW6Zrn8Pqj+hfI1YACn2A8Jumsdls9miygQzRyXK5DHXr9r/v3r2zJElsOp3acrls+aIHf/xB7c1sD5Cr1cr6/X54SHm5XIZob4hGpe13yCgJKEl7nyeiVVVVAFeVmdnxoXYmANgFsuD5Dt09SvPytW3GSeoJ6Q7Uh9yxyevraxsMBq08dbVBjplZa3z6N8v2D0nrBI+CDQ6Hw2AXSsKaRsP5yEwx00dhfdRRJ+CKP/gEstNrFDuxFb5T0BN994TTlU+XjiO/D0fS9+Fw+CwcaWZWFqX1JZLeceTlcWQlr03huUduBHUnyI4jL5Mjz5Vne4ZLQZZOKBBqbqgORDusd/Qa/dJzOOaJA6UpgNEn+qUgqIRVFIWNRqNWHd5JsiwLu8soWI9GowD0AGuSJMFYtF1IRSN49FEVTnRFwZBrkyQJ0QYzs94BvDQaQV+IRnAM0FUZ6vlqvCxRIy+vB43KcY0Sri5Hx/QOIABQ2BBGixMQiUS2RLaqqgp9HI1GtlgsHkVpcW4loRjBDAYDW61WNhqNbDqdBnLu9Xr2008/2Xq9DmTy+vVre/v2rS0WCzOzkNe9Xq8DoPzhD3+wpmns3bt31u/37erqygaDQbA3ojaAAgTloyUKekp8RJiaprFff/3VfvrpJ7u6urL379+3InLYIXWVZRns9ePHjzadTlvECyBNJpOgG+9n1IvPKCjqBK+u69YD/ZATclX/U3vyPu8ncDqx0QmP2hg4QqqBkgNRSraoxifwA8aH3fi6+K4RTaLu2HtsckmUTycJ+h4RJX+N4NE2RX1bZaMRf9WXTqKxHerw2yhjJ0rSiqHdzdbXl44jvz1HIhPs/ikcqbsCI+eOIy+TIwOfNO3nIH3fOo7sONKXZ3+GC6FzzAO+Cm00GrUGh/I4FzDDUHWJWonK3zzojQPnUK9uw0ndGMR0Og2K5Px+vx9SIqqqstFoZPP53Mbjcegn48D4WbrWu+jxeBzGzZ38arWyq6urICOWhxkLhoTiWeYn79fMrKr257x48SLka19fX5uZhQiHgj594nqcSUmBPqphamoH49bxeeJQWaAPBVNkT0oFS8oYPjLMsv0Knjqu9gcnoj0ciogZoKaRGyVMiBs5syRO7jEOrw+Gz2az8NArD5SSI7/ZbOyPf/yjvX792v7+7/8+1Ec9jI+b5qZpWnpnzJpLXJalrdfrkBqDvADXP//5zyEKiK8AzJ6ssdOrq6sgC6JXu93ORqNRS0ecHyZztk9f0Bt/7EztgoibkqdO9nTiwT+uZyJH3TrB0joVX7AP+ooM0CHPp9Auk0P6MhgMgmwBfZ0QK4F4EqLPfvIKAWs0USOE1KVkzUQTn/U+pUTLNZzDOGM3yPgq9Xp/BifN2mkRGhVUXOzKl5eOI78tRy6XyyDrp3JkfZBlv3fc6a3jyMvkyJQb6qT9ugLS7PR5qo4jL48jOSdWzt5wJUnyP5nZ/9s0zcdPnBeEiQAU5PU8vSPUu0glAr88icAV5BGYKoKl0abZP0QJkCghKLDRPyUpVRIPHRK1U0IqiiIAro4d4WNsOIGmM1DHZrNpRTeIyCgJKKi0DO+wMXxVV8G5cHj6iSx5KFHBg6VPTW1QQMBo1DEZG/ngGsGjPY2gUDR6RnQOXWiO8GazsZubGxuNRrZcLkPf/C5bgEuvt38nB9EglpiJ2ul4iIBgE0Rt+b3X69nV1ZXN5/MwxvV6HWyTdhaLhZVlGVIJ7u/vW3ZdFIX98ssvdn19bUVR2PX1tS2Xy+DkSZLYZrNp2YYCKTbkozAQL3KAeLGH+/v7lt0BZuhQbU8jYLormPqqRl/b0dtjHjQTAAVUfUAYIlJSQv5+EqhtEcHC5zQyroBOe2pbukpA24o/vAuIcTMhASewY/qrslL5EjXWl7EqrlAfPq0EyVixYyaFOhGC3LV9xQAlcIrWrfqmfcVlPxnUqKHaDbL09tiVfek48j8vR+rYzexJHNnUxw0rwPKOIy+TI0kaU1+AD/W5uI4jO4705ewNV9M0/3D26sfnB8XrnbqSDY6jEQV9QFHP95EmwIx/SiwIOM/zsJTe7/dtMpm0AASQ4a5ac2N1+ZD8dDMLoD+ZTMzMbDabhaXxJEmCUWFsLMkCmDzcOBqNWsDIg45ZltnNzY3N5/NAhGYWHiBVg8PQY3fR9EXfXQEwakQAoDBrvzsEkFIDVqNXwMCwcDrVCf3DadXx0SvpIoD2YrEIKyeApC6rowvkQz951giwB6j9jaTmuhPFgXxJRyA3HT1ix6PRyNbrdZAtADYYDGy5XNp4PA7HSLf49ddfw4OnmgaELbJbFNvdAs5pmgY7QY9ElubzuU2n0xZh4AMKhvgeYKhL/ICaAgdki140Eq7pPWYWonu0RySa39nKt9frBV+kTnzePzBPQUa0qzrQ70RWsWMPlGYWJhuAvfpPnufheFHs36XDDmvYAv2hHYhAfU0f3FbC0xe74iv6YDB1a/oScsQmaE/JkXGqH+vEQ/EVW9ebZfV1H2TR37mG9hUTPIFdeuk48j8vR3JDqT7wtRzJewiRaceRl8uR+mY31QHy0zF0HH
l5HHmuPHtKoZKCvynw3zFkPpu1X8CrwMdgYiktOnicBqWRy212fKu97yeOosLUO3VPAoA2AKLRDRwLA+Rf0zQ2mUyCsRI1RGGawtHv98OyNG2orNL0+CLGxNqpJUQA8jwPedt8JzrHuIkSqL58tEKNWMlFo45cpwSGrum/RlnMLORGL5dLe3h4sMlk0kqVVGdV0KzrY65wWe7zrXu9Xoj2AfyQsI6Dh5KVFHFy5IpzbrfboI9er2fT6dT6/X4AIjYQePnypY1GI3t4eLD5fB4e0CYqiSMiI6K+9Im6lWwBIAWf8XjcyonXGyT6jGz8iy/V1hkr+tKorAKYgjNjMrMwQdhutyHioySGbdN2nufB5plkoQudiNAfiJ7+47MQsJ+M4jsQEBMNxgqZp+nxAWfGyEQmyzK7vr4O2yerDaK7NE3DhHC9XltVVbZarez6+joQKH2jH+hPI8v4DXLBzsEafSeLJwbkq+PXvqq9gBXYBgW56oRE2/N4jM6p41OE0pXTpePIb8uRTESR/VM40hzudBx5uRxpgoHYOoEBfAnu6Tjy8jjyXHmWGy4FD777zqEQCsDDwLjD9aCkgKZgp+0BrDiGX8LXKBCEpAQFqAN4GrUhegLwaiRJI1YsX6vTNk0THjQGFDEc6qAtjEqXwwFTcnKJdBIRM9tHEuu7ZSA8opBMmmkL44Fs+U11h+HpMwYYvoKcRgEYl5KwtqNEqHKA2IqisJubm5CzO51OLcsye3h4CO1TF9cBplVVBdCfTCa2Wq0C0X/48KFFtn6yg61sNpsQaWUpnTp1uZ5xoX/0zZa55DoTcSUffTqdBv1VVRWiyRqxpm8KAEqmukTPGDimuizL0l68eBH6PhwOQ194aSTAoJMfCENTOuq6Dtvx7nY7q5v2ygbj88v2mpakaT96I6eRRp0wMl7dKUojj5zXNM2jVCVIx2MMdWr0lEgsflYURSBgbVttBv9irESaqRvfTZIkkI6ZtUhQJzikvphZ0As6VkBXn1Ic9asXyAHd+/PVnxWXaVeLTuzBZz8Z6cqXlY4jvz1HMlkfj8et1bOv4ciAzXXHkR1HHvuiHKmrHMpjHUdeFkeeC0o+y7bwDBZjpMNq/PzTTiIQFQ5RD8DSL7VqZCk2eAxQCYCldBxYl/c1ymF2fImfOhVOQ92Mg3qU2FgJMLNAGDgrRsndvo6fvkKmXAuhKYHu/x5fDofBILfJZBIIgaVlHwFUQtXIHL8BmMg3BshcQ/t855jqlnpYWmZHI8aJvtfrtY3H4+BU6CZJkrCpA3aCTHa7XXgHB3XQR40EaVQWGeN8gDDpD4Ae9tjr9ez6+trevn0bIiVqFxAcOgXEFeRWq1VoDzvxfeVcjWQxRh03RW1CCWu1WrX0QloJzyZomgFL9ZoSwLgDkBxMnBVV/AhfpO+MCzmXZWmTySQAftM0tlgsWs99aGQrSZLWREpJAMJXPNCxA9T8pZ9q+5AmMmXM2+3WBoNBSJvQSSGEw3mKERqBVPImioq+ISn1ZWSnkzr6iq4VayBjxSD8DFloNA9f1O86eY/ZEW0hA+qn6MShK59XOo78Xhx5fFEuUfmv5UiTCZ7eGHYceXkcmWVHjFWOBK/By44jL5Mjz5VnCVfSEYShgtJOck6WHbeDNTuSgUbUFKiVQPx3PYbhaGREz8FguWvWPFnAWCN9+lJHnBiA5E4e4+Y6CFSdjnFTD0BPu54gMZqiKML7T3RZvKqqQAC7A1HQnhI1ctxsNmH5ViNRAAVyQgc4LoSiUQqNEGhURclE7YL+0naSJIEgkR2RKF0KJspFpAZgQgbYCnJ/eHiw6XRq0+k0bEOLnWEfOklgskOf0zQNBEedi8UiOODNzU2IDtLnoijCzkfIXseJnbDNLORP6o2+s0H7yTmQD/UAgrPZLDwPQQpFDCj0BlpvoBRwFGyJNHENUWlkYmbWOwApdgogT6dTm8/nZmYhL520gfF43HoomlQFJgZFUYTUAfwF/8JudXzj8dg2m00rGob8scuY7VZVZePx2K6ursIzJ1qH2jj2gS551kVthEmD2i6EjX+jS8UtlZ3ZcVc4TY9SvMKH6BORax9VRpcK/vSLvz5CqxNptVsf+cZHPJ535fNKx5HfgSPNQj+egyMP2uk48tI5cpDjqGZ23CEUnTFu7LTjyI4jKU++4WLQ6qQ0qoNCWTgHoKUdVuD3RetUxekdKHWgFKIvV1dXtl6vw3kYEMrA0QBJjIWxKWhyjYIThq5pBRAWfVXFmx23p2Ss2oZGLhmXEkRRFOEZLmSDAWiuuy7HaiqQRmNoH+PEgQEMXe5nnKofJXlPdKojnMsv+Q+Hw0CuRN806sbYAE/a4XpIh12bkmSfoqDv26B9xseNBWMhjxkC9ZGoLNtvEfvw8BAiNNg5xJIk+yXw6+vrMAlBx9jfYrFopcB4+8AG8AvSN3a7XRgHBLZcLkMfyJXX6DTAqxEgQDPPc3t4eAjncR2gNxgMwkPsPnI7zPOQKoBMeTB4uVy2dI+dMy70PRqNQhuqnyRJWvnZyIUxQMZMPoiI62RPbRRbIt0D/y6KIuhWVwsUoCEBHQPnE8VTIKcdZK2+rH5GO7ShKWJq2/iOTojLsgzRQfqhvqx+o5NVxRd+17oZv+qCgo3p9658Wek48vtwJH6iq0hP4Ugzs+owme448nI5shCMZ2MWdAivcXPUceTlceS58iwphTSK8pRgfKQAY9dlSwU2lh+pl4Hyu5KN3rVTL2ChS7lmx7QD8oo1UqYAiLPHiIpzMAQMhn5p9EmvgQw0GnDquBKuRreyLAtAt297X8d4NLJSdmPCsBkDTshxZEWbamRECNTIbm5u7O7uznq9nt3d3YVzNTqm0TutnygYY63rfSoH4KjEojoA2DSvHtJnXLzEcLlc2mw2syRJbLFYhNSJFy9ehIgRpMpkoCzLEDki/5qIoUY/0APfb29vrSgKu7u7a+V2QzzUD0g2TROO8zCx10GStHf/UpkwXo0OQliDwf5FlCznQwrogLQIjfiQpkAOOfpL0zTUxVin06mZWfBRBUn0Sp9JBSAKBYlxHhE+zieXXwFZdzxjQkD7+MByuQwTRcUHyBB7h2Swb7VnJUh8z0c5zY7Eq22hT01XgLywE3SmPoXvYRMU+q6RScavBKMROB2PTjy0eJ/jfMal44eA9IaAttT3FJu68mWl48jvw5Hc3CVJEnaz+1qOpFR11eLNjiMvjyPVznWTCfTVceRlc6Se48uzPQGNILVRjJBNHnjoTvN+EYoKTw2agSmZ+MiOWTtnmrY5DmgQxer3+y2lI3CiNrpEyWf+cievUSx1diUr7SN9A9wYK7JhdxsiXACQAi0RFDML29QCDhiutsmKBQ8RsxuNGj0Go2SMnIbDoY3HY7u7u7PZbGYfPnwIwED0C115+Sm5Uj/99QBMHyAQ3XkJ4CVqpNHFyWRid3d3Np1OA8ET2YKMiaSpcyNbxkg0FyDRaAyg/Pbt20AUpDoANER6iLTpw6I8bAqwcr6SJ8c1NUCBR+2ccRCtpO7xeBxkjn6IhpGecHV1FVIGs
GFeSkm0W6OJCnzqT/yeJEnYAStN0xClJI+c9+hAvETMIB8ikGbHB2HR/3A4DHn2mgJSlmVIF1mv10EeyA5/VP+BUElb2Ww2wb704Xf1ac1x18iv2ozqCV9WfVGvn3RqnzQyzvg9fvgJO/3iZaf4DxihbetknOuUEHTir+coNioWq0105ctKx5HfmCNlwslE/6s5UrAYXO848jI5Evuypr36qTc2+HvHkZfHkefKs24LH4scoQiN1NV1bYvForVsqBET7TzLzr3e/v0RAJa2RfvcXXPToH0AkGJgj+DMLLyjYjqdtu5y1bEwam94GAYKwYk1WgiRMlZvyF6hOKreSZtZK6WQgsMjU81FTtM0bNep+eBKdDiM9oM+j8djm81mtlgsAnCpYevyP47sJ++67M1fTW1R+aITyDvPc7u5uQlAwIOZ6BtSxvnfv39v19fX4aFTnVwACrPZLIDAbDYLOc/sPMQ7QsyO271CxAr62CR2x8spGZfaiZIBgER0EXny/gtsj6gVETXsbTQa2Wq1sjzP7fr6OshPJ2HqA+Px2JqmCeRBtJKo6mq1CjYACWADOmnQSCo6apr9w+68FPP6+tpWq1Ug5Lquw65HfqLmCbZpGvv48WPQMfZ4c3MTiIMdkUiBwBaURJUMyJPnGQPO2Ww2rZQdzqdeZMCKAxMtdrXCb8ys9RA9kx/GoxNc1Q/PXNAO0UH1Z1YkeLicKKh+Vj35G2V+10k6GKYEE7vB1uv8OV35stJx5LflSJ8mSB1fw5GUujpGuzuOvEyO1FVa5UiPyR1HXiZHnivPti28grIOQKNPLPkreBAF0vd8oFQIQXcQ0vZwbF1W1CVF3d0Hw6NPCpb0F0cCsABfzqFfvd5+lx8igdvtNkTKNEpDferQZhYcgndhAIS6NKwpFIyR/vptunUyjKz0oWhtU7cK1cghE+mqqsKOM0mS2Hq9tru7O7u+vraff/7Zfv31V5tOpyHiVdf7h4f9g5v0geiRkrJGN4iqcJ0+HEn/ifq8fv3aVquVffz4MURi1JF4UBTy1iikAh+AB4g3zfEBZLW5Fy9e2HA4DC/bxPaIFs7n80eRECJTq9Wq9RAt6QmAOdFrjdqoPjWqRcTNzEK6SVEU9jd/8zf222+/hQeMqZs0BwUCIomQ63A4tNvbW/uP//iP0I72kVQGyL6x9u5pRNnW67UNBoMQUR0Oh/bbb7+FXZc2m41Np1MbjUatyDh56Ni8Eiq/F8V+O2Te61KWpV1fX4frJpNJ+AxRaxoLxKBEvN1ubTqd2nq9Dg8FI2/sWSc/+ryCgj/1xSYqHqxjmMhxfAZ5aJQUX9Q8dsUCzvF1M2Zkq3KmnzoRVDuJTdDVLj9FKF2Jl44jvz1H6qT3qRzJC9/RXceRl8uRzeiwfbgdV1vhHfWTjiM7jvTlWW64NAKFg2hUSIFR75J5+/hyuQxgQue5+0Z4XI/y+Y36VAiqEE3J0CiKLuUjKLNjXu5yuQzpeiiaOngvxWQyaQGF3pWbHVM1fPqFEoNG4AB5vZYxECUgQsh5GDd1QWqMib5g2ETeyrIMoIPBbbdbWywWwYjK8vgA4ps3bwKIAPDkyPOAKLJSp9YUA84hKqMOQI55v98PYE8fiKRqigFgolGoNE3DFrKvXr16lCcOCfz666+hferTyYg+DEp/yWnHcXkRo+Zwm1kgaICqLEu7ubmxjx8/htQLTZdhRQnZoXNepDgcDm06ndpsNrP5fG7T6dS22629efMmRMhWq1V4mJkJgE7cFAx7vf0D8tgC/QeQAUH8OvjcYUUV0Oe33W4XZP3mzZtgh0QVx+Nx8A2VaZoec+KxFY1esm0z/tg0jf32229WVZXd3t62Ipx5nrfkDjFoG4yV8eouXUxMFbPwG8iESQYEgt2qfzF5mc1moS58RSd6RAZVvpoGhR9g/3oTrJMQJS6NfCJHjfiDIdiYkojiN332qyiKOV358tJx5LfnSH3PD/L/Wo7knPFkbHZYIeg48jI5ssQeJcCg9szKaceRl8mR53jyWTbN4I5bj/nIGMdQAMvELBkDBhiXChlDVABAIAhBDcHn/+qSK+2og2jfVdlFUdjV1VWIwIzH4xDNu7m5seVyGYiEvmlkUJXFDQ3GAOhrXi0P9uodfsvR5eaKUjsC5XfAl0gYMtCIAVEjZH19fR1yjJNk/1Bmr9ezH374wR4eHuzu7s4Gg0ErugBIA9SLxSJcR9SJ+rMsCw6NLKjPzGw+n9tgMLDpdBp0myRJAGbGw3hHo5G9f//e1uu1vX79OshzNpuFvqEPcrb7/f07WVjunk6nYZlbSQ/w6ff7dn19HW5MAe/ZbGY3NzdhNz/smgkIBLPb7V/yiEywe/QPOTBWTQcws5D/PZlMwvsveFiWfH7SH8jP14kX9TKJQHc3Nzf28PBgi8XCXr16FaJ+TOIARmxkMpns5X6wJeyZyCipB/gJxIWu1e90IgjR8fD2ZDIJPoPfECVl8ok9MHb6qFFwMwuEMxwOQ8QdWafpPp9+u92GZwrwCX1+RSfAikv0HfLSSTR4pA+A43dgIf1EV5AWtkr0k8kR/1S3OlHVyTL90THQP45xLmPW6zVqp7+Du135stJx5PfhSE1DQgZfy5HI7vWr19bf/a7jyAuI4zY6AAAgAElEQVTmyI/5ngvTg3yxEVJxGWPHkZfJkefKs6UU6t0kAgNQUC531Wbt3URQJMbOgMza70cgGkJdYRCyNBj7x4OJPpKm4K93rPTBrJ0HjjLI5QU4NeqvJKW5o/yGIxINYOyMjfQ+UgQwBpz5keyT445QRPZ4UFD1gMEBfIxd+0CUQ0Gedm9vbwMwmO2jSkQyRqNRiGyxaxT6yrIsPCCLc43H45AWgX6IUqRpGtIMzCw8m8BnXerHCc0sRLDqura/+qu/CqBNSormLk+nU8uyzFarlV1fX1vTNCHKU9f7t6yjp4eHh2DjODq2St9IQ9Bt1G9vb20+nwfQJnKJP2B3ClyQrdoEhMzWw7/99luIHK/Xa7u9vQ2A9vLlywDygO1ms7HVatWyz/v7+7BF7Xq9tslkYjc3N/bLL7+E9BV8BQAMUZzq+A4PjhdFYR8+fAj2ANAyVkBbo+4AFPaEDrJsv71wr9ezxWIRUofG43HISecZkjRNbTKZBCLCzna7XcjFB9Rns5nd39/ber0Ozy0URWGvXr0K6TBgg4/8E3UkyqdRN8UKfJ9z1O88jlD8qoMSk5IO7emEGDvUibWPripGolP1fbBPU0zAX43yeXzuypeVjiO/PUfSb+3z13Iksu44suNIuDAxa3Gkpjx2HHm5HKl1+vLkGy6AA6XTEb0b5DMDZ2kXA0WBOI/Z8cVw1N80TSAMjW4BoBrBY5ceoi3cJeMACA8FKsGYWYiEaLSfKFSv17P7+/tgsP1+vxVBgfjou0ZRIEb6C6ABGCgcY1TA1XzjXq9nabIf6x//9//Vmqr9bgCNlPJdjUdLkiSWWGKNNY+uQTc6SaAN1YPq+FXdmCXWqpO+JunxQV/qNjOr09SSprHZoZ0mOW4JkjaNNYe+7JLEsrq2cdPY
7iDT2UEuVZJY71DfXOwQZ12mqa0SyeM3s6Rp7L2LZvB7laS2To/L0N6J7l10ViMs6zS13cEGdmlmVX2wqzS1uqosaRobiJxJ3Omr7BszS5hMpLbK9vIdH3xg0DTWbxprssxeH8ip3+ubJWY5QJIkljeN9SQVoElTm1SVFVlmo6qy/1YUluZDq9PEXu6K0KYHo+xqjMG0dntSsFTQ6fV6Np/Pw4SE1I3VamVNc3w2Av9iguCJoq73zz+QTkIkMfjBgSzQG9FW/G0ymdhsNrPJZBKImIeAN5uN/fLLL3Z7exvSHZj8EP3WiR/4tlqtQtoQvqx2gP8TfVcQ1okj+EfEk75BJqoHjepT8EfIG8LyBOhTHXQir36iBKVtKMGAZV35/NJx5PfhSOSMrzPuXq/X+h2fYRLGjQ39TISP/uF/+d8srQqz5oDTiePIJMKR1pg1Bx1bY01dh5fmmu25Mjm85iVN0ihHpgf8r+va0iTdn3+AAniW8dRVbY0dbjQbs/LvjnJpmn1fev1eaLtuamvqwyrroR91Ve/HVh828rDGmgO3W2NWN3Xgdvgt2RN/6FeSJmF+UTdtjkyTwyppXVmWplbVdZBRVVctvNINwsKxw1xC5xlZevCBqtzL0Y433GVx2JCm3wv9Qe5N3VhVV2Gegg/Sv6Io7P/Oc0uT1Hb/w86a9LhTIyuEGgDA7juOvEyO/O96w2V2TE/Qu08a1jt8jXwAsv7OlvQAjIQ6EABGpQNjiZsHLbkeYCWawR0+USSExfmMgwihph/M5/OwbA4pqBMRkcE5dOxmx7tnlskx6CzLwrsgrq6uggERKVIi1PQSdDr8/evnUOF3KYn7HJvK6TmHexDF9NY1nPv4rQvxY2Zm5Ynj/nyflXsqS5e+Ua/WX8k5MZc8d4z+ZJFzvRPrwnYS+T2Tv3xuIufFyjDPbddrby2ruxvd3NxYmqb26tUre/fuXXiR5Xa7DekU+MJisWiBY7/fD9FMoq28KJKiz1MMBoNQP30hVWW9Xtt0Og3pSHd3d2GLXQiOrWt5XoJIZ13XIcILhoA/WZaF6DKkSJ/YJMBH9jTYAQ4R3dfJJpikMuF8sERXG8yOE3ewleN5noeJtJKTTrwgIg2yaGRP+0X7bNzQlS8rHUd+e46kv0TSWU1L0zTsJMjzKqSn8dwKKVYhze4w8Vte//jtjKYr/2nKPHIszbLwnBo+yUoTONlx5OVxpK7O+fLkGy4GQO4sxqLLwXSSu8SyLEPuLxECs70xsWTaNE3YtWW5XIb8dB6URHhFUYS7eiKAZu2IIZ+plzthTT8gmrharUIkbT6fh8gFD9LtdrtAaIwR59KlR+7geWP4fD4PxHa8aUrCzjoomPc/MD5IFlmRw/rL//F/WX8w2G+H2u+bNY3dP9wfDDw/EO/QsjSz2dXsEGEs7P3732w4HFk+GFhyIJbisNtNfRjPcJjberUOOfcsvRdlaVm6fylfY7Z/aLrXs7Iq7erq2nq9XtjmtK4qG0/Gltjh4Ubbr8CMRiPb7nZWFoXNrma23e6sqkqbTvbOu9lurGmOL5/bbrf7nZUOW/WORiNbHyI8TV1bdnDe6hANyXqZNfWemFfrtdWH9Irtbmv1YSVwu9uF3Xvevn1rvV5mk8nUBoO+NY3Z/OHBBnlu0+nEhvnQ3n94b3XdhBz45WFjkeFoZGb7aF14QLkqrWnMRqOhPTw82HK5sqauraprGw2Htit2VhSlrdcrK3aFNWaWARJmlqbHSVFV1VZVpQ36A9sVO7u/fwhRryxN7er6ysqytFcvX5mZ2ce7uyCXXVFYWRbW7x1y4Q/2OR6NbL5Y2NXVlQ0GA1suFnufSo7bxC4WC8vS1Hr9npXFPqpVVpXl+cDqzR4A8TV84Pr6upWmsVwug00XRRFy3bEnUj6Kogg7mJHbv16vbblc2s3NjSVJYnd3d7ZarcLuUxoZJC2C6FpVVeFdOK9fv7bNZmNpun9QuNfrBT82O0bNmACORqPgYzw/AZEC6OALmKMTaCZvyBHsAMhJVdIHy4lmssmAYhOkB4YxYWcSToSxruvQpkbmwFwf/UdnEDD4QoHkOFcJpytfXjqO/D4cyaYCi8UibBpwf38fngHS3c24yWQHOW7W+Ps//8P/advyGAXPh0Nbr1dHjkwzG+QDK4vS0iy18Wjf5z0/96wsK7s+pIHBkVVd2WQ8CX1HZ6PxyHbbXXg+brvdWllVNj1wFjjiOXItHMkqCPJJksTKA5dkvZ41TW1Xs/3z2lV94Mjtzup6v+qw27Y5Muv1bDqZ2GCw39784eHB8jy3yXRqwzy39x8+WFPXgSMXi4UlaWKj4SjorixLm0wnVpWHTS9GI3t4eLDVYUMYVizZAZJUR7PDCl/N6lZqxW5nWa9ndVVZWVU26PdtVxT2cH8vO0umdn21DwC8fPXSzMzuPu45sm5qK3ZF8MWi3K9a4lML4cjFYmHD0dASa3NkL2m/V44gA3zUcWTHkb48yw2XbtfI4HAys3a+Jh3H4DAslKxv4t5sNuE9EDxUeHd3F87TAQLcLBNqbqUuy3JHjuABX4AEZdJ38tqHw6GtVqsQXaA+riFvvWma4HhJkth8PrfXr19bnufhAVSMcbfb2YcPH8LuQ7r8m2VZ68FHriGvuFnvLG1SW3+4D1GLq94+z7tvme3KypbvPu6NZL0LkYxX4/0kvV5t9zdQo0P6RLK/2ZnP57YhIjAaWdk0Nr25sc3DUVfp9vCOkm1paWWWVZVtPz5YOhpZvVrb9hAlucnHliRmSb1PP8kqs3q5sdUhP7pfNFZuS9uuVrYr91vW9svDsm6xteFgYE3Z2KBsLEv7tix29vDwW0i3ybLMfvjxxz2BlY0Vm6UNDy+fHFlm5Xq9f9fHtrRJ2rer2z3BrJvU+k1q1ba02+HENpuNzd/8Fh5G7hW1DbLGBqXZdj23frF39mq4j34WD/uI0rDZ33zeXN/YmzdvLK8Ty6rE/vznP4dJ02g0svKQvlMvN5ZUlSVFYZOkb4tie7Czvb2XB0Cd5rkt53t5v3rxwsqytMWmsNvhJADNdDyxzf1yP1F5+37//pUP90fbbhqzqrZitX/B5WCwf6nju3fvbJim1iw31hS13Y72D3J//PjRNofta+vl2tJezzbVIa+9qqwuS7PKrJemVpodb34PD4+/fv06gBwTJgCx1+uFiLGmUdFXMwuTS8B2uVzar7/+anmehx2jFotFmJwBcmxJTMF/2OQD3+RZAk0bUrwwsxCJY4IKSbGKAMEQWSe6yAPQRP118wImoOBO0zQBC5bLpY1GI7s73CgzKSH9g755wIfQkKOmQUB64JimSyj5K8Yp7mlahf5GFLBLKfzy0nHk9+FIbpaKogj+TmoU8mQSx8rWaDSyH3/8sYUTZVnaJMts2k9ss9nafD633Tyx/uH8ptlvsLBZPdgwy6yf9S2vD5s81FvrVdU+ZW3x0fLRyKrtysrl0pKmsevJHluSQyQ9SytLtkvb3t/bdDi0KytsXW9ttV5ZL9lPfnM7vKbjkIa2TkobJ6VN89SWZWHrDw9WkZK
aZfbj7YEjq9LK9d5ePnz8YP2eWble2+ubGyvLrV0NUru6ut1zZNZYv9dYVW2tP9unlm0+vLH0wJFmhQ2TzKZJacVybddW2Ga3scFm/3zadr1/rmuU7W+Qb26u7c2bNzZLayvTyv785z/bwuFwnueW7JbWryorysImg8QWu/3E1urjLnf9ft9u8tyWy7llWWYvbg8cWW1sNhseOXI6ts3q3saDgVUf3trNzY3dzz8Eu8ubxqqmsnK9stxx5Isstf5uaQMr7OZqFDiyYIv37f69X9uq/Vw9E3/spuPIy+NIromVJ99wAXaAPXeldFiX6RAsOaa6vMq5kAlgy4Oa3LlzB43QmHizlLvZbMKOLUSxMBwMgJx2lIPB8mZuSIelVoxHH3RUMkLhjJsoATvp6AsBs2z/QOarV69ClIKlW+7iieJBIkQYiPgQjeBhZM05J3ownU7D7kA8aFyW+zeQk2PPmMbjcdj9aDab2XQ6tX//938P73MgcjYY7N8pofUSITTbO+54PG5tqwuQMC5eDInRrtfrsMxMfwArokI8BFrXtd3e3trd3V2oi3dm1HVtv/zyi1VVZa9evQrtEhnabrdhwqEP7w4Gg9C/N2/e2G63s59//tnu7/c3stPp1FarVYi8JUliV1dXZmZBrsjn/fv3Ybl9NpvZbrezt2/fBlDZrx4O7erqKqTL8GJGorjYAw/A/vjjj7Zer0MkkwmW5m7zcsGffvrJ/vSnP4WoEb6SZZn98MMPtjus7JlZAG7sCZDmZpYomeY56/I+edvY9cePH0PEiAja5BAR1QfLIZjVahXy1pEtOsev9RiEALjxDpfZbBYmT7rjErpmInp/f98aAy8pXa/XoS+bzSakfkAM5KuT9gQgM3aiqeAV+IKcvO6xeUgObEM2ZhbwAJl53KJoGgP9oy10pn+J4ClGc76vC//U38DxrnxZ6Tiy48iOIzuO7DjyMjjyXHmWbeFRNh3X5Tq94+c87j4BosViEUDbzMJL17wx9Xq9AM4QFFEsgJ/VHhSOMQN45KLSd66DKHSZPkn2O+mYWdiVJ01T+/DhQzDu29tbe//+/V6YveOWliiOupEDeeOAObnjRAfY5agoirCNLHf0P/zwQ9hFhqV2HIc7a+TO9p6QbFmW9vDwYOv1Oiwxm1nY/tU7y6tXrwKoqQMkyXHXI30Ikd1vdEmcyRnPBZA3T14wEwK2mkUOuq1umqa2XC73y/qH80gVmc1mtt1u7eHhwTabjf3ud78Lu2JVVRV25kEub968CWQCMLEDEc7+8uVLu7q6so8fP9rHjx/t559/ttlsFlKC0D85zGxZy1I9bZuZvXz5MkSDAAPsGCJBpkRIqAd/QvcvDitdb9++tbIsQ9RoPp+H96f97ne/sx9++MHm83mILKOL5XIZHjx/+/ZtSCcAJOq6DikIkAokzWSMyQ72ohHy+/v7MFHBx66urkKO9Gw2CwRAqsr9/X2YOBDdK4rCJpOJvXv3LpDW3d2djcdju76+DlvAYpek07AKoGkJROrwBdKGWBngYXy2CWbChB+DZ2b7CScpT9iqvkcGO9MIINcTrdM0Kgr64TcAnMk5pIa/IXclGNJXwEoIh3rBQSb0mi6BD2KDtMHvGt3EP7vyZaXjyI4jO47sOLLjyMvgyHPlyTdcdIwlxabZb13p7xIpAIwuYZKTPRgMgmNnWWaLxSJcA2mQ3gAh4XQYA/ml/KZEROQJo93tdmF7TYB+vV6HO2mMGYUSxXn9+rX99ttvIb+VLUUhApaIIbymaQJp7nY7u76+tlevXlnTNDafz0N/9X0c+q6IqqpChAaQWCwW9vLlS/vpp5+srmt79+5d+M0/IEnure5ARfSHOquqCvKm/6SmECXbbv9/9t6s140sudpeyXlmcj7nSCqpym4DbTQM3xrwT7ONegH7T7z/xlffVRuw4X7bVa6uKpV0Rs5MzmSS3wX7CQbVbXXJpb4pZQKCjnTIHPaOiBW59orYG9M1EwAATILNcrk0wGKcGSeuw3z6LjYECFhBbANmJZ1Oq9lsKpVKqV6v63A46He/+51evHhhS+YA02g00vPnzzUYDBSGoQELwH08njTo9Xr9QqoThqG1ks3n8+p0OorjWJPJRIXCaXNFAMonQa1WS+Px2Gzt9vZWlUpFj4+PmkwmxqRhUxSx5nI5C9ip1GmjyHq9bvNAYGY8J5OJyuWyMXZxHFtgzmaz2mw26na7xrhyENAymYxtVooGG5/DH/gs7BF/40NIIYIgsOBO8e3NzY3u7u4sCZHODBMJFgGS9r/T6VSHw0Gz2cwkPyR5+F0YhhcAwbN4xouxlWRBkM/zTL6OhPHd7XYaDocmywLQeWZ8Ab9gg1EYUnTvqVTKWHs/hqxEkFASJyQZYOPzXDMIAi0WC5O4kGgSF7ElxtWzcDw/8+tjEd/nc5wDG+Fn5kyS+SiAlBwffiQYmWBkgpEJRiYY+Wlg5J9VUijJ3holWQEbb9LchH/rZEJ5u8S4YVTy+bzm87kFGRgF3y7TD5rXa/oHZ7D5u1Kp2Bs37Bg/AxS8mR+PR5MjjEYjpdPnzisY/Ww2M+0pS7gAgSRbnvbANR6P9fT0pGfPnlkQ4DnjOFar1TJ9qzc2AHS5XBqwcL10Oq3r62u9fv1a9Xpd9Xrd2JrxeGxzUS6XDfwKhYIFeMaAQME8NptNAyVAHH19JpNRvV63+/D7ePi5IHAQeGezmY0hQESQRBO82WwuJAQADcEDpoMAP5vNDARwxMVioaurKwussH6r1UqpVEqtVstkNTwPy/qwIYC/l7uwHI9dzedzYyVJPDKZc1tjlsMJNLVazVqvem01YweziX1LZ10y44mWm2SAIun5fK4wDJXL5TQajSTJgly9Xtd4PDZ7QbJQLBY1nU4VhqEFR8+qEsiKxaKy2VOXImofeEZJur6+VhRFxrLCgMPcITNBugCT2Gg0TBKFfSAVgV2rVCqKokiLxUL1et2KfIkL3C8gQGDlGSaTid3ner229rL41Xw+N+DE3ulgRLLL+fE9ZGK1Wk1xHOvx8dFswBdD+9joWTviHhII/BuGMAxDY+kJ+gAw88p3iYOAALYM6JPU+gOmzvu3Z949C4gP8r3k+PAjwcgEIxOMTDAywcifP0Z68uzd46O8cDGAGI0HZwI5N8zDEijRbPIZjPl4PNoSK+yHX0L27CBOxSQXCgX7DM4gyd6EPait1+sLrSmDztKwdG6py7/R1RP82FgunU6bQfGWzdsvy/Gwaen0Sf8O80PwhDnyy78813a7tSVprkOg5jOVSkXj8dgCcqvV0mq1svvBiHibx2h8UEKugaOiO8eBfd0Ajgd7Kp2ZAGQCh8NBi8VC5XLZlnhht2gjTOAhWUAT7JeTMXi+U61WTbKyXq+tC9Bms9F4PFa9XrcaAekUsGFnOR/sYqVSMcDCxgCHarVqbJB0lghxbqQ9z54902g0MmZuv9+r0WjY92CKASrmj/nAvgBEgrkkk0CwV8bq9w1BODc23+/3zVc8i42NIzeo1Wo2BswnjCkFuSQ4jD9z420+l8upWCxqNB
ppOp0qm80qiiKzN+aRuSP4zufzUwev3/uhlxrBlkqnpGYymRigwLLxfPv9/g+YO37mvHyWhAdfqtVqJjvBTgEmzkO88Cwzn4P5BhioveE+YNCwJy8jIukmhvEcPugz1iRR2Wz2YjXByyt4VuIUvoodS+dkyMdh4su798B1OS8Jgh/b5PjxR4KRCUYmGJlgZIKRP3+M/LOvcDEoMFkwZwycf7OG6fBBO5vN2nJpuVzWcDi09pPT6VTlcvnigaWztp3JIKBlMhm12209PDxcGCjsCRPD75gQb/C8KbNk6N+aYbzK5fLFbtwsffvlSNgAjIBWt6lU6iL41Wo1m/xyuWwFs5VK5aI7C0XA6XRajUZDh8O5qxUGM5lMLAijBQe8JVkw9V1seG6WaxkPGCIMGPYPVjKKIu33p13SmQ8KbfkuwYWgArABRgS1xWJhYMm8Mn60UmXJnWXgRqNh2u3ZbGbX4l6QBgBiOMl4PDapAbp79tNgrqMoUrlcNtvD+QEg7ocAO5lMDFA8O/f09KRarWaaaBjD2WxmwRvGkLFHB49fAXiNRsPsKggCKzomOMxmM0VRpF6vZ+MnnZfl8U0CZ7PZNIYHxpJgA7tO+99UKmUsLfbmtfjYS61WMzui+Jtzorkm8cHukBJQGA4wMmcE8VarZb4Js+RZKM9S8UwkdLDIzKN/gYDx47m5X4CBayDlItnyyRK2iQ35+wDICfz4CZIjmE5+9isb+DcrAB7gmHd82cspOLy+3Y859uVlKHyGccHvuQ7JaHJ8+JFgZIKRCUYmGJlg5M8fI9+3wpX6H3/zAQfMB0HTg4iXIKBB90zSfD6/YH1ggxiITObcptIzTv6tPQhO+yNMp1NjWWBxJNkbP4ONA3sma7Va2bIpg+kZQQYag1yv13r27JkFFO4RIyOoSmftLMv2FFxy4ISetfKsWrvdNr23X+71+mbuwzOOOArLz9vt1kCSzjMYMu1x0StjxB6QcBicKJs9FWIvFgvrwFMul634FyCgw1Ucn3TisIF0AyLIZTKnYnHOsd1urbtRNpu15/DsrXSWttB5yjvhfD63olTOy3foEITNYp/j8ViSjJVDYoNMAJve7XaaTqe2eeHXX39twJDL5QygkFV4QOB+9/u9de/h3zDQBEi/s/p2u1UYhqrX68YGe0ZmvV5btyEABz0/kodUKqVms6nnz58b8BCoSC4YI8+qw5pJMh02dlapVNRqtVQsFhWGoVqtlur1utky18Au+H+fyHh/5B6CIFCj0bCWupyLOAEYkhgSIDkPtQDFYlG1Ws0ShHq9bjZHcCdG4cskqcQC7ANpC3GHcfAvJJ6Z4/mYIwAFGyqXyxdJFPfB8xIfPZuOj+OTsJQ+bvAs+JcHRy/F8mweenbG08vH+G5yfPiRYGSCkQlGJhiZYOTPHyPfd3yUphkwMdLlGyIPirMSgDKZjGmJfcDhISl6RBfLg6RSKZXL5YtN/aRzYVu5XLaOOLAnsCK0b53NZgYo3D9vpn/s7TmOY+tglE6fusGgM8eAYFAIhn7ZExYLUMQw9vu9SSx4ZgwIg/Ng6o0nDMOL5f44Pml4YX0YV9/WliLtdDptBdg4A0EijmNjTcfjsXK5nBV78jueD503OnhYMZyCfb88yGLUzOV6vVaxWDQWFF0vDgtANZvNCwcksB0OB9MXY1dBEBggE0QAvPl8bppfWEjGFbkKbJlPWPg3SQGBB2lPoVDQZ599prdv35qUBvumVsF3DEK/jlwAVoikxgcpH6z8c/qEBBv3GwNWq1WzH2okPGu8Xq/1u9/9zsAadhvfI7C9G1jS6bTtj4GtFYtFNRoNS0rCMFSlUtF0Or1gYX2Cw1xxDS/Jwd6ps6ATGb7KGHnmkDGhhoVAzT0CkofDaXNOxh4mmSJj6gVgwGBOfcvtIAiM1cUujseTfKZarf7BObBp/s+/tBAjuTeCN37Gs/F7xgFmjXMBJNg08YrvwjwT3/xqBckyAOSB3rOknh1Njh9/JBiZYGSCkQlGJhj5aWDk+46PIilkUgnADIBn3PgMn/OD65k1lhZ5oyYoMyB+aZlgHsfn7kvb7XmHdq5FtyLewgnYsCIMPgHTX5tz47xM0HK51MPDg3q93gVY4hyFQsGCIvcAI0UAXywW1uXncDi15oyi6ALMstmsXZ/AQNtSHIixRItOkOEcjCkO47vxrNdrdTodYxuRdDC+GHcmk9HV7zcZZg4IjjB+sK4UUHNfMC2MN22LG42GwjC0+ywUCrZkDsD75V8KVwFvv3xNEEV3TJCo1+uqVComVwCEfSKTSqUMXPr9vtmXTzjYa4QgyPK27xhF16x2u61vv/3WQECSsaewV1zDs68EwP1+f9HVC438eDy2Ll8AjWfYYKVIgLytE9BTqdMGjqnUSV5xc3Ojer2u6XRqTCCBcb/fW4D3HaM8eAEu+/3e2jH3ej1LgnhWWF/aFdM6l7kiUMM4cc4wDFWr1azAmfjCmNGBikDK7/Fr6iEIiiRS2+3W/IfjXdkTsi8kNcwPLDX245MNmGbOxUoBIIRO3dsrMcUnzf6ZeG7/t3/x4WcSS+adsSAWwHpzLz7p90BxOBwuQMcn1skK1//uSDAywcgEIxOMTDDy54+R7zs+iqSQm/MA4t+sYVr4f/87bp6bhi1hMP0SI+fACPwE+EFhif/d6/I9WAR+hhFh0pncarWqVCplQQQ5wW63s6XgbDZrTohhUazLvfFMdD1iuRtw5Pf7/WkPCZ4nn8/bcj4gwDI7zkSggNGkixJBG+cE2DA6DJl7QKoB+NVqNdt88OnpyX7f7/dtXGEbJdm+HTj4fr83FoMAwrzWajVb6vdspXTSXjM23qkmk4mm0+mFxINrxPG5vSdzwrPASHA9AgWOjwQD5yZhgP1C3+4TJja+5N73+9OGjvhTmNsAACAASURBVCQEkowN4/ez2UzH41m77IGG5IGf+RzJB2PqGbXtdmsyFtjSbrdrn/HJDe1xCYq0qW02m8aMUROBTxCg2JMFJpw/BGFAdDQaWZHvdrtVFEVWME1y4pfrU6mTFIHxRz5TqVQs2eAzkqzzF+f3cYdAh9yE//NMFj5FEgnzS72E36eEJMLLHOI4NruN49gkOAAbc8m88v+cG9/Ab4lf+BtzhQ4d/+I7BHR+9lIx/iZW+rjM83OPjBl/sDn+jV+QXDNmnqVMjg8/EoxMMDLByAQjE4z8+WPk+46PssLFTfmlPAaCIOFZGAI8AZZBknTRoQhw4u3eDxLsCg/MGzWDBZhw3SiKzFCZLO6NJWLewn1w47yAjn9DZ6mcCZFk90xAOxwOtg8BBZwsTxM4PIPJs3ANZCQYng+cBCACyLv7HTBWAAa68t1uZ4GFQNJoNLRarayjE0HwcDiY5IAuOz7Io5uHnfFADFvhx5nAzv8R4JFAFAoFawWLZCGbzWoymWg+n6ter6tcLmu5XFrxryQNBgNjbH2nHkBHkrULppUvLCfyHc4lyZbkkQMQpCSZLIFC2SAIrHNVsVhUsVi05MQ/I7IYdOvMGcEllUpZIN1utxdJznK5VKVSseSCu
fLzyR4sBINs9lQUvVqtVK1WNZlMtN1udXV1ZZ2YlsulZrOZ2Qgbf2I3sH4k2z758gwkQIQvLZdLNRoNpdPn7mzURpDseLDw7BA/+5a7Xo4Ew+vlCtwDY5jJZNTv9y/YL5h+78vYAckWYEA84BkZD3wde6WWA7AkXsH2+8DNuWDSABrPlHFv+A02B7uL3/tEmevyOWIQ9/wuE+5ZQh+T+T4AAvNPcuEBKDk+7EgwMsHIBCMTjEww8uePke87fvILl3979EyaH3h+B8jw1uuX7oLgrMn1elYcEkPgWjyoZ5BgY9Bro72lgE+Stc6UZEvVBBOuTRBEs8yb/Ww2u+imgmPzVkwAx7h5Y2dZ3S9f4gxopWEOfatPgjNv1el02t7uCT6MBX+urq4uAjHGhXH6oAnLls1mDSQwKs9ywGANBgNji2CQvOYYJ4LxQMMbRZFyuZzdGxptNnH0bAXngr2EKaETF915ADpADB09tQTL5dKeIZ1Om3yFuZLOnYn8viksl6NZRktNAGR+YV+wFcbRM2eMBTZAQJRkbA2Axve4FgDI87KTPEEBe4d1ZG5arZbCMLT5lE7gR8F5On3eY4Rn8npxfua7u93O2E3G2gdi5hHbWq/XNm+LxULSua6CczHWsLfYnGfPqUMh6FII7wMyCRnMNzYD24if+/hBfPLzgH8QXJknfMuzq+/W4nAvnq3jZ28fxDa+i794f/f3wHORlDMOgD5xgRjjf8Y2GHvm3AOQvxfiM78jTvnE1gNScnzYkWBkgpEJRiYYmWDkp4GR73vp+smSQi7CQ0uyn3kI/9bo32IJiBRwHg4H26Xcd6DxQOIDtmeGgiCwoleW2+lUBJMGm4bEoVqtWgErS7sEIpYxpXOxHZvM+Tdtrsn/4YQYAOwXziTJlrkZF7rXlEolKyAluHptLYyG/y4BC+ANgsB02/wehiOfzxvbEASBBV+KqP0yvZ9PHBXgY25gLQAR7glGB/20149TNAm7QZclAjHPk8lkVKlU7FrValW9Xs+cqVqtGrMZx7HCMLSxZ28RwAxH495xCsbFd99Jpc6tVwm4ki66VO12p85LnhmESYOZyWQyptNPpVIXBaT8fTweLZhJJ0Bhk0hsn8/D6vnrxnGsZrNp2vHD4WBF4Ox9Uq/XbRd6xp55aTaburq6uujM5MedPWeCILANNmGAYYi4b8ZuOBwqlTp1eGJ++JtxhHGHBUPOwfWr1aqazab2+721aCbWAHQE13Q6fSG3IAAyZsw1gRZp0rtgiI9zHZ90UBPi45dPOgBlQAP7LRaLF9f1HbbwQeILz4K9M64+WfdSs8PhYPGT8WScvdzMPytjzPl9EoovMHbvvgB42VpyfNiRYGSCkQlGJhiZYOSngZHvxYI/gRV/8vATIMke3A8GD+B/5mEIsD7Is1QJe8QDYkg4KJPKdQnmLNXX63VdX1+rUChcBH2uVyqV1Gq1lM/njd1ht26vNw2CQNPp1AaZt3ucAUPy7EOxWLTORYAADkUglaQwDO28LItjAEwwRuqZBL8MzjNVKhUNBgPbJBI2gTEsFArqdrsG5HwP7TKGCxMjnWQDMEHszTEajfT4+Kgoiiww4SwY9n5/2gvk7du3iqJIz549M0YSltWPG2DPkrVnUtLptLrdroIg0Gq1Ui6XswDIvGUyGZtj7MgHasZROrMVBCfugcAAwHrmlvHCUf3yvnc+EgzuBxtnw0PGHHYVAMfZGR9JxoJJsvPtdrsL9pNnYMNSispp6RrHpwJlzuHnhjmHIcS/YMBqtZpdg9bBSFpyuZzq9bolSlwLKQjjQoADOPEB33ENMCGxoFMbgAhbGsexBWgfM7x0hXnx7Ge1WjVGlWQslUpZLQJxiaCO32PLgBJ1Hp7twr58QTOHT16obcEmve3hP+8mowASia4HfQ+wMPp0rcOGYNm5V58cA0TYIcmJB0xJJiHjnvzzJcePOxKMTDAywcgEIxOM/DQw8n2k5EdpC08QYiJZTvUshXTuohQEZ80phsMg8kDvLnXS+hYjw2D4Huf1S4wMGsvpb9++VbPZvAjuFB4yWN4BCNbz+dwkCvl83tpl+p3QpXPHo2z2tOFgEJx2Q18ul2bEmUzmolAV5mU8Hpthvbt8GwSBgRa6cgIb48850+m0Hh8fdTwe1Wg0VCgUbMkUtszv98EbO9p9ZAYUXtIidT6fazQaKQiCi31hVquVaWwJRAQsgnS73bYAUK1WtdlsNBwO1W63jTk8HE6tbWHH0FZzj8ggvIyExIN7xOYIpt7ZOQByxpO5hrU7Ho/Gfi6XSxWLRc1mMzsX7BhFwhTsInNgeTkITnvDEPx3u52NAYwg7DLgDYNTKpW0Wq1Uq9VMxrPZbCzxWCwWxlQ9PDyY3TFvs9nMWLB+v29MmA9iyBFInEiCYAZ5Dh9cYQi5H8aJwESQ8+dg004vPwAwOB9BfDqdWqvcfD6vFy9eaLvdXtSsbLdbYyCxf38P2B1MGf60XC4tLgE8JGbMu2fJABgSxv1+b/vL4Hven71MBBv3NQbIInxHKII8DCbzQJctEh7P5uHvJKmAiSRLwBkLZDEkQvzh+YiTHpw8E4tdE5P9akly/PgjwcgEIxOMTDAywchPAyPfd3yUphkMLoPPzfIG6JdV+R1ggFFxozwgb+rL5dKWer0TYlgEUIIiQYriUwwMx+cNnkkMgsAKiVkeLxaLWiwWtgzLWzmAws/7/d66FLVaLZsQCogXi4VNAgwT0gGKDtHwRlFk98r3YQnZP8SzRAAgy9f5fF739/fqdrvqdDrGzDDuLH0DJjgQzATGjiMWCgXNZjNJJ1bw6enJpCUAe71elyQrfHyX8YDNaTabdp8Ez0qloslkouPxqGfPnhmb6bXegAdjzrXZvNPbFOdl3EhgYFu4R8adOUGbDzjh5JlMRm/fvlWn01EYhgY6JA5+6ZlAScckEpjZbGbBxxfuUtBMYOczFM5yPp57vV6bffogxsac1WrVkpXdbqf5fK7b21u9fPnSggI2QfEvCcZkMrGky2v6YbfxTR/IpHMROm2W1+u1qtWqHh4ejO2r1+sWpPkO98O/5/O5pBPLRhIBEHlf4t+woQBjKnUuMGbe2A8kjmM7PwGcJCCKIrN5fA8wAUCILX4PElpEPz09qVQqaT6fq1gsWutefLXf72swGFiCBADjw8QAgjfzwc/vsrwk6tgFzDfzQowjrnqWmWviV4wF8VGSzRFxmITo3ZWY5PjfHQlGJhiZYGSCkQlGftoY+VHawvu3fAaHhwBoCPYYBW+9GIK/UQIEG8exVOrPi1F5Fgvne3x8VD6fN33rfD5XrVbT1dWVfZeATEcfjJMl8jAMbX8DDBOAyuVOnWZubm7UbDYtkLJ07CcF9owlaAKLX7Ld7/e2FJrP522HdJ4dGcF+v78IBrAFBHsMkKVugiX3BQgsFgszqCiKLLCiEz8cDlawipHPZjO1222l02n1ej0VCgVdXV3p1atXxt7567VaLVWrVQvEBFnYzHQ6feF0GDS2Ip2Z3PF4bCBPoIUdOR6Pevnype0ez3mWy6XVE/gl
Zt9KlXk9Hs8SAHTOSBA8g4KEJAhOWn1sF5BFZoH8A0YReYv3lyAIrCMUwRGb9kCHrcIMsiw/Ho8vNiYkqEqyPVWQA0RRpGKxqMFgYAGbzwOAm83GpAUE9Nlspu12a8zk4+OjptOpHh4ebH4Gg4Hu7+9Nm4/WG9CgKN/LCLyEhZoMAiPdv94FY34Xx7EVOCODQNpEkMafCoWCtdRtt9v2XWyE+WPsCfDlctl2tmdO+RmfhO1ljnq9niqVinq9nsUuQIEEiWSbg6SZ5yMhhx33qwqAJbZI7Dgej1bvQWLMHOAns9nMGH6ux7h5bby3JR/PPfPK/yfHhx0JRiYYmWBkgpEJRv78MfJ9L10fpUshLNB+f96MjuVWmAEmkDdGlmZ5UybQwJDQtQeD5I0VQ2SAuRaMBkupLDNjaIAcbBtvufv9ufg4k8nYzt2Ax2QysUllZ/lsNmtFvJPJRNVq1Sbs3f1FKKT0S9TpdNq6MgGAvJnDLAJskuz5s9msxuOxPQMtUDGUUqlkAZJAAUNEgOKZ8/m8nZv9Epiv6+trk1DgFC9evFClUtFvf/tb1et1kwK8W/gJ43k4HNRoNCzw4eTL5dLaoqbTaT179szkIjAOuVxO2+3W9N2VSsW0urPZTI1Gw5hFnp0lYQL1fr+3TRYBFD+WPtmhuJj5ms1mFhhhteI4VqPRMPkD48o5cTa/9Mz400YWrTGsUzqdNl05wQPAINh5CcrhcFC329VsNlMURWZngIF0TuyKxaJGo5EB2mQyMb2335keTT/L/PzNfKRSKWPQmOcoijSdTvXy5UutVitjwQ6H0w71JD4EWp8AYvc8H4GUJAQbBhBgpguFyw0/+Qzf8/UYvigXUOO8JDXEFeyWeWBFgIQxiiJjNbPZrMUHxni32+np6UkPDw/G4NF+mcQJSRS2QYIGyLI6IcnqI0gestms1U2QUCPfACwKhcLF5paeaeVaJLyeNWR14Hg8dwLDBolZfI6DWJgcP/5IMDLByAQjE4zEDxOM/Hlj5PuOjyIpRGZAwOYNzy+NcuO8QfJwBACYBxyLB4QNWSwWNuEURPLgOCDBmOXQYrGoh4cHlUolaxnKxBHI4/hUhMnSICwczGG1WrXl0uvrawssm81GT09PiqJIvV5P+XzeHDOTOXd1wXgAqu12q6enJ4VhqGw2q+vra2PeYIIwVMZlNBrZOTC05XKpZrNpYxSGoQaDgQE7zueXS3k7Z/md52XTxuvra2sdG4ahisWiXr9+rVwup5ubG41GIyu2huGEPfTMHY4SBIHJIqTzLuUEGZiEMAyNraFImxaujUbDwJAl+d1up0ajofF4rHa7bf9PO+N2u21tf4/Ho0kUcC4YKYIBwSedTqter1sCxDxvt6dNCmFECbKwkARKL3VAr8wzI29oNpsXCRGyBGwcx0VKUCgUTBeNXwDU2Bjyl+12q8FgYDKdzWZjLCoSgvl8fsHi5PN5C9K73c7OiZ4b2yGYZTIZhWGodPrc1hdwBEzxJ0ADGyRQwiJ7wEE3nk6n1Wq1dHt7qx9++EGvXr0yKQU6/3z+tN9Np9PRX/7lXxq4Pj092XP5cUT/TbEy15dkhdrYp68focU1Y0kiMBqNLCFLpVJqNBoajUamuweQ2XAUJpxkjsJqD4wkECTTBHIK/7lfQAY/Y28bYt98PjcbBKxI7rgHZFuAG/GGn4nlzLefQ88+JsePPxKMTDAywcgEIxOM/PljJBLNP3akv/zyy58EJP/8z//8JZ2G/CR6/aNfFvRvlDByXn/qAztvmbBdnBtNtQ9IJAMMUC6XU6VSsW44o9HIgh5sAcvavp2pJPu/3W6n6+trPTw8mK67WCyq2WyaY5bLZTUaDVtip5MQQSuKIpMv7Pd7W+I+Hk87w6fTZ219Pp9XpVJRu922pVi08xgDAZBroLcFQFnSRTvO79BzB0FgAW69XlvR62KxMBaV8w6HQ63Xa61WKzUaDdMy4zTIT3zLYvS5MAV0tCqXy8b+EBCQlBDokXVst1vbNA+nJYAtFgsrRpXO3WRev36tVCqlyWRiQXw4HFpwqNfrqlQqFvzv7+/NXgqFgjkKRbcvX75UEJwKbgF2NpH0hcqeMeZnWBs0w7BfPC/L2rDCnskBMKTTfhawje12W/l8Xo+Pj7ZUf319bQkBUhyvq4ZN8nIPQBTGEr0/40DBNEwb/sR38R/85O7uTu12W4VCQff390qlTkXJBDVYSQI1YOUZR0AVvyBZ2Gw2enx8vAia7AeTTqf16tUrYyZhfJFXIEPabDaq1+vW3po58+wff1PgDJMmycYIVjQIArveer22REiSyWk880v8QzPuX1zwfX5mNQLJDHKacrmsZrP5B/vEcD8kQshgqFVAdsGKAEwvz0IcxkZI/D0DLulCjpJKpRRF0f2XX375f38ScHxCR4KRCUYmGJlgZIKRnwZGrlYr/eM//uP/+WNY8FG6FHIh6VyA6QO716VjQL6zEcuidCHK5XK2RAqLwFvx4XAwNo0lfhzKazi9Fhy9KcYxHo//KNNCsSda8bdv36rdbqvX6+m7777TaDRSq9WyYALbFUWRDoeDJpOJyQcoviRQADS/+c1vbD8RQK7RaOhwOGg+n+vZs2cX58KpCZCpVMpa6fJWPZ1ODTxxpOl0aoDDmGB0MFpIBcrlsl6+fGmBbLFY6L/+678umKpsNqvhcGiAyvwhhWFJ2LOgMIitVssSC87FvUnSbDYz50Qnz7I2TAJL8jhAv983cCqVSrq5ubHAvV6vVSwWjREhwERRZMvvy+VS8/ncbPV4PBoLQ+tQOhS12227D5hA5gWAYjx4fsAhDEMLIARngMkzKX5JGufHbrhetVrV4XCqA7i9vdWbN2+sILdYLGo+n5tkgQQCUGMuAYsoivSLX/xCr1+/tsRtNpupUqlYgPVjUygUrJNTHMcmp4AZv76+Vq/X05s3by4SQRjw+/t7k0d5mRNzQlcwpAHD4dAKyUul0sXqSqlUUqfTURAE+td//VcVCgW12227VxIofMQztdjpdrs14CfZZQ4I5iStzDnP02q1FASBxuOxnp6eDOixYVpBI2GiVTTSF+IWwZo55/m8pIoEMwxDY/2YVxLyVOpcw4OP0CkMoAa0SGDelYl5lpBzIa0gtn+IdCI5zkeCkQlGMu8JRiYYmWDkzxsj36cC+SgrXJ4l4CbefRskCAAq/B6GhmAPM8X5GBAGie+zbIzkAvaHINbr9Qy4CC4sUTKAHrBSqZQtvUvnTfzQ9XI/LK0Ph0NVKhULaGjWMVRJts8DXZfy+byxEM1m01ijfD5vS9xhGNrzsaSJA3OgneVzjBHGl06fim13u51ubm7sjZ/7Z1z4/n6/183NjT7//HPTJqOBl07sGswB1240GhdMAGPAHGKIOKV3cjZgpCZhOp0qjmMr+KQjDsEaTTa1D/yOIAn7iNaXYCXJpDQ8J+OFU/uf+eOX+SnOZg5xYjrreD35dDo1h2VO6OSFzEaS2Q3/pjMWDBLL8AA3Rds4Mj7GUj6MEmDQbreVSp3qAjKZjL777jtLkKIoMoYS9gi
2LI5jY009WGYypw0uKe6u1WrGppfLZbVaLUskdrudyuWy6vW6sW1ILZhz9sB5N7nBB8rl8kVihwyGxIfvfPXVVwqCQGEYWnAkCel0OsaIZzIZPT4+GiuZy+WsUxT2NB6PdTgcDLg5D6w0bNzhcLB9T9brtTGUYRjq/v7exoWgjxQCQMTG8RN8Ex27f6lBwuBlHJPJ5AJsqDPArj0Iwohms1m7X+IAgEkMelfCQdJHvOWZfp9sJitcH3AkGJlgZIKRCUYmGPlpYORyudQ//dM//XlWuDhYumMwYHBYpvMgwL9Zeo/j847rOBJv2wwEb/YUIMJuYFiwWwwyeuZU6rSzd7lctolmeROWje+gO2WJ0gd2llqZ9F/96lemYUdDvNlsbGnVL11yP9K5kHk2mxmLh+a8UqloOBzq+vra3vQlGRNEsIEJlE5L6v1+3/YIgUEgwKHNLZfL9lbOcioBDjZrNBqZjl6SdQ3CITAqxgUApyCW5WAcL45PbYuHw6G1BKaoEkaDsWWpHGYPNmM0GknSRTJSLpd1f39vz5vJZEzu0m63NZ/PrZ0u7NF2u7Vl7larpfl8bkW1rVbrQr5Sr9dtuX6xWKjf7yuOY9ukbz6fK5vNqlqtajgcGnjDfEpSrVbTfD43XXOj0VAul9Pj46OkU0IAg0sHqlarZYEX9utwOBhLMxwObaxIHLzU5OnpyZhbJBbD4VDFYlGTyUR/8Rd/ofl8rn6/r+vraw2HQ7Or6XRq3Zey2ax1TgqC82aq+Gs2mzVWCgYRGcLnn3+u29tb9Xo9mz+kJF6e42VSBGiug2xjNpuZVIAWtUhmfvjhB223W7169crsE7kEcoX5fK67uzvT3ufzeUVRZLUqSB0obMcvvKSBwI4MJIoi83XizmKx0GAwUKPR0GKxuJAr+OJnwBRQwv7ZZ4eECBujVkSSJbXU5gRBYKsckux+SGiZL+RZ+BAJYqlUssJiEhLmBIkLySLXY2yS4393JBiZYGSCkQlGJhj588ZIyK8/igEfY4WLwlC/VM6yqNdFM1EEMN4S/ZKeZ1UAFB4yjmN7eK//9JNDcGAg/AZ3BBaCLVpsnBWNNsFhuVxaS07ebll+3u12+vzzz401I4iypM1n5/O5bUiI3ON4PJoWe7PZqFarabfbaTQaqVarGXNIpyLGiSVZJAW73Wl39sPhYMEUBgpmAq2wdwoAgfEFvLPZrG3CR0DL5/PqdDoWPNiskmtgsBxor3O5nLUvJejB7ADCsKgwYp6Ngr3M5XKKosi+D8tFcM1ms2o2mwaijUZDYRhqvV7bc7daLVUqFQuEtVpNkqzgdjqd6urqypKXarVqkg0Ay2uHYaoAPQIhPtDpdDQajey5YVm4LswcGvHpdKr9fq9KpWIgR+vdw+FU/Alo4x+M2fX19YU2netJskSLACudQG4wGCiKIrVaLS2XS6s7APToZhTHsUk5sLnZbHaxYebj46OGw6Fevnx5IQcplUp6fHw0TTrJVbFYvGD7AOaXL1+q2+1qOBwaY7rdbnVzc6PJZGLP9PbtWwuqjUbDWDI2WJVkDPtut7OEhucgmHrwgH2D8YdZJB5lMhkDQzq7kXRIJ5aNxBVZTjabtSBOzEFGAqPmVxdgYwEymEbqZKbTqcbjscU9AJnYyXOn06d9Y0jwPKuMbxKDPHvpZW/EHC9587Kj6XSarHB9wJFgZIKRCUYmGJlg5KeBkdPp9M9bw+UHAFDmJgiETIj/HkbKQYDK5/OazWYW4DzQ+M/hwLBQMBm73c4MaLfbmZMSKGEQPRMBwzafz+13pVLJmEVYjXQ6bYWKkvTixQt1u13VajVFUaSHhwftdjuFYWiMEAH75uZGNzc3evv2rckqgiCw9q1cYzKZXOiT0c963SwBGVYCY0ilUnZtus7wZg+gE7TZP2U8HqvT6ZgTA2KHw0GtVkvtdlv9fl/dbtf09pyL9p/IEcIwNFaKhIAxJ+h5kO73+8Z8HQ4H0yMDwOl02uYSaQpJC4zaarUyloiWuoXCaY+PfD6vXq+n2WxmNQjL5dK6VW23W2M4aHHKNQh42WzW2EvmA3BqNBrK5/PGnJFM+GJK5pEi8t1up5cvX9qyfKVSUb/fVy6XU7fb1d3dndkiwBxFkdk+MiE05xS77nY7ey4SFJbcveTjl7/8pb7//ntlMhlrzcoczOdzY5aRA5DAeA0zevZms2lJGq1wqQHh2bE9pBeSzCZqtZry+bz5Z6/XM3vqdDqqVqt6fHy0zk0AHMFeOreVZt7oYrbf79Xtdm2cASAv28JvsL1cLndRO0Fck2SaeJKnxWJhdSFPT08qFAq6vr421pxkya8mSDKZinRucUucPB6PFzKN3W5nshNf4+ClaMRF33UslUpZkkPySv0CiQEgwfPzTNgLTCkgtd/vreg5OX78kWBkgpEJRiYYmWDkp4GRXtr87vGTN1WBycHA0QzDqAEOvL2n02lj43j75C0yl8sZUNTrdXv75GG8Fp1zERhhUtCAr9drC1j8fxiGJkVApw7gefYPEGMJGMDjfsrlsnVJGg6HWiwWtmTMjuzcB4aZyZzatyIHYKwAmuVyqVarZdeRZPp53up5Tt9JCADCaClW7Pf7ms1mxoARfDkmk4kmk4ktZVMoi74dVqRarVqQZOxIBJhDHAfZB+OJJAA9M/9mmZnuUSytMwc+mEsyrSxjQ2calps9cxxFkTk/OnrYDpjR6XSqKIpsPuv1uvr9vjKZjHWVQjPMH+yTwEUSQqvTZrNpQAUjhaMi31gsFhZc3759q9FoZAWprVbLGCqYXaQSBPlqtaparaZU6lSUmkqldHt7q9lsZq1bYTYBFjYBhUUjKev1ehd1H9JJCjMcDg3wAaxMJqPpdKogCHR9fW0+iL8cDgeNx2NNp1NLcmCAmH8Ca6VSseckaZBkTBWsFLr0w+Gg6+trY9qRsVSrVUkyuRPxBT+J49jAj32Ams2mxRFaaWezWXuWOI7NZ5BSIY96lyHHfpFHYF/D4VClUsliC3GEInrPmPFMu92p61ytVlMYhhYzCPD4Nv8HQGy3W81mM41GIwVBYJ3hiClslsk1fUE340usTqVSxrbzvMRWxpV4kxwfdiQYmWBkgpEJRiYY+Wlg5PskhR9lF0uCOowdxoGx8WYKkDDQ/O13nMZ5YOb8hDMwvFmiy2XpX5ItgXrAYJkegPfW7gAAIABJREFUw6Arjm9zy3Igb9oYJW/rLMkSfOv1usrlsvb7vUkWwjA0J9/v96b3haVcLpe6v7837TIyhE6nY8vhtOjFAAASgiyaVIoHYbhYRpdkjCHyFKQh0vntHE0wS9jIL3yQhRVAEw4jx9jDCvCMu93ONMiMP2NI/QHSABwxiiLNZjO7J6QAMIQsPz89PZkTYjPMNyweAXg2m1n3K8CQOfYbRbIUzvh64MY2SQCkc9ca5p3xnk6nmk6nNvfokpH6EBS5ZyQiu91O4/HYujFR5A2DFMex6aklWWco5D+tVssYR9hFwJIia8DS7wkDa+f9MJVKGYtHUkTAwueQKwEe2WxW3W7XdnGvVqt68+aNis
WiyS5Wq5X5DYwb40aSud+fWkGPRiPNZjNtt1s1m01j/ACmzWZj4IOtSecCaQ8myEy8xCYMQ4VhqFwuZ7IIur/1+31tNpuL5BgWEHAj+Ws2mzanJDpovzOZjAG4TxZhYZEr4B/UG2DXfjWEYl0COKCH7ZGU0aAA5pSEb7PZWCcoWMg4js3XiasAB2O52WzsM8RjfN6vwCTHjz8SjEwwMsHIBCMTjPz5Y+Sf/YULA8VpCXI8GEECpsw/AJMO2NABZrlcWuADjHjT5G3TM2G8rRKAKADl/5AlfPvttxbkmRRkCAwkg8cEAiQUKLJ8WSye9huhew3G6JfIU6mUtbTNZrN6eHiw1q7pdFrdblftdtvYL54JzTuOQuCDzYTphNmEgYO9yufzxnD4OfBMFKDB2/p0OtXt7a0Fikqlov3+vA8D88V48Yw4MveAwSMPYH5Y+iUYPDw8aLlc6u7uzgKwvzcKLzFqnJD7ZkmYe9rtdhb8YXQAK5aRr66ujEXxoCjJAI+/e72eqtWq6vW6XZekZbVamQa40+mYpAG9OvYDc5jNnro31et11Wo1GzdYSrrrEADYWwQWFRZ5Pp9ruVzqq6++0uFw0NXVlclCfLBnbmjFm81mjXXGRznf559/bteN45OeH+01DI+fZ/yV5AMb3e/31m4XFp/uXSy94wf8jgQF/wX8sS/mnDFbLBYW5AEOxtnLqQBJ7IBOWNI5AZlMJnrz5o3Vq6zXa9OtU1xMMkmBNKsCyDIqlYo1EiBpggmEccPWiDsAmLcpalLwEc/Ke6Y0CAKTOjDPrFAg7WL/GnyXOEIzBUkGLIyXT/6wOeIGtkximBwffiQYmWBkgpEJRiYY+fPHyPcdH0VSGASBter0bAgBkyU+3iLt4qlz4TD6XbqCwKayfMobP8YFW8SgMhhMuH/jxLiy2dM+GezJwLImWm4YK4ycweReuXcYGkkXjBb3VCwWbTxo0RkEgRVcokePoshap/JZxgQduQ+gABjXYnmZ+/RGTpEiAdqzDBiLZ6no5LRerzWbzbTf71Wv123JlAAOGwNwIrHA8TBU2AXYTYI+MgO0zEhFcCSCIAydB7PD4dStZzQaGfCyNA6z6dv0Hg4HK47M5/NWTIl0AOegrqHf79tGkAAyn0XCMJlMTG4BAwLw0iK50+noxYsXiuPYpDuHw0G9Xk+lUsnGjvFgvGFiANJyuWzBqFQqWSIQx7FGo5G++uore3YYrna7rSAIDHQIlKVSSff395JkjO90OrXNF7F39nuBsfKSChg67AkQR3YTRZHq9brtteKDYaFw2qMEtgrfRi6A7QKcJFz7/d602fg7zwxQ+ETVt6GmVgEpA0w+AN5ut41tg20EZBln7hMfKRaL1u2J56Ad77v+ToLISgF/8ztiD/cryfTgdGDCJwAUGh1gczDLJK20/8U2YfWpxUH+wM/+WQESkhwSa+Ku9KcBJTn+8EgwMsHIBCMTjEww8tPAyPcd733hCoLg74Ig6P2pk8DA1et1m2wOf/OwMr8/9wV4wMbwdspDEsgYOP+Gie6cSZVkoIQz0F2GN1CWbQlYnr1hmdIzi9wfb+r+bZp7JWAweTCNLDN7pyEg0vkmDEP1+30rasaBcW7AEaaCoj8YKgIorF+xeNrPA7aBP7AH9XrdGCpJVhw4nU6NKcvlcuac0in4bDYbK1CF3UEewOcIRAA4P2OUhcJpP5fNZqPRaKQ4jo3dCcPQtOT7/f5Cz0uywf0RQFim5zOMD8yDZxe32611DmLDS0k2tr4jFYmIT1h8IIO1rdVqtnyPvKBcLuvp6cmcNwgCa2mLAyNfgJEhQSDAE3iZX9jcRqNhGnU2sWTZn0QEfTg6dkm6uroyYN9sNsaKAw6vX7+2ccK/KAgnMEkyH4VpQmteLpcteSJQMt/4GzawWCysOJs4wd4ldDjKZrMGuFEUWZJI7YcPjNgG9w5bFQSBgTBBWDrt+0NXLfyiUqmo0WhYhyyYUuYMYGQ8OfdkMrFCZnyK+yGOlEol676FXwFGrFLEcWxjRzLJH2yEgmZAlLhIIsyc+USFuMb4EdfwB0DKAwXfAxA9Mw/gJMf5SDAywUgpwcgEIxOMTDDyT2PkewX5x+Px/3vf7zkY5GKxaE7lgzVBhUH0kwJTgN4U3SdacknGuODoMHE8MEbEIGCMLAMul0sz8iA47X7NcjAD5CfIg4iki6XI/X5vzA/FhhgQzoVEwE8KLCYFusfjUbPZTFEU6fXr12bYfsL9RPM7jkKhYJ1bgiCwXbaXy6UBDsHI/xsNNiwGy9+whLBxMHzcZ6VS0fF4NC07+4xQ1OqlJ+l0WvP53DZ+ZNmea/rxQqPMEjtBDycgiGPQ/X7fkg1aG8MWY3OwFYVCwZbW+/2+BQDGgOsw9iQdsE0kIPwO1sQzuZwHoGM/kyiKbDNCukRFUaROp3PRFQtQhJ3ygPP09GSgwE7ubPAIGNIR6ff+aiw5QbJUKimfz+t3v/ud5vO5adrxwW63aywf34UJJUFB+uKZHmRKaNozmYx1o0qnT3voAEbYGQkfMgKfuLAnDP4PS7bb7WyD0eVyqWazqc1mY7ZN/IEJZZNQZCqANIwfHacYuyAILMnwNoxcgqSMQnzkELR/zWQyJqHy8QrpF7bD+ZknAIKY4JMKYhp2zgoJgR4b90BD4gSYAPqSLmRr0uUKCM/D7/wc+8J0fDQ5Lo8EIxOMTDAywcgEIxOM/DEY+VEkhQwAwUI6d3UBRGCtGFAGjLdWQIa3bwYCY+HNlTdW3pC9htUvm0qywlQCNODFuVnKxFkYTK53PJ5acvrBZjmdQC7J9jdg6ZPlXcaAfU54S2dZM45jffPNN6YVJfACUjgSn+dZAErGoFQqGZMQx+f9Q9AQ06VlMpnYfWGIx+NRzWZT5XLZ5oC2v7vdzsaQ74xGIwM23x4Wg8P5FouFgQw/o5OG5SwUChYIuUan01Gz2dRqtbI54Br5/GkHd6Qc9Xrd7gsWEpDHYaMo0nA4tPqCWq2mZrNpvyfokkjQDQo2BjYYFtJLP/iO19xLsk0r6RJVr9dtrAE6NOiSTKbDcvu7xZlep04iBWhPp1PbM4QkIZc7dWCiSPTp6cmedzQaWXEr991oNKyQni5V+AfsLMlNFEVaLBYmyyAZWS6XxqABwsgMSGJg62GaCN7IFGD4YeVpe0288LIgGFv8lzkjEcCP0a03Gg1J0tPTk7HdjCtjyr3A7kmyuEWM8R3VkN+gXyfu8RmA3DOg3Bcxxce+/X5vwIy98EycF2CBUcafGBv+xj75QwJEzGSOuA9Ahet69s9/Nzk+/EgwMsHIBCMTjEwwMsHIj9JyioEnmPklbAI1IMGN8Tb/7j4GGFY+nzdnZtkT52RwPdPFxGCgfunRXzOXO+3tsVgsFIahMUSeBcRpYQLH47E95+Fw6vDT6/UUx7G1mIUFpBMPh++aw9s4z5ROpy1grtdrTadTffbZZ7aUWyqV1G63tdvtDHT7/b4VN8IQZLNZvX37Vjc3NwaA7B6Oht0boCQDPc8IwnZ6nfd8Ptdnn31my7s41Gg0UhiGF
86LRhoHgjVFhoAMgiDJ3LFEz7i8y5QxL1wbJ97tdgYEcRyrWq3aErSffzobrVariw0bKbrudDrGHDPnADWbQxK8uUfAhiDAc8Kq3d7emr78/v7+AiAI/B5EYEYKhYJpymGquHecu1qt2lxiR55NpLvTdru1rjy+G5gk00ljI5wHX0YjzRgRlEh8guCkgYflw/6DILC9Lfjdcrm0vT1Y9geksCt03TzfeDw2FhO7wx4BEc+CAQYkV9gjic5ut7NrHI9H9ft9e7btdnvBfrOhJiAFw8ymoTw7Mii6g9HRzK8GMJcAFysV+L2PHd6+fHvkw+FgduL/j7hAXEKihG3yfNwbrCIA4lcLYL+JA5wP28KfeBlIjg87EoxMMDLByAQjE4z8+WPk+46fvMLFgKDN9O1oCe48LINKIIRtwIiks9bdM31+Kd0PAi1iuRYBi9/DmGG0vmCPQMAkMZAEJpyLZX0c7Hg86vb2Vv1+35aW2RxROhX03d/fWwEqoOQZwlqtpmz2tJt7u91WpVJRtVq1NrDtdtucgXElWHHPXictyZiv6XSq4XCoMAw1mUw0Ho+Vy53a/NKpB62zB2b0xdvt9iL4B0Fg7BCdhdhgkMANyONAxWLRWBfYHYy1VqtZMTaMHwE8juOLgkkAL5s9da0BVEqlkjlkOp22ccNusAcYv3K5bIwqS9gAOVILzyr7+b67u9PhcDBdM9dnHmDvjsejLf3zfE9PTyYdoXYAn2AefE2DT6oAdj/3SBQmk4ldv1QqqdFoGCOXyWQsqaNtrHTuUAQzXa/XFYahoiiyInD8BH9er9fW0Qv5Sb1eN6ABmABfzxwCPF7WlM1mbQ8NnhNQKRaL5guMC+MgyXT7+CtzASMKsBKUYfmoReH6lUpFqVTKnov7pgB3vz/XgxAHYP/YrJRVgul0amMLaLFHyXK5tKSVBJff+ZgpyVYxKCrnuYibrDp4dpOkBT8kbvmVDZI0YioMsHTeWNLHO9h+4oKPwfz9pwAlOf7wSDAywcgEIxOMTDDy08DI961yfRRJITeC0aO1JCgQWDDmdxk1Bi+TyVjAQDvKA+H4DA4sgmd24vhcjBwEwcWbr2fpPIOE4WI8nBtGgLdrgBKmYbFYqFKpmAY5kzl1arm6ujLdOcueGAKGg0N2u13T0t/c3KhYLGo0GhnQzOdz3d3dablc2u7sqVRKYRja98IwNDaRMYZhWK/Xqlar6na76vV6tnEggQ2nxwELhYIxK3RwIkDH8XlvAs+YHo+nwk863MCwSTLHZW44JwGUMcOpuB8cNJU67c1AYTDMMP9GZ75YLDQej7XbnfbsIGnh8zgv+8yk02kLDMgssEUkCLAoQRCo0WhcMHvUKwAMzAvP3263dXV1ZeD76tUrhWGoXq+nzWZjbWIJEj7QwPaQFMH2sjs77N58PlelUrnQKmNfYRgaA0XHJAIKiQy2OZ/PbS7wH5g2zwoiKcAnNpuNMepIBQASmC4SPdhGgiqJHn9T7+HZJ+LGbrfT/f290um0SYeokeBzxBRsgda62Cf2zFgAMGjvuVeCKoHeJ7okAJvNxqQwSF6oZSFWEXi9lIFEFHDwIEvsk3RR8wLbhn+QfDFf/rsAA4eXkBEjsfF32Th81McCP+/Y14+RTCTHHx4JRiYYmWBkgpHMWYKRP2+MfN/xUV64kCDAsOFski52jobF8iwbxoBjck4GHjYQA+DzMAoYNuwDLB6BiUFiqdEXCrNkzyaCMEbH46lTDPINro1DsefEbrdTq9Wyc08mE7XbbSsAxVny+byNRbVatRanABUBFiNik8tKpaLFYqHb21sdj6ed1AmajB1ORIBstVrW8vbVq1fWthYjJCDyXKvVSk9PT5rNZgb+bMI3m80MyAjmgDJ/arWaBc7ZbGZzy74HtPCEsUGf7fW2+Xxe8/lc6/VaqVTKmF6YD5wH+6CA83A4FY4/PT3ZXKbTaetSBCOKUwKAURSpXC6r0WioXq9fBHS/MR5sI+1zSQgICDxHrVazsc1kMjZmnU5HURTp+vrapBOAJAEYW2cujsej6ZpJfPxmj5lMxmwOtkySnQ+fAQB7vZ6ur69NfoKkJAgC20AUAAGwcrncRV0GgZjP8oyeMaJ9MddhPAjA+XzextoztfgwDJpvf4wv7HY71et1VSoVtVqtP7gvWhYjlUBmBauGrxLc6VJFjPGSAYIpoADLDHt4PB7Nt9m1njgDkwxjjL3gp/gQMRAfBnCYY2IpiR3yB7+qIMlAilbI+Ajx7ng8WuLgmXqYPB8f+ZkY6pNr5pmYnhwfdiQYmWBkgpEJRiYYmWDkR5MUspyNsRF4/ZKfD9YEBgKxf+M9HA5m9FEUXTwkD10qlazlKQDxbktIAhiOydsnEgSMiME7HA4XhYSwE3QP8svZ6NLX67VJAAaDgTkyAREnoi3o4XDq7uQZNMaRJWkC1eFwsK427JXhmQbkIldXV8b8wEhIsqVpAr1nNAj6nJMd7aVTAbPX3wKOsH+wlrS93W5PnbXQQMMopFIpa7cax/EFU8RyNsGCOUA7ThE58wX4w8gxjvv93tiwxWJhQQwA4dx+eXgymZid0q4YPTfsKLYJC4htMX6cE6aOlsjsIXI8nrTV+/1e4/H4IpFip3qCQDp97pZEUCOwMpfYEucBcNipHpmDt2NAEp8hAQCgCW7z+VyPj492Ds96E+hg2GBUYeoANcaQgIONEqhgakulkiU99XrdlvQpwgUcYacpSI+iyFhB9gja7/f2/MgD0um0tRRuNBrq9Xoql8vmN+Px2BIYDn9dzsk8Y0vH49FYc0AZwCb++ASJ+8GGJV1IdRgXYqekC1mMZ1gp1mbuYNM8yBBrScr5m3hAkiadW/pyf9wvYAUzGMfnVsHcKyxjcvz4I8HIBCMTjEwwMsHITwMj37fK9ZPR83g8ms4zCAIbXIwfo/MDQZGq15cSjDabjbFXME+0WAVUcECcAQCCPeGa/J5AxNKsfzsFODA2ZB71el3r9VqPj48ql8uq1+u2PN9sNjUajdRoNDQcDm2PAtgGv3M50gikIHTUORwOur6+1uvXr+28vV7PurkQjAuFgrrdrr2Jt9ttVatVNZtN3d/fm1wC46EA+XA4WMFzLpezzxyPR33//ffm0KPRyJipRqNh9438AelKtVrV9fX1hXSDZeU4jo3tYQ4IatnsZYE158SZ6LTkl+IJXHF83tsjl8sZI5JOp21vFpITnAMpDC2T2T+FwmFsI4oidbtdA1m03wD34XCqO5hOp/r++++N+YD5DIJAtVpNu93OluFJAkajker1uqrVqhaLhR4eHvTy5UsLyiQK2B41AYVCQePx2HzA66SpSUDyAQvG/JPAcT46HZGsEHxh1rGRzz//3PTpsIdv3761RMPPJ8lKuVw2hgjGFh88HA4aDAbabrfq9XqaTqcmIxiPx0qlUjZeLOMvFgsdj0erISARGwwGyuVyajabyuVyxjJjBwRnNkZlo1CkC4BUPp9Xu91WFEU2dzCfJBmwgbDg6MBZHchms8Zmf/HFF3r79q3FlVqtptVqZcGXuhVAEAY4
n8/b3Esy4GXlAADKZDLG4PlxguGG/fTSMPyR50YmQTLBZ4mhJAGABSBBLCJ+AmgAUvLC9eFHgpEJRiYYmWBkgpGfBka+7/jJ6ImTLRYLeytmwnB0dM3H49EKPiUZs8KyOv9mEDBilghh/xhIlvq85lmSJpOJOVu9XtdoNJIk68iSSqWs+A4mjyCPLrXT6Vh7W34fhqHS6bS192QvEVrEeoedzWY2gQ8PDxfOh56bXcyRHbBkTNDjXoMg0Gw2s70oFouFer2etR3NZrP2HDApsGTD4VClUkmFQkGtVktRFJkhSbIlX3S6OJGXihQKp/0Unj9/bgwOIAGAMU6wODA2m83G7pu5Yl8KGB4SBq+d9jbg7QSpATpt9OCHw0GVSkVhGBqTSfvQ+Xyu0WhkY8Jyfzqd1s3NjZ6enmwZmWJbHJLnAazYZ4L/f/v2rdrttlqtlhVfw/jC8jDWFLI+PDxYAIzj2AqMmXPYMTZrJNgByGz4OJvNLphSAtJoNLKxI+mCWfIMNYkBDDS2XSwWjcGCjeQ8BDgPVMy/b9eLpGWxWKjb7do8TKdTK3qH2abLFPeUTqc1mUxOAer3IMk4hGForFocn/b6QFpDTICp2u12Fww9sQUmar1eq9VqmU0Qr5Ag7HY7XV1dqdfrmYSoXC7r7u7O5hWglHTRGIA6hP1+r4eHB7NP9nchCSZ2Sef9jqrVqgaDgTGoXtYBUHkGmX+TjCFRYr8bZBUw1iQUMOT+JSqdTl8AHjaK/76PvUuOP34kGJlgZIKRCUYmGJlg5EehK+M4NvZhNpvZEuN+v7dJZiJxSoyOgMkDEkx4o2ewCEBcj+9JZ8NmczUmh7dgAjJBF6kCnWQ6nY5t/ocDwTx2u11JMh20JNtxm+Xs0Wikv/qrv1KlUtF8PtfLly+12+1s74/ZbGbnxODL5bJub28Vx7Ha7bYymYx++OEH5XI51et1Y9FwYByC693c3NiSJuAGm8GGlQQ+jIsNNykYRt4Bc4d2ljECZCgwpRC1XC5rNptpvV6rVCpZNxuWv2EHYGuLxaIFkqurK6VSKWNmGUevCUbqApO3251qCADUer2u6XRqumRYhuPx1O53OBxeaLhhGZGA0Lno7u5Ov/jFL4wZ5vsENYJSuVw2lhEJBCBZLBb1+vVrG9M4jvXq1StNp1MLdt1u1xgzz55gz/gJtoyMwncbAkikU6BiM83dbqcvvvjiQk6EXINAQY0HNQfo6ak7ADxgSAlMSEW4brPZtG5ctVrNwGk2m9m9p1IpdTod89mbmxvVajXNZjNjuCmY5/y5XE7D4dDmErkUMhBkLchYkNP0ej1NJhP98MMPSqVSevXqlSVnSByIF5vNxgAf+ZRvndzv9w3IHx4eDASJLa1WyxjOKIo0n891dXWlRqOh1WplXbuYX+o8kC6k06dC8+l0qmazqXw+b53LkJ0gbYqiyPwBkMOOOQBH2FWSL88EY1uACD5J7Q1Ay9wBKh6QYf58YpscH34kGJlgZIKRCUYmGPnzx8j3Hekvv/zyxyHG/3D8y7/8y5ftdlupVMpaQOIAGLtnYGDCCJRoMjF2ltN5+4fNQk/r9ZcYOYV6gBL/xz4BaF09mB2PR1uSZvJZJqxUKppOpyabgA3c7U4dfl6+fHmxrDufzyVJnU5Ht7e3Fpzp1AQTBKBQiNrv9xVFkVqtlgX8crmsMAztvBgHXWwAPWQTaLRhljKZjAV1v1wahqEOh1N3KeQdvNHjoEEQ2IaK6/Va3W7XJCx8niVzAiPaXK8ZR0tMQgHTOZlMTD5Sr9dt+TqVStlmiyQC0ondRcM/m80uQG65XFpAfHh4MFbDL1lTu8ByPg5TqVS02+30n//5n5pOp/rVr35l9pfL5WwfC9gwmKP1em1MFsvf3O/xeNock+DARoqHw0HPnz+3+4rj047xALFPEghwy+XSwAiGC9uMokij0cg218QO0DBj9yQugCfnyuVyVryL3eKPJFjeJxqNhgWR0WhkNQIAXq1Wk3TeAwXmko0tCbYEKcZ0t9vZ+aifIAllA9AoilSpVPTXf/3XWi6Xxp4T4I7Ho/7t3/5Ng8FA3W7XCp/L5bJ1QqP9M/aKbT4+PpqvsyEqyZJ0qu3I5XJ6/fq17UfE+KfTaXW7Xd3e3qpUKunFixf2DIz3eDw2WRNsNmOAHQGKjB3F08w13wEYWfnwyRPxFnaPWgfADfadZInYCEvHOPJdr5sHUGCWpRO7Nx6P77/88sv/+5OA4xM6EoxMMDLByAQjE4z8NDBysVjoH/7hH/7PH8OCj1LDhWGm02nTneM8FLtJ5176LAsTgNCf87aI3ILACIjwpslgMkC+WBM2AsNgCZGl/TAMLZjwhkyQZt8EgCKXyymKIis8pjsMgWw6narX6+lwOBiTV6/XzYAxFNhImAo004PBwJb4WXIFtKrVqulsARMCHEu63W7XPlOv1zWbzUwmQtEnb+o8PwEQeQIyjdevX+t4PBq7M5vNDERHo5Et4RMsbm5urHAYvTrzSBF3JnPqYERNADvZH49HSxL4vi/oxpCLxeLFfiYYPEv3LPezzwqsDKzYuwXijUbD5iKdTuuXv/ylbm9vdXd3py+++ELr9dqYouFwaIkP7CJL+8zjfD635AHpzWw2s2JpHHs8HmswGJjt0s1osVhYUEAClM/nbal7Op0ay0WStdvtrEsUPoRGvNls2p4sdD1Cb0wnJxg9mHFJ5h905kFC4BMwfC0Igot9Zfb7vTqdjjF/URRZ62bqDDabU2vp5XJpkiXmC7+C/ZxMJioUTht3djodG38CKvfS6XT09ddfaz6fq1arWeA/HA42l7BTdEY7Ho8aDAbWNhdpA6w29RQUvQMaj4+PJrfh3vCf7fa0Dw0JNONJQrdarQxE8VUYeZIwWLXBYGDyED672+3s37DL0pmVg2HH1kjuSE6QSMHoSrI4il15aYm/lk/aWV3hHMnx448EIxOMTDAywcgEIxOM/GiSwkKhYJrhavW0CzZGC0ORy+VsiXS/39uSKHpiGC1ABQYKrSRG788L0MBc8PZdLBbN8ZFHsIR7OBzMKGD40JWjMZdObEWz2bSiy+l0qlKppNFopFqtpiiKzFkxlkqloru7Ow0GA9OWFotFY3bQ8rML/eFwKjz95ptvbJk6nz+3psXgkXHEcWwsF8ELYIOlK5fLmk6nF+wXrUDRIOPQBHE0zzgZgJLJZPT8+XPbDBN2r9FoWHcX2NNms6lSqaTxeGxgjEwDmch8PlcQBBqPx1qtVgZ0sDMsIx+PR3U6HdPmc1/YQz6fNwkGS9O5XM6SGZwMRhHn4dzlctmYlR9++EFXV1fGMnqGKZPJmKwAW4PVkE7sFXKWp6cnlctlFQoFvXr1St9+++2FphsWkDoKNNaTycQY2NFoZNp6DyIeFGGpK5WKsXHL5dKkL94nGS+YYKQSo9HIEjISKALOeDy+0M0DJH5fGj642nTVAAAgAElEQVS/252KSpvNpjFkgONut7MkajAYqFQqGasPIMPuEiNgK+mqNJlMLLD3+31VKhU9f/5cq9XKuills1lNp1M
bx5ubG6XTaevIRS1KGIZ68eKF1Yjc3NyYPcKO00kOwMjn82q1WtbpCWDgWQFkdP08N2NLknE8Hi3pkmQJ3d3dnUql0oUdsVJBYwSCfLF42tTVy6h8YwXiIbEykzl3UuJnrkGSzXl8QoONkdhg71wnOT78SDAywcgEIxOMTDDy54+R75Pd/+S28AyM14KzDIdTwijx/0gaYLcoqiMAMThoq9FdM7AsM1K8y1ssGvZKpXLBUuRyuQv2K45jK+bFyWEHMBAKHLfbrbFXtCWFrSuXy/ruu+8kydiaYrGoTqdje4PwzARAloM9s4JDRlFkgALDB5vJMi2Tjw53t9tpOByq0WjoeDzt9fD4+GiBj+D4+PhohZc4AHPV7/dtmXm/PxUw8n2Yjslkot/85jf2f5lMRvf398ZMPTw86M2bN3p8fLRgTnvS1Wql4XBozkZBLqCezWatqw57f1xfXxsrt1gsDKiKxdOGfm/fvtV4PNZyubT2wyQSJC2bzUb39/cWpBg/zjsYDGxp/9e//vUFkwaDCuuEFj6TyVh7Vhybnd8BoHK5bM+63W5VKpWsZgB9+36/V7vdNjBtNpuqVqsGNGEY6ubmRrlcTnd3d5pMJsbmUYAsSY+Pj5JkDDPXhH2DIfLFqD7IwqjToQomieV06hAmk4lJDPBPlvABdgIe491sNg3MKc4mSdztdpZM4KPYgAcrmF9YcQDmt7/9rYLg1AULhrBYLJp9MK+wdg8PD/p//+//qdvtGisXBKeOWWyoyn4sYRgqCAIDAnzshx9+0Gg0uuhShk2m02nd3t6aNpygS3E9fl2tVu2ZgiCwfV2Gw6HK5bLJZVKplM0Pc8dzkhSSLKBLR8JCwkQba1Yj8A1sA/BhTjw4etaQGA8wJceHHwlGJhiZYGSCkQlG/vwx8n3HR3nhotMIgcKzLPv93pxOkr1xl0olW46DQUBXzIAQiPk+7BoPDLtBIRvBHOceDocXGtB0Om0dT2AWJJlGlmVDnAkHbTQaprcGMFlKfvbsmVarle7v7zUajYzhe/78ubrdrgqFgj3T999/b8/QbDYvJB/IApbLpR4eHnR3d2fFi8hLVquV6Z4xLJa9WVaXpLdv3xqjQoCBtcHQ9vu9FR8ioahUKhbkl8ulXr16Zc+K9OXp6UlxHCuKIg0GAwNKWITD4dTydDQaGUOKcTKGbMRHYvH555/r2bNn9iycC1a01+sZU8Gy8nQ6VbFYtIDb6XRs7mj7iyOSAOAYSDWwGVjWt2/fqtVqaTKZKJ/P23I39w9bxnzBekVRZHIdHFs6FY63Wi0DNa7rl8JJbpB2wCqS2MKMwnLV63WT6LCrPInwcrm0ZX8YF+Q6Dw8PWq/XGgwGdn3un2diyT8IAptvitO5NwJiq9Wy+cDPdrtz9y+eG2BYr9cajUaWAAHKMHmAWKVS0X6/V6PRUC6X02effaZU6lRA3+12Va/X9d///d/GZvtWxuzHg4yCehCYZ+ncPhYw/uKLLyw2kXAQuwACEtl+v6/pdGoxbjgc6vvvv9fXX39trbHZv2a73doqBDp/5CiMCVKJVqulTqcjScYsAtTcLzEAEIb99OCLTIrifUl2fWpTuK8gCCxRJaEgjkrnomCkb7B+nDc5PuxIMDLByAQjE4xMMPLnj5G8uP2x46O0hWepF0P0bNvxeLSlT252Pp8bwwbLQbCEtWOAJFlwY4IZHK+bJSAAaMgKKF7lbRe2qF6v24DCcvj2osgf2FxyNptdLCsidWADOZb/YR0IELwFE5QIgqlUysACZ81kMtbqs1AoKAxDYzg4J+CChno8Htv+CcViUY1GQ2EY6s2bN+p2uyZRSKVSF4GdcZvNZrq6ujKWCD1qtVrVmzdvNBgM9Dd/8zeKokiNRkP9ft+eA/kHLAJFpcwtwIF0YTAY6Pr62sZkt9tZjYIk0zfncjnrikRg3m63pldmXtm7IY5jmweciGBDsKJbEMwI11+tVspms7q6utJ3332nTqejer2u4/Foz0Vik81mL9hb6RSc0um0aX4JuKvVytjQ8XhshbowKXRRYqyZ3+PxaIkW0pp2u21doPhsoVCwJf4gCCywc+3VamVSFuYKliaOTxtgUqyOH8LqwE4RVNLptBXIEkT9Mn0qlTJpDmCOFh1JEnvvUAgOe0Wis1qtLFHkfhjrp6cn03v/8MMPVtROgTjPT8B9/fq1vvnmG/3t3/6tvvnmG2OEj8eTvrvT6ZgM5sWLF6rVanp6erLgDAg9Pj7qeDyq2+1quVzq5cuXarfbSqfTZpepVMqSxmazaawyKxS1Ws3uVZIVA5Pk+cQahvL29lb1et0kWaw6IJdBe+/ZU4q+CfjMDUkOcc6zcCQcfIc5ITYQ74hpyEWS48OOBCMTjEwwMsHIBCM/DYyEqPhjx0ep4cKZKBrjjRfHwdHQfQJA2expgz46oDBgFDvypouunDdynJ2HRXvp34xhHihwhWHg2jAbBETfsYZgydv8YrGwVqi53Kk9J4ZAAKE9J7uDM+EUkwKW6PdJXrLZU7cm9ONovsvl087fvMFHUXSx5Nztdu1eYUmy2awmk4mCINCzZ89MroGzAkQwTKVSySQKdHKirezt7a12u1M7VViXzWajTqdjwEQhZqFQuJAB+JoCpDTNZlO1Ws305pLU6/UsgOFszM9gMLioHSApKRaLqtVqtsxMNyE66zB3hULBCm494yvJWMt2u21L4tjVr3/9a/393/+9sbSPj49mxzBeJDG0Mubc6Ln5bKlU0nw+NwAjaeE7+AUBDPYTFg3NPfMnSe12W3d3d5JO7PWzZ89sE0lYXFgwvkMRO0H5+fPn+o//+A9L0ABvZBvs/wLbWyqVrGg9mz1tasl38UOuD0M+m81sPvBREjn/nN1u16RDzCcsNEnlfr83CVAcx8aYIymAoWdse72evv76axUKBV1dXanf718ETupNmF8SMnTnJF1IF2CFb25uVCqV9Pj4aHEslTpvfIisg7kmycU+iVMkMbTcZayxGWox1uu1JZ4k4J5BA8SxIVYg0K8z9sQowJaEmfhLbQCHZ4D52TddSI4PPxKMTDAywcgEIxOM/Plj5J/1hQsHQ5PsCx6RJvggmE6njX2CSWB5kN/FcWxMB7pXX9xGUMEoYY4AksViYcu8y+XSllWZkP3+1GcfWQWBjEmAedzv9zbJ/I6OL+v1efPCVqtl3yGo0TUJVhEJCcWNFBN3Oh1b5gawJpOJ7u7uTP+KpOHq6krH40lrSwFqvV43R0f3zfVms5lubm5M1oBUghaYMKoATDp93kF8tVrZ9abTqa6vr+3cOFmn07Ei0XT61AHm9evX5mAwC8gX0CCHYWhFw+jtYQ0BWBhU5lU6b8p5OBysSxEJwPF41GQyMYaYa3Nu5hBbg7nk2bGT4XCof//3f9ff/d3fWSChSBUmmOV7pA6SjB0GnLhfdNMAMCBFogKzXC6XbRyxdZ5/MploszltKsrzsrwNy4t9zedzbTYbSzZgoAgmPBMyErr+kGgwxmi9CcLMKX7ipT4c1Fdwb74WBUCTZHECcEBDT5CD6ZzNZi
bT2O/36vV6NpcwgiR1JGGLxUKtVkvr9do07J999plGo5GWy6XZKC2d37x5Ywwy++wwz8wB0phCoWCJL2wi84lsg+ci4BOE8bdarWY2xPOj10fTT9wEcBkT7NbX8lSrVS2XS2Nive8B3MQqkmHqW7y/IXfz/sf1Ye08SCXHjz8SjEwwMsHIBCOlBCM/dYz8KJJCjIs3Pem8JO/f4FlmpGCTz/i3QgaJg5asPARGTPCDSYFVqVar6vf71pmIzxP00FmWy2UzUu6PScKxfDcfimm5FnIM3uAx4NVqZfKEzWZjhaAU5G63W2WzWQ0GA223W7169cre3ne7nR4fH7Xf7619KROJvIOCwkzm1DKVLi43NzdKpVJqNBp6eHjQcDjUF198ofl8bm/1vm4AjTAshCT7eTAY6NmzZyZfOBwOGo/H2u12ms/n6na7ajabVqxdqVQM6MfjsQUugg/zBxMH49BoNIzRepeJYFkXR8UZPUOYSqVs/wiAWDqxWsw/duWXmZEyALCbzbk9LO1Nv/32W11dXVnbVp9UeMc7Ho/2rOyZgn3DHmLf2A1AB6MryQACG4elYfPGw+GgMAxt741Wq2UdfUhioijSV199pUajYWOJbAf/pM0xdgnzCtu0XC7Nlwh0fp64P5bQGQ//HGjlsS8SOVjYVCpliQ0tXGFnGc+npydLBmAzy+WyHh8fDXDRgTPvfk5evHihIAj0/fffK5VKqd1uW/JEMvbFF18ojmPrsEQBLqCHhCCTObVupsPTfr/XdDpVtVpVEAQWL5CoMA60wZZkUofFYmHSDor6gyCw8xGr8CeeCx9FEiHJGHlsE/BDehPHp6YMy+XSVi7wIVhEDkAPG+T5SYxIFnme5PjxR4KRCUYmGJlgZIKRnwZGjsfj/xELfjJdicNjeCx7wmwwwLAxGCGTwu9wIN7mYSEkmWFjiDgvnYt460d3C+NBEOAtFUkE4Lff760QF2cjaGHUAASDu1qtrA0n8ofD4WD7PKCRhrXxGu5Wq2XBBJDlmQeDgd68eaMwDG0jvDg+dYqCASHIsuzZarV0c3NjXWZgjpBgMB48H+wRzwMQM19IF1qtlkkbCBi0v6WQOQjOBY3T6VT9fl+TycRkEdVq9UImw/wRNHyR5mKxsLoGDB6WkRayzLskCyywTgRB7hvmlPHCxrycBlaC7kokGqlUSs+ePTMdfhCc9tTAZgBigr1njBkrlsPpikOggc0jYYKdXa1WVqjrGcH/n70327HsOq61x+77vt/ZVCZZRYqUdWFBgC/PheG3kgEeWH6J8zT+b2zAsGRLBE2yWF1WNrvv+/6/2P4i56YpmmVSN6y1gAJZlZk715ozZoxYI0ZE4HxwkK4eG0Bj8CEp9ng8bt3GJJk9HA6PBeTIGdBtZzKZE1vAedzf3+v169cnmmX2krVn1ggOEm08z8jaJZNJm60CMzSbzU7YVb/fb4wwv4uCXr/fr1evXplEh6CEswaThz6eYK9UKqnb7Wq1WlnB7mQyMaYWOw8GgyZhcO+PIMqtGchkMprNZvriiy+sMxXO2mX1Oad+/2NXOs4vEq1A4NjpijOOr2O9KaQ/HA6WycAXcb6xK9aSbAaSC4AI0AfAAGHu12Vj+Tr+CUbPDfS964ddHkZ6GOlhpIeRHka+HxjpSg+/ff0kNVwYDkWBFBdy6NBvcpgpGMZxuG/JsHmwgPTux+GhqZRkiwEzst/vTdMLG8tMDEAul8uZXpg3ZUnm8F2GAjDEyZJqjkaP8x06nY5pktfrtbWCde+TTQqFji0yMXgYAdgEd9hct9tVNpuVJGu1i0whEAhY4SVv59fX16Yv73Q6Buo4IPbF7/dbVxn+DmMCuxoKhWxY5XA41GKxsA5IOAXWLplMqt/v20EB/GezmWnPAQUkJgAD6W6fz6dSqWS/HxYLx8egOpgQnHggcJx30m63zflhB5KMhSCFTlCw2WyMdQGsAHbp2JIWrfXDw4Oq1aoNo8RZcLgAEZe5woZhPRaL46BLdN5IA2BK2dvFYmG1FN8OYpBTMA9jv9/rzZs3evbsme7v71UsFo3FOT8/Vzqdtg5knL9cLqflcql0Oq35fG4aa7Toh8PBzhkBDADKjBcAibS5z+ezfeQ+OdN0GpvNZiZLgZGCjQLk0YRTgM0gVXwATGen01GhUFCj0dB2u1U6ndZoNFIoFFKpVDI7xMZ9vmOxLXNbGO54dnam/X6v4XCo8XhstkSwGQ6HT5w3bXX5+nQ6tba42CQzclarlQ0pBfSwV4Jm9jAYPNZO7Pd785vT6VS9Xu+k8xbBMD4Oto71wQdyP9gmPpmaH9Ye0ACQ+F4+g+9z5RaSrHuTd7375WGkh5EeRnoY6WHkzx8jeSH7rutHZ7j4cH45i+syZrwFwuSxKJIspe4yAnR7wSGTtoMN2u+PrUU5vNzHbDazwkBYvXQ6bW/IOBo+w2XrkDe4xYW8ObvtWdl0Cmdpg8rbP/IRN3W/2z12ToLpGI/HlqaezWZ69uyZYrGYdW8BdGBzXAaA+yctjzP0+XwqFouSjk6MTjmAlJvyJrXK2rHGAE48Hle73dZyuTTnH4lE1Ol09OLFC9NVA8gMk0ylUspms9aBZ7c7Dgd88uSJstmsGSzacpgEgAOJi5suJrXLQefQ+P1+mxDOoEycPSwOuniXzeIQRSIR1et1O2g45OFwaMwazFi73dZgMDDpAoAKYCYSCZVKJQNLCnNh8wAwVzZC/QSyHxgrv9+v0Whkkg/Ah89er9caDAa6u7vTarWyAutIJGK/T5IFEXQHogsVzmE6nVp9Ag4H4OF8xuNx2yPAE5ZvvV4rm82qVqtZoTgFtzhbpCsUEQ+HQ5M1IU9gLTebjQaDgcLhsLHMyCpWq5XdF0EP4AfIw5gTePp8PpMOTadT/elPf9Jms9HV1ZWKxaLJOhjYyiwfzhtsMYwtdkIHqVwup4uLC2PFJ5PJSYclbM6VNMD2E2gfDkfdN0xcr9ez8wvzRsCCHh4AYM9gXgEfAhF8GIEZvs71fa7EjYvADF9NpgO/6l3vdnkY6WGkh5EeRnoY+X5g5Pe9cP0kGS70xxx6CuZI8/EWSxoVZgVnAnMTDAZNNy7JwEl6dHiutleSHU7eOF3HTjqVtK5riGhQ3VQy6VKAhWGCOFQ0mn6/3wqKMTDkATAPgUDANpMiYNgYF1BJ4/K1ZDJpDAHdXGA20bNSvMp6YEDr9dpazrZaLc1mM9PiI5/4droVsIYdSKfTymaz/20yO4d3v9/r7du3isViOjs7s3WCsYANI9XMYEzWgRQ6TACHD3sBwAFkUsAAL8yW250K50gwQqobpwCTHAg8zivBWQD+FFTSajUQOLailR4HMaLzJlAALGEcYa0oTIZFXSwWOjs7M4CNRCLmeGCG2WNqAehERGE7BdBuEDEYDPTxxx+btpmCZHfeDA4DW8BeCoWCgTZOajweK5fLKZlMmozIlX5wv7Cr8Xj8hN3C8fA92CmAn8vljC2UZMEIPgD/AZuJnYbDYbVaLaXTabMb5Dc4cs4j526/P866Qc//wQcfWPvl/X5vA
SL7Pp/PzUFnMhl1u13TtRMQofVOpVJqt9sGmO6/w37C2GPHOHDOLP8G6x0IBOzfkVyxHm4Nh1vvgE1ixy5bD0AAPPg5V95G1mS3O+r82Q/uXToGX8g6/icw8a4/f3kY6WGkh5EeRnoY+fPHyO+7fvQLF6l2DNaVRsTj8ZNCWgybN1aYKRgoDJcFAIhwCrBa6HRhm1gk2AeXwej3+8pmsyYxcFk20pT7/V6j0cgWH2OXHluwUvjKm7DbeYVUOo4BY0bXDesCSDH4DceOHODs7MwMHBkHrBUGAVhz2HBwGBCHzDXQ4XCo9XqtTCZjzj4UChkQoLmFaeFnarWa9vvH7i/r9VrpdFpXV1dWKBsOh02KglaWA4rTcQulKWZ+eHhQrVYzFpYLo+cZMH73MAIyru2hM14sFlYczAyPSCSiSqVijBCHBafEIYEhAbxms5nVDJTLZQ0GA9sL2qLiVN16AwCQ4BS7dnXaLoDA1OTzeU2nU93c3Bjj6qbk+Uycm3voh8Oh2TgBDc+4Xq8N4Pg82EDkMsvlUqPRSOfn51qv1ybNwLmzh0gzXHkNLCPdu3jeTCZjRegU91Kj0ev1TubCYKuRSESz2cxqBjgnFBinUikr1AccaEkdCoXU6/XMFwBET58+VTAYVL1et6COugR8CZ91OBw14c1mU6vVSk+ePLHADlkJAeZut7OgKxwO21mECWevYNLQrrOXaMj3++OAztlspmg0qul0akENRb/YiBtkwQJyRmHYuAdJJ6ww60kwhF92/TMsLbaGZAbQdzMg3vXDLg8jPYz0MNLDSA8j3w+M/L7rR79wwYJwqCUZGwALx0O4bUL5mqsrPhyOWmb34MB2oKd2Haf7tgm7BiOIcaBvZ4HYQBaY1Oq3Qck1jGAwaOwHP8fPSjr5bEnGHsFoTadTK2rE0SQSCTOGbreri4sLhcNh9ft9Y318Pp+xCqRFYfBYb9gDF0jQ7CIBIA3NvRUKBTtsSEvQiLsFkDBpGNlwOFQmk7FhkbT33e12Go/HZvg+n88OibuujUbDpr4D6KTAF4uFCoWC/H6/sVXYUzgctgODw8ERplIpY6/cw+LWMSQSCcXjcd3c3Fj7YKQFrEEikbA9px1oOBzW27dvtV6vValUVCgU1O/3TXvMAQsEAup2u7bfqVRKy+XSpBW0CMYGkN6wHwAee5XNZq2ugqAEUODMEbi8fPlSn376qYEkAzUBKZgdSVas3W637T4A8u12a0BCgAcQIKlA/uAGfsih0H7zu8fjsRXOJ5NJk0s8PDyYo0QOROqeICqZTJqzhlXK5/PmvAFNAjoAhsDNldxcXFxoNptZfUOv11OxWNSLFy90eXlp9sgFqGJvnEWCIs47Mqx+v2+a/X6/r8PhYPtPK1pXtw9ryH4B7m4wPBgMzIfiE5HMfFtuw+eylqFQyM4LwTIg6IIMhenu97MOBEz4VEDF9fHe9cMvDyM9jPQw0sNIDyPfD4x01+rb108iKZR04mRJ5eGMMMxA4DhALBw+du4ZDAZWGImEgQdkgfnDIsI8SbLP5oHR27IpvHXicF0WcLlcWuoeFo3UJYPVOJAY+m63UzabNYaQ4mMcO8YGI8hzrVYrO7ykYykU7Xa7BqA4JJehYT1YYxyeJDvsfA1ns9vtzKkcDgfVajVJR4fBQalWq9bJCC37mzdv5PMdC3QHg4ExQZVK5YQ943e/efNGhUJBi8VC7XZbyWRShUJB0WjUZAOwmTxfOHwcIBgIBDQYDHR5ealOp2MBAmtJjQAHkSBBeiwQXS6XyuVyVgAO4Lv63XA4rNFoZIdvs9mYBCSTyZgzoKVwMBhUNpvVbDZTOBzWxx9/bOtKgMNBhxGFnaELD5KMfr9v9sA9w2ziVNgvnhP74D4AcVd6gU1LMmkF600hOXYDqwXwAYTIGABU1h/W1+0UhV3TQQnmcb/fW/vYi4sLGwR6dnZmE+HRoiOXcYvqYeywEewROch0OpUk23fXEcfjcdPww8Jls9mTGUIMdf3mm290dXVlTBVSm91uZ7p6QHAymRgokSHA/qbTqfb7vTnxbrcrn+9Y1E0dSyQSse5x+CVaa/O9MIibzUbVatXOhd9/rEtIp9NKJBLq9/sWZCAxQk6BHAbWEGYPZhWfy+9FykLgia0RIGDjgBD3ji/HJ/1PDJ53/fnLw0gPIz2M9DDSw8ifN0Z+Hyn5k71wcZEuxcli1Lxhut/D2zlsC2n5zWZjm+gyaDgJ3sx5MFKHGCGHolAonLyFkqoMh8OWfnRZLxZwNBrZhsHEMQDSZfYmk4kCgcCJ/pZ0ZSAQOJF1cBjQp8Jk0gq31+spEAgYK4AmlAOM8cGKAozr9ePMFgCbA7/fP9YCkKYej8c6HA7WgQcjdjX2HK5AIGAsKwcRXavLGPJssCqAuvQIcovFcUgkxg4b0O/3Lf0Na8tnAp44T+4NJrPf78vv96tSqWgymdhhRzohHSUQd3d3KpVKpjFmPXGkFKn6/X4bVkmavlwum7Pb7XZ23ziPxWKh0Whk9h2LxdRsNtVsNm0+DjIiAg7a1LoMMBKV+XxuM0Ly+byBlvTI9K3XaxWLRRvqORqNlEwm1e127XMBPQpkmVUCew0zBYtaLBa1Wh1ntTBwE+CByYpEIsYosZaLxcLkP9hDo9HQxx9/bCwTgQDnnHPDvUoy22f96vW6SbAAYQYYss/sMcEhxfTlctnY6dFopOvraxWLRVtr/NJ+v7ehlgyMbDQa1iksn8+bDblyumazaZ8XDAatq1soFLLACF8XCoVMWjOZTOwcce+xWMxkMolEwoLc7fY4WyWXy2kymajf7xsjTt0Ea4AMAzvjjBCcE5Djb92aH2xru92arIuvAVjYIAG6d/24y8NIDyM9jPQw0sPInydGft8V+Oyzz773G/6n6x//8R8/y2QylpqHJcOJ8eaPobGIfC+AgB6VBcPR4+RJOfPGiTG4DBwHDYAKBoOmkSVlyqK6unnkGDgE7m0+n6tarapQKNim7PeP2lM2IRQKneiv+d2hUMjajNKVCGDj2TEcGDLezmG3kEYAqDgz1hUQdtupkvpPJBI6HA7WMYbDyRt5o9FQMpm0LkSwprRF5V4AQZjWYDCoUqlkQB+NRpXL5QywYVhSqdTJTJJ6vW6MHsXBMDiwCxwMvsa8EEnG+NBNCm0wBZoUMFMUPpvN1Ol0LKCJxWI2RBDGCl19MBjUaDSydr8cOFgngBr7RYLB5Hg6c5VKJY1GIwUCAXMEgcCxSJQaB7oK+f1+q22AJYtGj0MWb25ubL4KmnuceDwe12w203Q6VaVSUb/fVy6Xk9/vPxmoCUNTLBbV7XaVyWR0OBw0HA4Vj8f18PCgeDxuAdJmszHAwJZYB2pN1uu1tbJlnzKZjO1NOBxWt9s1BopggICCYuVUKmVBJTZE3YXP99ihiza3fr/fNOK9Xs/0+5w3WDECA+49n88rFotpuVyqXC6fAHAmk1G73dZ//ud/mj0iUYnFYif1GvgjbJcsAv+/2RybAazXa2PRuEdJJ75uvV4b40qns9FopHK5bF2ykKiEw2EbCBsM
Bu1Mw2wC2Pgmn8+nwWCg8XhsPpcgkgCdoARb3m63J1kOSQacBJoAyX9JYxqfffbZ//tRwPEeXR5GehjpYaSHkR5Gvh8YOZlM9Pd///f/97uw4CehK2E+ccZoq2E5cOAwCrzR8ne//3EaOmlnpAK8eTKfw73Q0gYCAUvtk+pNJpPmGNbrtXj+xbMAACAASURBVPr9vmmuccSkVgEf6XE2BUzDYnGcsfHw8GAsHm1l2XhJlt6czWbW8QmZAIeJTYJVQ3NfLpeNLUGfS1EgTJ6kk6JTQCkejxvj12q1JMk64wAOsAKZTMbui841dAUCkPP5vNUc4EgwSgwccIJBkmTgk0qltFgsjO2BuQCkASsOZjQatbaj9XrdZlkEg8fiTxwqQzpxljgg2FYOII6Q2R3McRkOh1oul2YDANdgMFClUrEWxNPpVMPhULPZzOaLJJNJSTKmZDgcml0TsMBcxuNxXVxc6ObmRtvt1rpZ8b273XFqO8/I1wksgsGgMUEEIsgy0um0yXmw9fl8rg8//NDOEXaCPAJ2tFwu63A4WIrf5/MZ63U4HDQej1Uul624d71eG/vY6/UM6HD+1B3M53Pl83kDR+QeBDOcDexOkp3lcDhs5wF72G63qlarkqR///d/V61WM5YzGj3O9snn8+Zoe72e9vu9crmc2cNgMDBgdWUyAAUBz2azUa1WU7vdNu21K0lxAZYzDKPd6/WUzWa1WBxnw1APUSwW7dzD7lLwDKtNMBmJHAeiutKuXC5nUgt8TDAYtEwE/oZuXDQGkB47gtG5DYAnCOd8utIkGF3YePw5Ugn2RzoFRO96t8vDSA8jPYz0MNLDyJ8/Rn7f9ZPM4XJT6dIjM8LbLXpxWC3e2mHmUqnUSeFcInGckA5bJMk+jxQfg/ZIndIhBkYBx4uUAXkBCyPJZpK49xWNRu2Nm9/Fxvv9x3kak8lEs9nMWuKSvq9Wq/Z70bPifChk5SDTAYf7mU6nGo1G5uAAUnSp/H5ACBkFn8cBicWO09TPzs6USqVUKpVUrVaVy+UUiRznt8A20A2IVOt4PFav1zPgGAwGxizwPMgU+v2+FVtziF12kWJISScHCpYJ/Tup/2QyaYcYQIHtASww7vF4bMyb6+zcYmkYZHTdyFM46ADkeDw2/S37zfyL/X5vXydQ2u/3toawaTA2rL1rX/P53PTxsEB0wcJ2CYDo/IU0p9VqGbBxj7BgzWbTHA3MEmzWarWyblLz+Vw3NzcGKG5A5Z6pw+FwUjy+Xq9Nh57JZOxnYHJLpZKy2ayKxaJCoZAajYY59fV6rUajYZIImPFKpaKLiwvbEwq/eaZUKmWB2mAwUKFQUKlUsu5ShULBirUnk4nJNMgYwNLW63VjY5HipNNpq89AhoJfuLy8NHkGzBvsJC8ZAOhqtTJQwB7q9boFdDwbARId27gPSeaf6BYFy8vnwrwVi0ULoJAAsWcUlBN0TCYTkxu5kin0/9KjrISuW9w/QOPKxjgrh8PBfhd24l3vdnkY6WGkh5EeRnoY+X5g5PddP1pS+Lvf/c7kEjw4Tps3XjbFTcv5fMcuPLw18pa9XC5NU45T9vl89ibNwUJbC2uDkfOzwWDQev0HAqdT7SmIJCXPv8diMQMdn89nTpvvAdhcrSZvxiw6hwSNKqlc2niig8bxoiNfr9dKJpPm9NA9Azowdjha7h2nMZlMlMlkFIlEbMgfTpM06WQysQnrrjOGFUBLv1gsbB9jsZgNH4RNdB0qbJ5b1ApzAMOQTqfV7/dVKBQsJU1wwfqgjeVQwLDBEgFGbqoZcB4MBvL5fMrn8ydMmmv8OFr2FVaPguFcLmdtYl+/fm3rRPEwAUsoFLK9Z8Ah6zQajdRsNhUKhfTq1St9/PHHVljMfsPWSjJnA9NGEIaTaDabxkJGo8d2z3RyQmKRTCbNNpDurFYr6+IDc/bmzRtjZkmhu8xlqVQ6GR7oBkUw65zXSCSiVCqlwWBgAQDMDoHiYrFQvV7Xcrk0UGO/2Nv1em11GaxJLpfTH/7wB5XLZVUqFb1588ZYNr/ff1LH0Wg0Ts4NGQFJBjbUbdBi94svvlC9XjdHTcczghAkW3SvglkF9JD8jMdjxeNx6zDV6/UUjUZVLBY1HA5PuiMh5RiNRhacAt6wpwTdi8VCs9lMpVJJsVhM/X5f+/3ewAawgOHjrLjrCsu8Xq/NnwKU0mOwjx+JxWL2d4IL7nG3250UFXuSwne/PIz0MNLDSA8jPYx8PzByNpv9WUnhT/LClc/n7RDAfgEu3Bh6TtLMOGhXawyTxeLAIuH4ACmcAJIGNpWUNFpOOt9IMiZnsViYU2SRYFVczelmszFdOhvnDnpjIjxvwRgFn03HJobtcZBwwDBx/C4OwX5/7PBye3urVCqlcrlsG1sqlaytKg7M5/OpUCjYYUfzulqt7A/SiW63a44EPTSsItrf1WqlTqdjaV0OJqCO8dIpikJFijCRgLBPrA/24fP5TA6DHIC9Yr9gQ9HcM5F+PB5bmrvb7ZpcJpFImN45nU7bWuM4qU84HA7W8haHwYGMx+PmYAD4aPQ4t4Hvw44BApx8oVAwaUK1WlW73dZmsznpRFWr1RSNRjUej60T0X6/t/WrVCqm+Q8GgyqXy1oul1Z8SoEqDC1McbVaNXmSe77Yq3Q6rVwupxcvXugPf/iDOSm0/LC0yJw4w1xondkbJA90nbq/v7fgiz2inoB/C4fDajabur29tc8KBoMn9SwAPLUCMKKpVMq02NhWNBpVrVZTNpu1s7RYLE5Y58lkolgspkwmozdv3qjX6+np06eKRqNWJDyfz0260e12TV8uHSVHzIKBmd5sNqbN9/l8enh4sCGvyElgEQF8/IKr+8dvIItCe//27Vs7W6FQyAZHTqdT9Xo9qyEheHCf2917/CfBGAAvydheAmOCNQJx7h9QJSMDUIVCIfX7fe+F6x0uDyM9jPQw0sNIDyPfD4yczWb67W9/+5d54fqHf/iHz2irib4zGo1aqjUcDls3FB4QYwJUWq2WstmsstmsGR9Oxe0YAjvkSjD4OynjQCBg3Z/QBdO5xa1BcGUVbHY6nf5vBZvodBlYSNcYmEO00hwKSaYxPRwOJ4xJOHxs9xqNRk90oYFAwNLykiwNXy6XrRVrPp+3AkuAFXZvv9/bYEHWHX03xoTTZHjjer02/ar0qK9HwxoOhy3dXyqVjI3DGe73e/V6Peu8tFgsLC3LZ2LAHDb0761WS71eT7/85S/tcBAw4BTdYIKDidOXjuzW+fm5Pv/8c83nc33wwQcnaW1YEA47em4Oi8/ns6/v93u1Wi1z0JFIRN1uV5JULBYtxUwqH2aZlDlBRyAQMB1xvV7Xf/zHf1jBqM/nU7lctj13Z9/EYjGTZlDgyqDCQqFgrVthYfr9vvL5vLHYpMVJybvdpUKhY2vfUCikt2/fGtu0Wq2MSeUMoeNnL5H1UOCNc4FppWMTbY9LpZIxXkiB0KAD7ofDQQ8PD5bSn8/nVnQuSV9++aWKxaI+//xz0/szNLHb7do6wzA/PDwYeAJeblvoeDy
uL7/8UtfX1woGg0qn0zo7O7PPogHBkydPdH9/r4eHB5VKJWUyGQuC1uu1Xr58aQHybrczCdF6vVYul1Or1bIaDzpo4eQBYlhApBiAcL/fN9lEIBAwWUO327XZONgJgSTgxnkLBAJWAwDQEGSFQiELwmBn2RNXmw5Th33ze/gd+NXBYOC9cL3D5WGkh5EeRnoY6WHk+4GRo9HoL5fhogMTkgYeEsPBgVO45nbdcVN9GAtv50gwWGTYqlwuZ738t9utTdeGLWJxwuFju0YOM06GhWPIG2wab+gwb65ml03HAWNkOC8KV/l8nDUbiBFFIhE9efJE+/3eJrJT3NjpdLRcLg0A3QGYgC8OBTBD2sEwOdYcZwETRCrZbYO72+0sFQ+TA3OZz+etw896fWyx63ZhwlGEQiFzsIAVsgIKmpn1wFT6WOzY0jSXy1lhMEPnXKaUe3YlE/v9Xslk0pwbbBFzKyQZgKJjJiCQZN8HoPZ6PW02x9ayo9FIb9++Va1WM8aOegWfz3cSvMDK7nY7PTw8mDOFsfv666+N3Ws2myZloKDXnYvi6o5hVAks+v2+drudDdFstVrabDZWbIs0BufD3A303KFQ6GSIJRpvSVZrsFqtlEqlVK/X9fLlS0kyhtcNDmFvYBJns5nu7+9NHpROp5XJZNRsNg0YACAKfWHLYA2z2ayBJ2xuNBrV69evVSwW9fLlSxWLRZMowHQj61kul7q7u1MmkzG5y3a7tcJ6Oj0Fg0GdnZ1pOBzqxYsXOj8/N1kOz4+zh211g9DZbKblcqlsNmsSB0B1MBjY+eEPzjgQCFjgC5MOK83zwgb3ej1jwe/u7tTv9/Xhhx/q888/V6vVskCE80BTA5w9gQa+NBAI2JmDwYc5hLkDgF2J2Lf/jk8j0AsGg+r1et4L1ztcHkZ6GCl5GOlhpIeR7wNGTiaTv1yG63e/+91nzNbg4NFtR5K9IbspPEmmW0WvyYMEg0ErWqR4D6YMQPD5fBqPxxqNRvY7WCicP6l6jESSgZfbtYRFprMNxsLXfD6fFdC5AyBJ81PEy7/DXHKfk8lEy+VSlUrFGDy//1gQO51OVSqVFIlE9PDwYIYGSxAOh61IlEMCAFIgSxvS5fLY0pPPxyGSDoZB5TCGQiHlcjlVKhWtVitLXaP3HY1GarfbxqQiieHtfjqdqlgsmqaW9eWe6MQkSaVSSZ1Ox/TWFxcXqtfrurm5MZYDJgzjJdhYLBambXZlKICNz+fTaDSyVDc2grOYTCa2l5FIxA48axaJRFQoFNTv9y0gAPw5eKT/W62WcrmcRqORDdwDQJn7Qi0AUpDlcml7TLtSvhfwgPXkGVOplM7OzhQIBHRzc6Ovv/5aV1dXJr358ssvlUgkVCgU9Ic//EFPnz41lok2yRRhz2YzDQYD+2ykO9Pp1Fgntzh2s9kY8FATAtsZCBxbyhJIUHuAjXQ6HQt8ttutBZKcDwaYck7C4ePgxkgkYgwdNSqr1UpnZ2em7ae+A5CHFYOJ6vf7xpLiL9Dmw1D2ej2TJ1xcXNizTSYTdTodCygBCpeJLJfLqlarJ8FeKpUyNhlwj0QiVkTf7/fV6/UMSNiLVqtlv4OaDVh2AiokP69fv1Y+n7cuV7CqBMWsMcEqjh9/RDZkuVyesHK0+PX5fP+NuePzCRDxg3zdy3C92+VhpIeRHkZ6GOlh5PuBkX9RSSEFwbxZoivlDRA2D3YNwGABWBS/329v0xTQdTodY4pgeEhVYoTozFlYJpAjRyAdz2JLsunhqVTK3vK5V4yDA0WHJpy3e+hJb8MWhEIh+/9AIGAs2cXFhXK5nG1oJBIxoEwkEtZxBUfHJruFthzO1WplrXUZcIcGHJ0+MgrYUNafN3nWAWBCksLvhU0CiBl4Rwce9oeuTuwlHYMAREmqVqtmkN1uV9PpVM+ePZPP59OLFy/UbDaNsYIBdhm37XarwWCgXq+n0WikbDarRCKh0WhkBbZ0F6pWq1osFqZnRnYQDodtoCRFuJLUaDQkSefn55JkuvmzszP1ej0dDgedn5/bzJB+v69sNmugiiOVZG1x3YLadrttRcPn5+d2rzCOfr/fugjhjKLRqNLptG5vb61+4nA4tqRdLBY6Pz9XrVaz9clmsxbUDIdD6zo0m8306tUr0+sjU6CYF3kGUpder6dUKmW2FY1GjSmFHcdGYZ9wfsykoT30ZDI5kUxJRwkRjPxyuTSGqVAomGSKYCeTySibzUqSzeBwnR6Odr1em6yHFsPVatWY8/+Sv0mSZQey2awqlYqKxaIx5ovFwiQiw+HQdODcF76Gc4LMQDqVBlG0i69CzsL3EvDsdjsb8Ih/JNPR7XaVSCSsk9V+v7eOUjh1np36EgJpnpkzBCC4ungkUuyd9DjkknPv3rvL1LOX3gvXu10eRnoY6WGkh5EeRr4fGDkej/9yc7hYBN4Q3ZQboMG/SY9MGywRbAfOjnS33+9XOp02Haz0WEy62WwMsNhYd4MpeuOQ4jTpXsTvgwVAMkERIW/nq9VK4/HYhrOVy2U73LPZTLVaTb1ez/TQrgHsdscWsZPJRJVKRalU6gQckH58W69cq9V0f39vwwqlRz00mnfSmJPJxCQRw+FQ/X7fhlgOh0Mz9NFoZIWWOGD2AHYFNmK9XptjSCaTymazVuRZKpVOmL7FYmGzW0ajkRXODodD+12NRsOYUaQm/X5f0WjUnARyGwqa5/O5Xrx4oUAgoGw2a7IOtNir1UrZbNbSxu76sJ7ZbFb7/V7lclmbzbGNK+nuw+GgbDarjz76SM1mU69fv7ZJ6KTXU6mUscOr1UqxWMwKeWGp6T6EXAD7415x6jDKOOm3b99aZy+kRQQhAMNutzN5Bd2IYH+wM3Td33zzjbXRpRsRuvxOp6N8Pm8FrrCGu91OzWZT2WxWgUDA6hxgeQlWKCiWHougGcCKDAI5SSaT0WQysZbVBF+DwcDYK5/vWMBODYTf71ez2dTh8Nj9B5Z9Op2ajGM8Hutv/uZvrNV1Npu1ItlYLKanT59KOspA0IXjJ/AH0hFUaM8Lw0mwNZ1OT4pqcabz+dxAKRqNGvjudjvT/nOO8HO0CeZ53Ja++DCYd5gzPn+73ardbhugwyjThQw5C0Gmu24EisiJCHD5DLIoBPCSrG2zKwEDUAjUkePw+d71wy8PIz2M9DDSw0gPI98PjMSnfNf1o1+4WDzAAwPB4fHWiPY7GAyeTBanEBTmjFQ2hxejkGSMRSgUsm4nOFe3uBanO5/PzYHAYh0OB5MQcJA51DjRXC5nb/6ka3k+mKVQKGSOjNQyLWV5W57NZiesByyHJGMGs9msFZ/W63VLUZNW5llh2Ugrw6LwrOVyWavVSr1eT9vt1liOXq+n8XhshcnRaNQAwu/3mwNMpVIGojgNnPhqdZx/USwWtdlsTF8+Go00Ho9VrVZNb81cEZ/PZ/MxAA708be3t7q8vFS1WlUqldLt7a3NgKAAG1uKx4/T3svlst0j68D+0OqU/eQwxeNxDYdDDQYDpdNp05DXajW1Wi3bz7u7O7Oth4cHXV9fW+
DBnsJ2TCYTY/AIiObzuYbDoTFp3W5Xq9VK1WpVh8NjVy5kOX6/X5VKxQ4tz9NoNMwGALVgMKibmxsVi0WdnZ3p5ubG2CvWloGPBCQ4zlQqZewg5wEJAA6UgAzwhf2eTCZmuwQMkmygJ06K8wmoYNv39/fKZDJmo67+HDac9UbWBHvHui4Wx1kqtVrNpB20R45Go8rlciaXwaaxP5fRpwgXQIUNzefzVscBiw8I0iIZW+QeX716ZYXFfv+xeP7+/t46SEkyWQy+yGX6XH+Cb4zFYhoOh5KO0iKY93a7bUw0jCcAiOzMlai5gbff/zgzxJVA4G/5msvaucE560dAQCDH373rh18eRnoY6WGkh5EeRr4fGMm+fdf1k2S4uHEeFu2qJHsIWDIMGPkCqViMkf/n7Vl6nAqPTpyHZzEp/OTzOVQUVsIqzGYzm5dAKpffxRsxz0MaFcOAaeQtezqdWjFkqVRSMplUs9m0rkQwfRjB4XCwAYpIJrbbrUk/AEd0smhqeeuGKVmvj/NN+H0cXjSnACfpaAp63cPu9/utgw8yFveNPRQKqVKp2Bs77A1MJWsXiUR0eXlp4IfGeb/fq9PpGANL0evz58+VTqfV7XbtIMP4NRoNdTodzedzFQoFA3Qc/suXL3V5eWlgHYlENBgMrBibgkraoEYiETUaDU2nU2OKYCNYy0QioSdPniifz6vdbuvZs2dqt9uWskZn/e26BmQ/sL8wScgfYEUpIMfeYWbcWgA6PmHfsG2Hw0GNRsOc7zfffKPLy0uTruCEr6+vVS6XDUQ4H/w/mm7qCG5ubvSLX/zCag7c9P96fRyWisPmvMXjcbMxHBHzLmDPXJ0+afxisSifz6darXbipJAmwDBx1tmf5fI4H6NSqeiLL75QuVxWvV5Xp9MxnTrr7kqn6GzEJHsAnCGVtLAlyEQy4TK6i8XCdPR+v1/ZbFapVErL5dJYZ/xHvV7XbDazM3Bzc6NarWb+ARABqKbTqQEIATPntVwum3wsGo1aC2j8HAA8n89PwAlbxP8hvYKJJfhysxYE1pIsC3I4HOwPzCCfD8MIi+dd73Z5GOlhpIeRHkZ6GPl+YCRS4e+6/D8WTHBQOEDeIiXZwaXb0XQ6tYJOFgAWje4kgA9s32azMeeAVIEHZVHcNB9FkDBY/A6+nwUkvQ1DwlstrJvbIpXiYxiodrt9Ag6wN7wJ+/3HdpP5fN403DAOhULBDIpDj3OXdNJKczKZ2BR2ihnRhZPedtnT0WikVqtla49mvl6vm+OjGBWDc50rBx4HzcEGZEhh03Upk8lYS83pdGpD/qgNoIsUjvTq6spmO8AEwGK22+0Th4vjGA6HmkwmqtVqms/n+vrrr3V7e6tAIKDz83NFIhHTFTOLASC9vb1VMplUvV6XdGRAYG9g8pDrwFRmMhljBUmVD4dDrVYra+/77WdjWCdF2QDwaDTS+fm5MpmMdSPiMMPiwTqHw2EbXHhzc2Np8t1udzIbg+BmOp2a5p39wY7pzgUDutlsNBwOdX5+Lor3CfAIjpArULMBexUKhUyLjg0RcKB9B/BZ016vp6urK2PWGBbZ6/WsqB5bonDWZRNZYxjRTqejbrdrRazJZNICK+oVKM6dTCamA5/P59bFCgkWa//VV19pOBxaUINenGAN1oogZD6fazKZWKADSCHNSSQSBmIAMI6ZQBrgm8/n6nQ66nQ6VrAOu0lxM+fO9U3IiSgCJhjkD5/j8z22KuYPHdVYY+wQ+wFQJP23r7msIxIu7/rhl4eRHkZ6GOlhpIeR7wdG/kUlhYHA43R5nPy3b0ySpTE5PK7Gl8XnzZAbxuhg2TgEvOnztgsIsNB8nYVArgBjttlsTqQZq9XK2k/CDG63W9NGk/pfLBYqFAp6eHgwJoCiSdgzDpyrJeWAk3KHqeFCq9xsNi0tzRrx9syGw6DALPAWztdoU0u6HZAulUrWcYg3epwCoAqgUFSLfAI2BXCXHp2P9Ng6EyDn/nkuHEAoFFK/37c9yGQyur6+1osXL6zzDh1vCA7evHmj7XarTz75RP1+X51OR1dXV2o0GuYYer2ems2mPvzwQ9OBMzUd5gH2AyYFeQnabLoA0YIYhkZ6TD27xZ3fZqeRUpTLZQMXn89nbVjRM8Pe4Sxms5mtLfcmSS9fvtTFxYW63a6CwaDy+bzu7+9Vr9e12+10fX1tQEo7Yp6VegcKc9HeZzIZFQoF228cHufGDa6wBRh0V6tN+h3ngh11Oh0bVBiLxUyWQdH6aDQ6kS7hD7h3LkDJrSHpdDoKBI4FxMhyYOoYdgkDR6taV150OBzbDz958sSCwLu7OxUKBR0OBwsGAFAkFfgkpB4UNRMgYss+37E98GQysQ5y7DFsOhIw7IWfG4/H5kc5f/gNF/ABbRh3GFDAzw20XcDgfgEdAI7Al4sgH/t2bYM9RsblXT/88jDSw0gPIz2M9DDy/cBIvv5d108iKWTB3JvnzZOb3G63xgLgYLh5ug7BJEmyVO5ut7OiPRyMqxVn0WANASbecH0+3wlIuRuBQePYADo2KZFIWLobZkM6OhdmO9AuFaAC+CjAhUmIx+O6vr62YXKkiHFarpMG/AhueBa6+fC8pERhOHFedAyClbq/v1etVrPvlWSOlD+hUOhktgrspOtsw+GwMW8ArKSTYXakzLfbY0cndLwwEzhx2EValdbrde33e93f39tes78ffPCB6appmdtoNNRoNOygVCoV1Wo1rVYrY4XOz881m83U7XZVLpfNkR0OB5NdwKphp6PRyBw+TDN6YuyWQmECF5zCcnmcel8qlbTZbOy/HEAKt5FH+P1+s3k+OxQK6dmzZ3p4eLA2vMzXePXqlYrFopLJpMrlsn0WLC6dkXDEBDvBYFCNRsP2DsANh8O6vLw0xnU+n5v9up13ABg3gHBlT8gmMpmMnT++B3a33++b1hspEEES5w7pDCwybBP7hZTDDQ5gfOkutlgslM1mbU8pmn54eLDWv+fn58rn8xqPx4pEIgYotLN1pVdci8XCno92xuzpeDy29dlut7q7u1OpVDrR20vHNtkEoXw2YNDr9ez8EIi7TCnrwl678ofdbmd1L9QLYE+cUxhRlzlm//CPfKabkQHIpMfOc971bpeHkR5GehjpYaSHke8HRv5FX7gwuPV6bSk2HpgHBTBYFDdFhyMDgEgJxuNxax1JURw/z0a4hu0WXR4Ox+JQCn/d3+dKDNB/7nbHTiocbj4fjSmsIUxWqVSy1DdvygCDJLuHyWSiXq+nUOg4/BBw4fcDlv1+X4VCQel02hiOSCRielZS5/wuDhCFyBQ74kQikYgViOLwBoOB6vW6GS+F2y57Nx6PrdUr97rZbKxFKkWyHBIA1WUOCoWCIpGI7u/vdTgc9MEHHygaPQ6/4+0fyUY0GlW329X19bUGg4H6/b4+/fRT3dzc2JDAXC6nq6srjUYj07tPJhN99NFHdvjPz88tVQ2IlctlY15Z52QyeZJaxmGMx+MT9hbmFYaYWSGz2cwCTr7utl5mDQmElsuler2eyQoYFshBRuMO8
+sWXdbrdd3e3kqSfS2TyZjuGwZ6v9+r2+2azGE8HluraNL4MMU4fuoIKCjGwbF23Nu3zwyBGJ+BVpv1yOVypoXH+VAjst0e60Skx6JiGCqXLZQeZwFJMkeHs8Zm0cDjGJfLpTlNv99vXYVWq5UGg4F8Pp/q9brdMwCJPAK5CSw7No0zpbUuMhpJ1oEL34XPQZ4SCoVOWstS50FdAr4Pto4Algv/yDNSOOxKXPB/+EU32Oasur4appjvcf0Y98bnuoCFjbvg4l0/7PIw0sNIDyM9jPQw8v3ASL72XdePfuHioWHupMcWrTwcN8T38He02XQUIfUM20faFCMjDT2fz+3zDoeD9eSn3SULSmtOCl5h/GazmdLptAGW9GispMrd3++mlg+Hg2lyWWy+Dovhyge+fSDpysRbOcbJZnE/3DtO2nVGGBMOZLc7FmNT+MphgSV5o2NRkwAAIABJREFU8uSJer2eteRcLBb2cxjScrk08AA80RzDbLKvGC7TxSlsdBlY5B6kmJfLpfb7Y0Hj5eWl3QssCKBWKpXMwcK8AIB0NGJQ5vX1tTqdjvb7vQaDge7u7owhvry8tPs8HA7WCcfVWA+HQ0uDE9Dw3/V6rUwmo263a3vI53DwDoeDyU4AfXTI2WxW/X7f7I0gizayMCs4bp/PZ3NnRqOROp2OMV+z2cy6LDUaDeXzeQukaAGLPAVpDqwprVYzmYyxktjeeDxWu90+SaEnEglj8Xhu6kewe86Ly4xzrjm/q9VK+Xxes9nMWuvyGfw+tz5ltzvqqJE+hELHQmmKvynKxTlvNhsLBmGggsGgEomE1XZQg0L7Zkk2WR5Qor6EmTWsD8EbAOz3+63OBvBBguIGp4fDQaVSyYIp5B3ou9Go53I5zWYz9ft9AxdAHzkJNrjbPWr26Y4GsPL9wWDwpOUujD5r48pc+HzACr/s1gS5wQ7BpQvy3vVul4eRHkZ6GOlhpIeRP3+M/L4Xrp+kaQbpc36pCyau7pV/w0hxIuiwXQNbrVZWHMtDUYQZiUROBs4FAscBi0gTeGjSlBwwukPxNVeDzffhqCkyxMgXi4UVvMJY0AmGDi4YDAdGkkk3MN58Pm9v0RgR8zIwVheAWT+Mh/UF1NLptKW2/X6/sTHSo6ZYkrEAGBegg9TBZUFYP2ZVkHqnuBJnz88BjqxltVrVJ598Yp13AFsONxrsxWKhfD5vDpbBi2dnZ0qn08aIUTQJ45HJZNRsNs3pz+dzYx1hn9wCSFg6nCLdtfgvXWsIBEiLu8ELgcN2u9VkMjGgdlPQh8OxoHg4HBqz6fP5NJ/PTZLjpqM5P36/3+of2Nv5fG6tc7Eh7qvT6ZywXtFo1IqWfb7jDA9kFhRbj0YjC94IjLC/4XBoBdXdbtdmgYTDYTs3PMNms7GAaLM5dhNbr9caj8f2ecFgUP1+X4fDQfl83p4RLT7PA0iwhtg6Dg4ZB22bYbgBUIIXHF46nTaGC8Y/n8+bXWHj2BIBFMFJv983uQxnJRgMWrE7basBUQICzqokkxDhA1xWkjOcz+dtzabTqcmKaETA7+RzCShd58/vxbaRIyF/4txJj3U++A8CcUDTDaDwIXTdIhNBcIzv864ffnkY6WGkh5EeRnoY+X5gpJsx+/b1o1+4cFSk6qXH9owsursgHFj0k0gDeCPFWboaTkm2caSvSSHDqPD70WaS+uYeSJmzMPwONgFHDsiRauSNdzqdWktWuqhIj+lFDurhcLCOMwDRbDbTmzdvNBwOlclkbNL5ZDKxjdztdrYWbDjPwO/huTabjbXwxQmh/XXTocglkALwu7jPeDxuRbOk0GEt0frC9sGI8JzSkT2kGxTrBGNHETAMzHq9NueNTpxhfofDwdLq2AVsKLIAQBpnCVNVKpXsPhi+ibwBuQl2Apiw14FAQMlkUrFYzBzAdntsQzwajWwtCWrcQMINVlhXnO1kMlGz2VQkErECaAqzGfRIe2QONCz1fD7XcrnUxcWFza/Y74+af7/fr2q1qs1mo06nYxKKZDJpw1A3m8f2w9j+V199ZUHWfn8sssX5LxYLtVotm8XCM/R6PXteHCuShMFgoMlkom63a1KE9fo40f7u7k7hcFjD4VCvXr3S4XAs0nVb8JZKJaXTae33e9Ns84yLxUKTyUSDwcDWAseLbTSbTd3c3OjFixe2fjhlGPDtdmsypGq1av+OPfN3BnniT4LBY9E3MgtJxnbSNjibzSqfz6tYLJ5IEPi9DPLEfrE5VybSarXMJnK5nHa7nXVZw4fhVwnYCRwY/ol/AyQp3gVIONOxWMxAG38Mg8q9Exzy71wE1zDU7te864dfHkZ6GOlhpIeRHkb+/DHy+66ftGmGJEu1cfFgbCJ/B2B4CN7e0b26b72ACs6fwXNoo2EvYHtI7QUCj0WGvJnSdhdGzE3vspD8G+yguzF0MUK7itbblTZst9sTxjAYPA76Y/4IzoWDzTpReAvrBCPEs+CwATDu1y1qvb+/N2032mkYIP7ARjCPA2YSmcV4PDaWBNYUAIbBIl0NCwIj4xY4h8Nh9Xo9xWIxAzba1br6YQYjTqdTAzqAnEMsye6HQk7sDI2tJCtqLpfL9jthLVz2A7sixQwbw765+mgOEoEOrJxr43RPglUdj8d6eHhQPB5Xq9VSLpc7Odw4bfTmpLulx7oLHE2n09F2u1W9XjfbiUQimkwmBqqu4zkcjgNJS6WSdXByuy3lcjkbIImMiD+JREL9fl+z2cykKzz/fD63YAdpEE4Odpbi61gspru7OzWbTT19+tQkCdhNNps1x8fP45ilR/YLlhmHeHd3p263ax2TaFVL8Ob3H7sdwYLhA2AcOaPMFpFks0Ww78PhOGhxv99bK1pJ9juy2az5OreOAbkJLZNTqZT9DjejwUwhv99vbZSxA+yJgnS35sS1YVhOAI/7hW0jWEJvD0Bi12jNARP2AjkQ5x121v1/73q3y8NIDyM9jPQw0sPI9wMjv09S+L0vXD6f7/9I+tPhcBj8ue/hQPKAaCxd3aObIkbvyaGBMeNGObCk8Uml8kaMk4eJwxlyoDFIipR9Pp8V8sFukWaG0eD72DgKF11nn0qlTjrycMDYPADH1a/T8Yj7brVaur6+1m6302AwMCDudrs2IG46nSqbzSoSiRjbBSCRiqa1Js5Ekrrdrtbrteld0TqTTuWzLy8v7S0dmcV8Pj9JoTO1nM/CsAqFgtrttjlBNOlIjWD+uJii3u12td0eOy5VKhVjwPL5vB4eHnR1dWUHodlsqlAomIPHAXK4XFYK6QX/xowJF4Bg/8bjsRaLhT0j7ODhcLCZMQQ2/EGCwMGjZSxgA2u73+9tH/b7vd1/r9fT06dP1Wg0jLGOxWLWPQn2hI4+2WzWApvNZqPLy0tJMnDn+6fTqWq12klnnUAgYMzPdrtVoVAw0Mpms9ZJjO5IboelZDJpem709q6ECQeCY6GTE46Vc75er3VxcaHlcmnP8vnnn6tarSqfz6vb7do8Eeoa4vG42TcF+DCVnU7H6ij8/uMg1NevX9s6EgTB7I/HY6vZ
IFMAg9tut1WpVAx0XTYcFg92cLPZmG0R/G63j/N2YL86nY52u51pzWOxmM7Pz9Xv9zWdTrVYLFSpVE5qb77lX5XL5ewcA9w+n8+kSYCVK/lhf8ggcHbpBEXjAtd/AaQwnIAKQQL3yNrANLu+F5DyrsfLw0gPIz2M9DDSw0gPI8FIl2j49vW9L1yHw+H/+76vY2BozF39J//O31lMV47gtmmE+QCcYFz4GVJ9pG75PDdVu9s9do7x+48zONAWIxdA8w0ThN42HA4bU8Wi4+A4LIFAwLoH9Xq9E30oQwElmdOTZAxDoVDQmzdvlMvl7I37/Pz8xHkAFBTh0kUJKQUsF0wRDB+Ajc4b44Kp22yOcx5gyrLZrILB4xBTHCNOge9B8oJ2XpKlfqVjsAC7gBwBwwb4cPocbg4TIM7cjHa7rUwmY99DChwGAvYkmUxqMBhou93q1atXSiQSur6+NvYFY08mkxY0SDLHt9vtNJlMLJBw95SUPfIObAB2lmdBpy3J2Fy+B0eJFIaf++CDDyzgYC7Mzc2NOY77+3vrNoX8gbUNhUI2f2e7Pc69wZl0u137/dgKzoZhmbBZ3B8p/el0anr3ZDJptRs+31ETPR6PrdUu6xEIPBZ5Uz/CYMRisWiDIzl/wWBQb968UaPR0OXlpclnCJT++Mc/qlQq6YMPPlAqlTKJBP4gnU6bXeVyOQNVniEWi5lTpWaCwI1nns/nuri40G9+8xvd3t7q/v7edNfSkRGG3ez3+yZHAIQ4jzCXkUhE2WxWw+FQm83G2jBLx5a2iUTCzgOgQ3ALa0wROrIHfCBF625g4zYY4HziCwEBwBkgpsYkEDjWNCDX2Gw2J13I8JucScAUqRuzYmD/OPfe9Xh5GOlhpIeRHkZ6GOlhJBiJL/iu6yfpUsgBJEXupt8ADw6wJEufw7jh3ElPFgoF7ffHrjowDHQkYiGRXPBmi3FwQGBvVquVFZ3iAGCEXGeHM2aBMUS/32/3S2tQChkXi4UVzqEX5+DTKSWbzdrBPDs7k8/nU6VSMQDbbDZ68uSJrQUD8Pb7vX7xi1+YJMJts4lR4tQ48IVCwcBpuz0OEsSQ+QwcJFpWvo5uulwuWwo7n8/bfAbmNjCPAfaR1qocLN7ukYiMRiNlMhlL/TPkEDbk/PxcnU7HGCbkDTBjOGoO1np97Mjz8ccfm03l83ml02lLZ9PNirTzaDSyQwdbzKEklY6T6Pf7KhaLFtCgSca5LxYL69hEMTesWigUsqJngprVaqXxeKyPPvpINzc3KpVKxrIEg0Fj35AINZtNZTIZZbNZDQYDpdNpmzUCq3t+fi6/32/DGmESAdDFYqE3b94Y+4fzJzDJZDI29HO9XtsZ5PzSSQpQx9EATgQQ19fXNsgSpi8Wi6nZbKpSqcjn8+nXv/61ut2usaiSLDgolUpWXwETl0gkrH6BtWc/AfvJZGLgHI/HbS8JQHw+38l8nFevXqlWq5l0BNYcBpOfoYX0ZDIxFn86nZ7oypfLpbrdrhaLhWq1mvz+YzF3KpU6ke1st1tjDwlM8As46+VyqXa7bfUI0nFeD8wdrCAyL84V7B7Mr3u28QlkEFztOkGidAQvshHSo3yCP9gvZ4jP5fm8690uDyM9jPQw0sNIDyN//hj5fdePfuHy+R7nJPCg3CCsFxtGyhbjR4PK4uIsSIXudjtbADYch4Ox86aK1tNNraLLRjOPg4Kl4/DBKsEEwoZgeBzCeDxuaW6fz6d8Pq9kMmndU3D8sDTb7fakyBFjq1ar+uqrr6zoFiar2WzavAhJVqjI4RiPx/Zs6/Wxk1IikTC9PU4ymUxqPB6r2+2qVqvpcDiYjGE6ndrndrtdjcdja0cajUZtICT6+UKhYKntdrutYDBozmw8HpsufbFYaLvdmq59vV7bYeXzf//73+ujjz4yBhGGgo5K19fXBugYNcwtOmq3hSjpZQpbcYIUKM/nc1t7nBE1DOv1WsPhUO12W/f39/rbv/1ba6VM0LJcHudYuAwuTh92ms9D3xyNRq1FLXaJw8xms8aC5XI5A02KUbPZrKbTqb7++mtJ0l/91V9ZATlsynw+PwGReDxuzCNOHgYLVomfzWazKpVK6vV66vV6qtVqZhPpdNqYcTrwcB6oA+FsxONxJZNJFYtFBQIBXVxcqNVq2YT4xWKhRqOhfr9vrGSr1bKADE15KpUyp/7ixQv1ej07NzB9sM+5XM58DQEfzDhBD3M9YCKpq3j79q1ms5kKhYKeP3+u9XqtYrGobrd70iK3Uqmc+AQccaFQ0Ha7tc5UzWZT+/1eT58+NSkSjOFms9HV1ZV6vZ6x8vzhHOKkg8HHmUN+v9/YaaQbBOjYFbY3nU5NMoI/mM/nFswmEglNJhOTvDCElDVBngJo4x/dGgdJdr5gvmHwvOvdLg8jPYz0MNLDSA8jPYz8STJcMFpsAOwdbB0HjwXlTRunjCZdkqX2+V4K+3K5nB0WDFaSAQRv+qFQyN6kSd3DrCUSCUs5InWAHSKFGgwGrVVotVrVq1evDKjYWFiO+fxxxsBkMjHdNwwULBUGtFqtdHNzo3w+b1pXWJVQKKS7uzuVy2VtNhsr2EXyIckKByXZoZvNZuaIhsOhtQVOJpN6/vy5er2eMXZ0GkLL2+/3NZlMrAvRer22AwAzBbOCpGAymajRaJj+n9Q6LUgLhYI5umg0qkKhoOFwqFwup1/96leSZAzXcrk0bfR2e+yYk8/nbe9Z8+l0qnw+b2wp9+T3+21ex2w2M505RdcEJjC8rB8BDNKSDz74wGQFpLHb7bZms5ntAQ4DqUs8Hrc2si4LPRgMDISm06mq1apqtZrtN4EDYEydgCSTM1SrVb1580b/8i//or/7u7/Tr371K71+/dpS5wBhqVSS3+9XJpNRu922Qtt+v69cLnfijOLxuEKhkG5ubkw6Ua1WLTgLBAKq1WpaLBbGCFEgjt3s93s7d6FQSA8PD7q7u1MgEDAgjEQiKpfLev78uQKBgLUiJoDZ7/e2brlcTvV63RyZqx8nGKQddblc1r/+678ql8vp4uJCX3/9tQWxAB8MGfcN81gsFq0AGAZxNpvZmtO1C8kTLY2xMQAPoCEzgIQlk8no/v7ezs1kMrEAlK+7GY7VamXBKkHAZDKxomjqHWBN8T98P2cYxrjf70t6rDnAxxJ8b7dbGxaLLMOVoLm1NtTbuIG+y0Z7ksL/3eVhpIeRHkZ6GOlh5M8fI7/v+kleuNAU4ygOh8fiahwsmkuGzfHWSqEZBiod2QRYO7TOLCCAT5EeqULYuWDwODE+EomY/hOtNguOHCEej1tak0nnsFCkNnmDh2Hka6S7D4eDbm9vzYnxvX6/X6VSydLsMDfr9fqkxeZyuVS1WtVgMFAulzMtKBpd2E7Sn61WS4vFwli6drut9frYUpbBjZlMxoYaApCkQyninc1mms1mxg6iiWb+AZ2q9vu9nj17psPhYFrXcDhsBbYMw8Po0PD2+30ryhyNRlosFjo7O5Pf7zf2AEeP/KTX6xlYx2IxZTIZ9Xo9Y4xIqS+XS6VSKQN
xn89nTCjsGb/XrXuAAeRQ7vd7+zotQQFuSfrwww81mUx0f39vTCx6XhhqagH2+70VtJZKJU0mE0v95/N5PX/+XP1+39L2Z2dnqlQq6vf7BlBo/yuVip49e6bhcKh/+qd/0m9+8xubTUNdABIfl8GcTCbG8MG6Ii/ZbrcWbKRSKaVSKTWbTdVqNesyBNAS5OVyOW23j/M2cGxIZXK5nJ48eaLb21ubJ7PZbPTNN9/YeeVevvzyS3344YcaDAbGiBHErFYrA1y3/iGRSGg8Huvs7EydTkfD4VB3d3f667/+65MA9HA4mKyAzkcUzWezWY1GI5M3AQTMQCkUCvb7kMrA9OIrYOy3263Ozs6s2BeAjsViqtfrJk9AmoPfIpAGWPhZmF9qZJibxLnn2QiICZI2m43NN6LlsCsPIjigIBypBIGC67Nh9vDX/Cw2ze+nUJxmCd71bpeHkR5GehjpYaSHkT9/jPy+Gq7AZ5999kPw4s9e//iP//gZzA83xds4jCjG6d4kxWccTBd0MAQ2D+dHAafbTcllC4vFojneQqGgSCSiTqej1WqlTCaj/X6vXC5ngBSJRCylyAYCMsgx3ALjYDBoBxHWEceOJATGgM9iM6PRqCqViskr6FqFjleSsXwwCNlsVrVazaQm3BPruFgsNBwOFY/HVS6XjQ1zGclKpXICqty3q5Wl5SoSA54Dw2m1WvZvnU5H5XLZnMVm89jN5nA4GIu1Xq9PWs4iOymVSsZWxWIxSysDwLvdzuQnLjMDaxSLxSwt7/P59PDwoPF4rOVyqV6vp3Q6bX9ndgOH3z3YFG2n02l9+OGHkqTnz59rPB4bW8HQvVAoZBp4wI7DNZlMTgo5s9msnjx5crI+pVLJWMhKpaJOp6MvvvjCgldsb7vdWgtbnAuMeDAYNHYJfTvMIfpz5prADFFEij4adq1YLFrggvZ+OByq1+up2+1avcNqdWwx7Rbcco63262xu+v1Wm/evDHm9e3btyoWiwZ4biqeICGXy9n5eP36tWUAOGfpdNpm0FQqFd3d3SmVSqlarVq3Ith3Aj6CGQYvUnOx3+91eXmpTqdj8hN3KCTZgtXqOLj08vLS9gYnyxlLJpNKJpMW0ODckUPxPUi50OwPBgNj2gkWkXvMZjPzXwynBdTdi6CRPaVegJoY7BsbhTFkPgvnm/0AuAFYzjB7QCDh+o9AIKBOp9P47LPP/t+PAo736PIw0sNIDyM9jPQw8v3AyMlkot/+9rf/97uw4CeZw+XqxXGeFPi6BaI8KKDABpK6I12YSCSs0xAHJ5PJGCjBDrKAMFnSsSC40+moVCrZXAPSvP8VLJzcO4cK5qBYLBrbwR9JJtXAISMXQIfMfdAVBZYKo/f7/WZcu91OZ2dnko4FgOfn5/q3f/s3M9DpdGqskCRrRQoziIM7HI5TynEcsICkOrl8Pp/S6bQZR7/ftwNIAfXd3Z0Nq2s0GsYi+Hw+K7pMpVK6urpSPB631CwA6M7dwMExi4GCwsPhYO1aO52OZrOZyUlg+jqdjgKBgDkEimUpnGS+DM+SyWRMZgED1el0tN/vdXZ2pnw+b9KG7XarVqtljkGSer2e7u/v1Wq11Gq19Omnn1rKGxuBIWQ44WKxME0xLEk2m7V76fV61haWgAnHf3Nzo08++cQKrAk+mP+CE0faU6/XlUwmNRwOTR8Ne8MZYB8Aamw2HA5rNBoZq5zP560DGFIAtPcugNMxzN1LbMHV6iMbWq1Wenh4kCR98skn+vTTT81+M5mM/vjHP1pL58PhoC+++EKRSERXV1cql8sWPLJH/J3idPTZnJdvvvlG+Xze7osieWpPttutsbAEkO12W7vdTqPRyLpFIX8A1GDPABqACHCgyB8WzGV7CdKCwaD5LP4NAKaAlxkvBB8wrjDtsJCbzUbj8dgC3mg0antGBoFABKCnID4Wi6nT6Vjba2o++B34LPwT0i2YcAJAfob99uq43v3yMNLDSA8jPYz0MPL9wMjvkxX++dzXO1yuk41Go3ajPp/PUrosKg9MCpafQfIgyXSXpCsBHd5oXQd/OByMDWNDWBBaS6J1XSwWNq0dNmI0GqnX6xnLR4Efn5XP5607z/n5uTlXGEBSkhwyQGk2m510ZKG4sdVqSZKq1apJIdbrtRVA0l5zv98bg8nb9WKxMElENBpVsVg0p46enSJR3vDRVaMZ3+12xhwCAIlEQo1GQ41GQ9vtVi9fvrRDzbBM9NSLxULT6VS3t7c2g4O98Pl81j2oVCopEAhoPB6bPaCpv729tVaibiABCLbbbWP+crmcDakMBAK6u7szm0DmkUgkFI/HrbNVKBRSoVCQ3+832Ua5XLYhhTCJMB/r9Vq5XE61Wk2hUEhff/21AVqtVtMnn3yiarVqmnNsPhqNqlwuG2gSuLBn2+3WCtKbzaY5xM8//1yRSMTaGqdSKZMDYPdomZvNpv0utxCUwA05jvQ4MHA0GplEJR6PKxwOG6sEEwgw5XI5LRYLPX/+XMVi0SbOo9VvNpvmDGHV+HsoFDLd85MnT9TpdPTP//zPuru706tXr4w9urq6UiRynBFDDUksFjN9/X6/14cffqhf//rXtg+r1crmh7RaLbPj/f6xVSuBKDUcOD2CQBxou93Wzc2N7dFmc5zfgpwDH+H3+63rEeCDNMhlSvmMxWJhbYoBZPwbTp2iZ/TuMK0ACLIQzrDbUQn2HJZ4NBoZcBwOx2J5gBQwoBibmUPdbvekvgCm3ZUMwdhRZE8htcv081ze9b+7PIz0MNLDSA8jPYx8vzHyJ6nhQv4As8YbXjAYNEeDNhr9Nzrp7XZrDGA4HLbPIh0oySQW6KxJA+OoYTK4GD7osliAnVtkCrPS6/UUCASsYJGNYT5BOp22t2/0z6Q8Kf4D6OjK4/cfO8zM53MVi0V74wbYSFninIbDoa0L07TRHKdSqROWZjAYaL1e2/BAuiqtVis9ffrU2AW6zJDuXq2OQydhqdHrIjlxWTQ37UrnFtaNIk8G7pGaRu5wd3dnXYMCgYDy+bwFEDAFDPRrNBpWMHk4HFStVs3pkJbfbrfmjPP5vEajkdLptHq9nrbbY9cn2gXTXpe1XCwW+vLLL21eAmwRwQCsFAERNkfxMM9Hi+ZCoXAi4YB9BDhISdOud7c7DvOkW1e327Vhhblczpw9rPNgMLC2z6TYb25urKsW8iOXseYeOQcwfOz7fn8c3Hl3d6d4PG61Eq9fvzamrlQqGcs7Go00GAysVTDBIBIAVzMNYxsKhXR1daVGo2HsMg50PB5rt9up2WwqkUjo6upKq9XKpCmcNYDA5/Pp7OzM2FIK/rFNOk6Fw2G1222Vy2U7T51Ox4AQNu7zzz/XbrfTRx99pOfPn6vb7dp+uM7UDUYnk4nq9bpCoZDValQqFYXDYUUiEQv0aHncaDTMb3U6HVsjSXYG2S+3EJzAeTqdqt/vmy31+32TbTEniYDVPcvsfzgctsCFsx0MBjWfz60bE8HMZDI5ARgYUgI0WEiYZXwGMjHvevfLw0gPIz2M9DDSw8ifP0bSoOO7rp
+saQYggmGThoPB4i0dJ4yOGRaBnwc4EomEOV0OOs4abTrpdHcRksmker2eNpuNvWG7bFY8HjdGCR0yB4auShhCoVDQbDaz1CgOnZ+BtcNpouuMRqPKZDKaz+dWuIdzqVarZrRucSBMFGu0Wq20Wh0H51G4vN/vrQvPYrHQ27dvlU6nlcvlzLCy2axt/HA4VKPRUCQSUb1et58nPYxumXkaPM/FxYV8Pp8B3HQ6NcNijXkmahPYG54DJopnIZggHQ1zRecqilFJqSM3SKVSJtcIh49zSh4eHozhgBFJpVL605/+pFqtZvNGcCqhUEiZTMakCPP53BzVarXSxcWFMZFPnjxRtVrV27dvFQweu3FFIhFjCdH1wiiHw2GTPiyXSxUKBW02G7VaLZVKJfX7fXvubrercDisZ8+eqdPpWNDAbI3r62tjXH0+n0qlkmq1mvr9vhqNhs7PzyU9ynIIIGj7DEgDikiXYOzOzs5sbs9gMFA0GlUikTDdOzNpkLUsl0tdX1/bvm63Ww0GAytW73a7xtYvFguVy2X98pe/1O9//3vl83nNZjMNBgOTPdGtDJnIYDDQxcWFEomEFRmHw2Hr8AXwr9dru29J5hvq9bpKpZJpsMfjsZLJpElGaHl8OBwLf2E0kRjQWhqbI6CCOUMegSQCSQIMLT5kNBqdFAIDyovFwoZ7EuBgOwTD4/HYiv1d2RlBNi15eXa+j9oYfpZGQh9IAAAgAElEQVRgDB/E82w2G5PCRCIR5XI56zBFYOKycwAL98f/8zXv+t9dHkZ6GOlhpIeRHka+3xj5k0gKMWTXSfF3NgD2zH2rhTXCUaNPxVBhyVhcJBc4dUm2QbASOCvSm+l02tgOtMb8fpdRcwuX2cj5fK7hcGhg0ev1rLC02+1aMajP51O5XFatVlOhULCZBBgyAMEbNGlNn8+ni4uLE/bYHQqIfls6AjbPTKFvNBo1ZuPp06fGOtApqNFoWAEjvw/giMVidigoEsRoU6mUGR8OdLk8ztpwO0oVCgVlMhnbG4qRI5Fja1GGB5L6dpme0Whk6wGgwcSVy2Xlcjlz5tzLbrez7kM4coqUGXI3Ho/NaUiybjyAQTAY1NXVlUKhkIrFoh16isjX62Or43g8bvMeKNx8+/atTY4fDocmtRkMBsagzGYzayPMPUYiEbVaLWPper2eBUGHw8G6ja1WK2NaKHz2+/2WTqdL2O3trRXBU6wJY0dws9vtjE3EYWy3W5VKJVWrVT19+lSlUulEm4wNwPTB8IzHY3U6HasR4ExytpPJpAGoJF1eXqpWq9lZo+NYPp+3TlKSTPZCAAYbyP6+evXK6gTOzs7MYa7Xa3U6HTUaDbNZipz5LNg89PkAVqFQsCL+1Wqlu7s7s3kYL4Jdl9lLJpMajUbq9/sWYGDrMKfBYNCCPwIk/EMwGLQA2mXb8EFuy20Aw+/3W0czSVbjglwER+8y58ggSqWSJFnAACiNx2MlEgmTomEjZE3wjRR6A3ysM+y2d73b5WGkh5EeRnoY6WHkzx8j2bvvun6SFy6MwtWPf5tV+Da4oE9HYwtrwOfxcHweGmtS6aRROegUZ7LYfA1g2e/31vYWJpA3aelx6jT3ig57v38sCAwGgyedeHhTJpWM4+/1enawACfS1n/605/UbrctDUpBMqwGQOLq9+nw8vDwoOFwqM1mY88bDAb18uVL7XbHlr9fffWVdSFCq47DRiZB6pWWrBSHSsd2wzh99LiA4cXFhQ3cRFfLzwHirCldbmAzKMrEWUynU61WKzssdLxx9fPSIzOMVGM4HFp7YBg2utjkcjlj63AsOANqDyjYTiQSJs/o9/vWsWqxWOjFixe6uroyZgknjz4XB8kh456x+91uZ8W5SC1g6mjrezgci2UZkOgWegLK7MtqtbIZK7vdzlgt0thusBUMHjsSoc8mIJrNZqbNp3tUIBBQoVAwB4qdZ7NZVatVmyfT7/dP5qlw0S4Z9jsWi1kgEQo9zr4ARLEjzsl4PFa73VaxWDTAxOaDwaDZE0wutkjBKoDA3iKvgeGnZiWfz9vPwLoCCJFIxCQFoVDIZu+Ew2Elk0mVSiWTl+CwN5uNOXtsCnkRQZsbOAPWrDtgzzPBBJK1wIZDoZBJuWD4OQfr9dokZzBuAAFNCmgwwGdhX0gyCEj442ZR8C08C4EG/tK73u3yMNLDSA8jPYz0MPLnj5H46O+6fvQLF04NJ8WFE+UNGGeH9pnv4TNYDHSibuqQQlYOLRIGDIm0KBtC4RwbV6lUrIMT7Bgp9kqlonQ6LUkn0g6cL6wU947+lA5D2WzW2pTydr/dbpXL5U5as6LNpjB4OByaQ1qtVna4eDacA0YHE8gQQwor0VA3m01jysbjsQaDgTk3ZA4AMgWjOHpXH4z2HObQ1dU+PDzI5ztqtNGsY9SwQAA5WmmcN/uNfAImEecHk4DB45AozIRZJyWOrr7VaqlSqVjXqGAwaAP6cDDcPw620+mYk8dRw4xIR83u7e2tyuWyrq+vVSwWrUOS29UL1hknC3sGSwwz7Pf7bbjlYrFQvV5XoVAwpw+TAxi49kgg4AJaMpk0CYnLuiHlIT0P00qKPxwO2/fhEHe7Y0FtOp02CQbn2OfzWbtgHCOyGSQd2+1W7XbbnNpqtVKpVDJGrNPpWP0GzwArhcwHtgz9NAwhbXG3262++uorq0fAZ0iywGC321ndBsEj60VtCd+HzQHMkUhEw+HQ2LhYLGbrB7it12sLHGCEeV5XpuLWSOCzkIEQpBKUSPr/27vy6Lir83pn31eNNBotlmzLNsY4psQrhgSzGBuMDYQlTWh6TmtImxQCDUsoISVpSxsOZGvJH06zkBKawGFtWVowkBwIa8DYBmyDLVn7MpoZzaLR7P1DvZ/fCFm2QA7YfvccHYw0y+/3lu++3333fZ+cy2HfUFkkAdAaxjnMNic5kiDVv1cqFcTjcVFG0+l0lY2N446LYMZX9bNLpZJ8PgmFcVFjetAcqTlSc6TmSM2RxwdHst0nw4ykhVfVJNUWofrWeVEMjnx65JO6agtgsGFnTHbYmCTF96mV1jk48/nxCtrNzc3o7++XA45UnvhUzBoDJDn+l5PbYrHIFiT9nlTwOEl4PQwyDocDFotFbA5UKT0eD8bGxuthNDU1SRDn5GQAYGBQi7mR5KhccNuVh15dLhfq6+sRjUaFSDnwAYgKQ6IGIBOd3+/xeGAymeTgc7lclsOq2WwWs2fPFrUpk8kIcXPLmJ7o2tpaUTC59cogw+DHH9XioZIblZG+vj65HuCAauT3+8V/z8nMBQy/gwRGdYwKELMDsZYLbSQcZ6Ojo9i7dy+sVqu0CRcaVKl4IJpjiQGXWXH4O47PZDKJmpoapNNphEIh8YPzWmmboX0kmUxKdjEeAOecYABS5wAXDDwYSuUomUwKOXPBzIxXtLLwfWrbqxYim80mJErQqqKqj7xf2lqY6lb1wVPl4pxRPd1UbklY9HUnk0lZLOXzebH0FItFsQtwsUerDOMSxxTvmWc0eHDZZrOJ2uXz+aoWtlSz2ea8Z
oKLN6/Xi6GhIekLqthGo1HiB2MF7V4keYvFIjVvSDRsEx4iJ6mWy9WJEfgZagzlIkklC8Y1jmOOC9UqpsY+vo/zh8roVOqdxuTQHKk5UnOk5kjNkZojZ+SBSx0UfMJTiYVPfnwNG5MDi9t1zPTCBuKF80a4XTmRXPh55fL4QUdWkI9Go/B4PDCbzXIQuFgsyoSNx+Ny6BWAKGFqJigGH7vdDqfTKUGXk5qHIfmUzglLxS0QCMjgotJDvy79srRDAON2ARbD42dks1mZOGwDtgvVlnw+j87OTsyZMwejo6OSUYf2C6NxvJ4IB4bRaJQAwyDGrWO+dnBwUA7kejwe8f9TXaWixu+nn5nV3KmisH8BiNJD5VHd5iU4Eai8xeNxyYI1OjqKdDqNVCqFsbExKWZZX1+PbDYrtVQ4Fvn9fD/7n9Ya1lgpFotSTI/XYDQaxS9OW0GhUBAy48FWBkCmP2bf8CAsMyFRdaQCzXFuNI7X0GDApCKWyWSkUCX7xeVyIRqNwmQaP0zP7XYGGy44mF44kUjA7/dXqUDcpjcajVWLAWaZoqeeqhAtN1wQ0YbD8dzU1CTWIo4JWg+YBWl0dFS+n/OLQZZKtslkEi+2mnXIZrNh9uzZ0l51dXVVNgkSIFVjErrFYhGfNgmN4x1AlYLF+MEUuCyqycP7JGGqf2raYfY5Dxuryi3foy44Sc7cRSiXy2LtYLxgLR4uLEmqnG+Mm/w7YyH7sVwe9+wz7nChw8U7xy4AmYMc+xMX/1SpuUDRmB40R2qO1BypOVJz5PHBkVNhRs5wMcCxQ/gDQII+L4aThzfCp0nggEecQYgqDH/o72Qn8t88yDc0NIShoSHYbDaxMtDrCUBSb3Jrkof76KXn99JXTwLJZrOiEJCcuA1O24Xf75d6GLQC8Nqo+NTV1aGmpkYCC7dZY7GYDCr64mmbIHFwW1TdwnW73YhEItLxw8PDsFqtMsh4yJHKAL/X7XZLNXNu+1LlowpjNh+ooVGpjBe/pJ2Cbc8+p9pK7yy3Z2kboKWC/cfBy/+nXYCHE7ltS2WF9TVIVkwPynFH4nW5XDJ56WNXgywXLBwPAMSqQrLmljUVHvYnyd5iGU9PykUU1Q+z2Sy2A76OY9rr9aJQKCAUCskii2OCCpvFYpGtfqa9pY+flgAGFY4T+oWpPKt2BraV6kXmuEylUqKuc56RlBgI6Tnn4WSqs5wHnDNMx8zFeDabRTQahdfrRblcRkdHhwRk+vo5/thuauDl/ZCwAUjmJh76pw2J5MA+5nkJBk4St9FoRCgUEkvPxIUZxxDnDccALSW8dxaqJBHwvqlaqzGQcY9jmGo+38v+5X0DkMUKyYbWBRIF56RqN+NCjud2SLIkZra7ehiZfct2m7j7QEWZcZoLEPaHxvShOVJzpOZIzZGaI49vjpyxOlwMArxJqia0CqhPg3xSZEcxOPLJkuRAMuE2HRsUgDxxEybTeAHB4eFh7Nu3DyeddJJkXxodHRUPOycvt6AZzOhJ5RMwX6f+qMEtlUphYGBABj2vjVv+nMxsE/pVY7GYBN62tjaUy2XxtLPWBlUOvp9bwYVCQQZBLjeeDreurk6CADMNUd1xu914//33ZcJyoHJAWq1WBINBGcRUzVjLQt1appLC4GI2m0UloCLILWfVpsDJa7fbpYAgLR/8Tto3OFZ471T9mBo4l8shEolIgOAixWgcP9PAA99UJhi0OZbolWewoI2Er1eJm/3PQFIqleQgptVqFYsEVR2LxSLXQJsN7RjsR1oiVFWHpMlsVEajUQ6M0/qSzWYlGHIBxerqVHRYq4T3wPul9YNqWzabhd/vFzU1FAphaGhIgj/7jWOM84N9RDWOZMQD5bQecfFA5Vftx4kWKvqwae+gxYfxpFgsSgpnjqOxsTEJerTYMFMbFTUAYnOKRqMwm82iRo+OjoqVitnQGJcm7uCwjaiI0tvPQ+JsXy42qB6TzEneXMDwvYx1HAeqLYFjSc2QxnshgfH9KqGp9gueqeE84XtIDCRvxk++l3GJ4BzlPfJaNaYPzZGaIzVHao7UHHnsc+QRtRQCB54i+bTHziYZ8MLU1/IG1MNo+XxeOlB9uuTnMKDRAsAnbiooPp8PbrcbPT09UhF8eHhYFEGqQrzGYrEo9QVIJOVyWQ6QjoyMSCAfGxuT+gU9PT0YGBiQCUoLB5+QOWm4hTs2NibZfVgJfXh4WLZ+BwcHEYvFUFtbK9mIqMZRqWQwIzGwgzOZDGpqalAul6VAZD6fR2NjIxKJhAR/PuXz4CDtAlR+aPugiqQGYAYxqnwsOMin/lKpVJVtiWRMWwaVM4/HI4c2ubiw2WxybwDE2sC+Yn0Yptal4qPaOqguZDIZ+Hw+uV+qOyRQ2h9IFPRn8z7ZH1x4AJAzBWxXjhd6ta1Wq7QR1Uv6vJ1Op5BBU1MThoaGUCgUUFtbW0VOHDvqZOd45RhzOBziO2bWqGQyKW1GKwj7c3h4WNLvUj2jqsqUsqVSCYFAAIVCAQ6HQwqI8v65KGIfcwHCfqZlhgsEtgdtBlQFGYA457kAYJpnqpG0KFitVsRiMVHD0uk03G43+vv74fV6JdaQlJgdioTEvuC5hGQyKX3OGMSFLschxzjHF+eL+qDBRRVfA6AqExwJmfdNSxZ/R0KmxYaLW/6X6jNrllBZY+zkYo39wFiq2ic4t+x2uyjcjHVURhk7VAsGCZvgIomvVxdbGtOH5kjNkZojNUdqjjy+OXLGHrjUrTQGInas6qPkj3rIjAcgaVHgkyqDO1/LrUO73S4dTtWLqgoP/6pZfTKZjKSKHRgYkG1Vo9EIr9cLp9NZNbjYkNwy5SCrVMYrj+/duxfBYFA8u1T2eDg3l8uJ9YOEBUDUG241t7e3Y+HChejv769K1Us1x+l0SrvwKZ1qAwMHCZRtkUwmcdJJJ6FSqeCdd96p8qPTb6/aEli4jkoUr5fqQyKRkInPtimXy+jv75cBSv8tP4eqLD/L7XYjEAhIG9LLbLFYUFNTI9vvqtLJgMj/54RLp9Pwer3S9nwP61ykUikEg0FRQvL5vNQXoTrKVKEAqrIaUQVkcGW/AxD7C9uMXmgqOvn8ePrmYDAIn88nKm19fb0UQgyHw0gmk9K23ELngoTfw3GXz49nDSKZpdNpSfdLYuOWO9tGVWCpQjKAcUzTB83XciFF3zwXFiQw+u/ZdpxrVOUZWJmpif3F++Dc4AJDVeJZw4cKbalUEo8+vemMI6oFJxQKIZPJCEFy/LItR0ZGhJjsdju6urokHS8XOyTUTCaD+vp6BINBpFIpGVf8OxVKALI4sVqtCIfDiEajVWMwFouJUkd1FjiQZY5qHscXFUyj0SjqNYM64wX7jkEdGPfvc7zwvAgXiEw6kM1mJdZx0cLrYFuqBE+S4bhn2zEG8rUa04fmSM2RmiM1R2qOPPY5cirMyANXsVgUewCDlOp3VImGjcUfelFJPrwREhB9nAyovGEqf7xpqgUul0uK37ndbuTzeSQSCaTTaRkUg4ODsm1J0qI6oW4Bc7vTYrGgqakJAwMD6Ojo
kLogDOiqt5z/ZSDggODg5d/7+/vR1NSEwcFB9PX1SQ0GBiaqByRbq9WKkZERqWhvsYwXRozH4wiHw/B4PHA4HBgZGcH8+fMxMDAg95XNZiW48MCkulVLMmQfcECRIKgslctlqYw+NDQk10EVgf3GvjMajQgGg1XKpap0MOVvNBodH4z/H2yotNHCwm1tq3W8UOXIyAjmzp0r3n8qIFRP1e1qtiMAUXlJcl6vF4FAQA6kqgoVFSC1GCXHHlUl2iFYqJDeciqwLO5I1YbKJACprj7RjkMl224fL9jJayNpVSoVJBIJjI6OF+JkAVGqfsViUc5ZqJYGZhkzGAxy6JUH1+12uyhVzEDENqMXX1U6AQj5MViaTCYJxH6/Hw6HAw6HA4lEQiwODGYMUHw9gyzPShiNRtTW1spCIJfLIRaLoa6uDsPDw1WqP5VPBmKOA8YjkgGLTHK+c3GrLnJJhLxOxjPGHbZfLBZDuTx+4DYYDGJkZASVSkWIl2o/s0Wp6hu/jyTP9uPc4z1xLAKQsUi1m2OPC0Qq7mNjYzK32ccmk6mqCCXbV42jHN/8N+c6xyVJvVQ6kHpaY3rQHKk5UnOk5kjNkcc+R06Fj/zAxYDMLDOqr5sTkAGKryXBsPP4BMsOo4JHhYedQeWLn8vgSJXJ7XZLETMOKBbR4+tol2AnsiOpQKiKGz3Gra2tiMfj6OrqkkOJVJw4Wek7pnedk4aDYmxsTDIH5XI5UaGSySTK5TICgUDVgUtuH6uKCQMqlUwSGu+3u7tblKW+vr6qOiYjIyMwGAyiUpEsVcXDYrGIAkPVioSUz+flcLTJZEIoFJI+5kDnd1HpikQiKJfL6Ovrg8PhkMFOTzyDHxUzbjdzsNMfGwqFkE6nMTw8XBWkAMDv90sgJZGpB2Lpp2ZA4za0utVM8uI2N4sR8kwAJygXBVRX6W8nYZHU2FbsS5fLhXQ6LeNB9aVTsbLZbGIZYlG+fD4v2cLy+bzUQtm/fz/q6urkTAJtPYXCgQPjPITMxQv7iH25b98+tLW1SSYoplWlDYSLQh4w5SKGKhsLHcbjcfj9flFkGeyCwSAGBgZgt9uRzWaFVKi8ErxPtivtHDwTwL95PB7x+vM8Rbl8wEPPRQpJm+3MxUptbS1SqZT0u6peMaU0bSQcewyiXODwe+mTLxQKsvCw2WyIxWJVChljTKl0IEUxxykA6XsSN+ed+jveA8+G8DM4fqhCT4ydvGamDeb4dblcVRYxLvzUhdREOwljMeO7xvSgOVJzpOZIzZGaI48PjmQsngwzkhaeh9b4w210bpsTaoACICqVeviyUhk/+Kduw7LjeHN8EgUgT6psHG7te71e7N69W7bh1cbgJKctg5/HDuTnWSzjheVGRkbQ1dUlFgmn04l4PC73yEFHlSCXG6/bYDKZpC4EK3MzxW0ul0NdXR0SiQQcDocU6mPdAx7q43YzPb0Oh0OUHQ4WHk5MpVIYGRlBNpvFvn375Ane5XIhEonA5XIhEAhgaGioattWJW4GUKoqPp9P+o0pYekZ52Sh/YJ+Y04Aq3U8/afD4ZDsNYVCQdQNtlttba3U26AaQUWWW8EMyPQ/q2phPp9HIBCQgM1JQgWCKiuJQR2XPCzLg91UlHhGgSlI2S70nTscDjk4Tb+yxWKpqu/CbXmqTaxXwf7k53GxwXmQy+XgcDjQ1NQk15PJZCRFcTwex9y5c9Hc3CwZlnjIFTjgLTYYDOKBp4WAB2Gz2Sw6OjpkcRGNRkUtY4Yml8slCimVJS5s2BeJREKU6mg0ilAoJIeVw+EwhoeHJ21DjjuPxyPKotvtlixiwWBQMhsVCuOHVLlQooWCgY2qEhc0FosFiUQCJpMJ4XAYAKReDm0UVMWpKpPoSRBUKZla2Ww+UGyTRRsZ6xKJBHw+nxAtY1u5XK5KwayeMeH4V1U5jgMecFbHMZV8kjaJkDse7BseEuc4DwaDsmDgeGT2OI5Tqsi0Q6kWCTXGcf5oTA+aIzVHao7UHKk58vjgyKkwI+xJryUDPrd8qY6wcziZqaKpN0W1QJ0c9K2rT9v8XNVTWSwWxRpB76rRaEQ8HkcoFJKBxcFhMpkQCASqbBfqwVcGBio77e3tclBUXXhQbbHZbKLKUZ3gwJk7dy7MZrNMUCohc+bMQTAYxPvvvy+KD7epef9sH3qf6S1m0KFHOpvNIhgM4r333pN0uSMjI/D5fFXFKyuV8UJu3D7lVjC3W7nlqwZ7Ti6v1wur1SopbdVFAAe/GnSoMgCQjDUkcL4fOJDlheTBgEi1xGg0CuFSAeF3A+OFKr1eL2KxmFyz1+uVMRmNRuU+6ecmMTMwUOGj0sZx4vF4JJhxzPF+BgcHEY1GEYlEJFh7PB75/3g8LmONthmv1yuBTrXkcJLyIDRJfnh4GDabTVRctnVLSwvK5bKMaQahiQRLZcpisWBoaAgOh0PqjagHsXO5HGbNmiVnJBiYWH9kZGQEACQYuVwumRvBYFAUdWbYcrvd6O7uxvz580XNDYfDsphgX/IgL8coM4jRagBA7pltQDJkcKbyy0WOxWJBKpWCz+eTbFiFwvhB2mg0CrvdLsU1eYiduwIMpqptTg3cLIIIHEjPzdfHYjG4XC4Z67x+Lp5Iopy3/DcAWRzRHkMPv8lkksUOlVTVJ656/ammU0HmvDKZTNKmVKep+vNcCQkaOLBwp8rIBTwXKPxujelBc6TmSM2RmiM1Rx77HDkVZuSBi6oLVRsAEmzZYCSZSqUiBzK5NcctegYbWijoKecTqapkqd89NnagOCC/kwoBG4aKF9Uh/nBrnt/DhmZNgVQqhUAggLGxMelAbq8DqLLZsEM8Hk9V57pcLrS0tKCzsxPhcBiNjY3o6uoS0nG5XOjt7RVy4wSkssCCd1T8WCulUqmgublZbBChUEjukX5jHtyMRqPI5XKoqamRLXiDYfxwq6ooMHByArndbqTTaSQSCVgsFkkdSr89r5nfyXZQ1TISGScCFQdui7PwIf8fgKharK3BYKJm2qE9g4GEKiInt8FgEKWWROJwOKpquHBsceLwWump56FZYFwlYrHEbDaL5uZmsYHwx2w2S9Yhet9ZM4SLDtoXeOCUZAocSJEMQO6dr+E44Os47rlYo9LMucLDsqrKSkLgnKTVaHh4GIsXL8bQ0BDy+TwaGhqwf/9+eDwe8dqTbEwmk/jY1WCsbtc7nU7s3bsXfr9fiJUHcumrJrmwbalgMUuXqrIHg0F0dnbKooC2HS5S1RogJKp8Pi++dJ4ZGBkZkYUUME6AVONHRkZkTNGexP4gkXEsUqkmaVCx5/UyHpIMuQjieQ7OMy5ueIAagKhqXGSqmbNo62FM4/2zj/kZqnrNPuN1qEoe+4/fy8+j3UT9HBKLxvShOVJzpOZIzZGaI499jpwKM2Ip5CTiIGVgZ2Cj8sHgzq10HjDkDwmGXndOWk5oKmpsLHphx8bG5AmYaV4ZSPlUTu8zlSwGKqpWlcp46kw+7fKzvV4vcrmcpErN5XIYHBysmsTAuE+a27hOpxM1NTXIZDJCPpFIBD09Pai
rq0MoFMKePXtksNLjykFbX1+PfD6PoaEhCU5WqxWtra3iEadaMzAwgLlz56JYLMpWPgeM3W6Hz+dDf38/AKChoUFUIR6kpDJDe0AoFJKJyNSt7B+DwYCBgQHxAZMQOGm4IODBX4PBAL/fL/5wn88n7yuVStKegUAAPp9PvPr0L3Ngc3FgMpmE1DlpVTWGkx2AHPDl/5PcGPT4k8+PH9jmwVMetmXmGvU8BRcqTMFqNBplrHo8HlFC2F4OhwNer1fOA+zevVvU1kqlAr/fL4TNQ/EMULTPUMHjmQiOOR5KzWQyKJVKsvBJJpOSMplKNG0uo6OjqKurg8PhQE9Pj/iWqcYlk0nU1tYiHA4jnU7D7/fDarWiqakJiURCDjiri7ZsNgufzyd9H4/HEY/H0djYiGg0KsoQlUoAkjGNpEvbQiKRkDMbiUQC4XBYbA30vtMHDkBUqkqlIvOUfUrfdX9/P+rr6yWQU3Gjj9/tdqOvr0+sCbwu9QwDfeRsJ5JnMpmU+GE0GqXeDxdeFotFFko8nMs+5nynx54EAaBqUcm2prqsFqblnOMYJTGoOwEkHXr7VaLgAo/kxu/h79hWjPOq0qxx+NAcqTlSc6TmSM2RxwdHToWP/MBFXzK396gq8Ca45c2nSD4F8uLVwED7AQBRVEgiHEz0XVLlK5VKaGtrQ6lUwv79+2EymeTJ3uv1wmQyyYFJbiUDqPLEs44DfZj0lBaLRakHkc1mhcyoNHDCMnjTc8sn+ng8Do/HU+XPzefz6OjogM/ng9lsxqxZs9DV1QW3242WlhZUKhXZ2ubTeaVSQSQSgcfjwXvvvQeDwYBgMCh90NXVhfr6eoRCIdn65wQfHBwUny4nLQ8sckDHYjEJCgzkaiCmYqAqTvz8bHa8OGIgEBBfP++f/mWz2YyhoSFRa0hCxeJ4il5mcyoWi6irq5OzAiQeDnbacHhGgAUELRYLenp6UFtbK203Ojoq/nH6ibndzOBjsVgkc+wyjo0AAB9jSURBVNfo6CiCwaAQCdWQZDIp44yLlUwmg9raWjk/wPFKuwknM1VhZuJpamqSFMNjY2NyoJfFCTk/fD6fkGY4HJYgSa85CZSZjlh7hD5ktiUJr6amBhaLBfv370exWERHR4e8zuFwIBKJoKOjQzzwTqcTPp8PLS0tiMfjsFqtaGlpQW9vr2Su6u7ulmxeTBnL1L5qBjLadhhQ1aAKQFQqztNKpSKHVrmdb7PZEI1GUalURImz2WwIhUJC4OpCgQoh26C7uxvNzc1CnOrClwo7FzhMM8174Dhg3KpUKuju7pbYoaaC5vjnAoqLwGQyKYotVUMusjmf+BlcjNJyYTIdKBxaLBZl54ALNCp6HC8kUt5fLpeTuU1FkaSh2oZ4gJ6xUY2VjF20uGlMD5ojNUdqjtQcqTny+ODIqVwgH/mBi1uL6rYnL4hBNp/Py8FBkgsHE7c4qayxE9QBx0GvBnwOCA5uv9+PQCAg6hEVv8HBQQwODsrE7uvrQ6FQwKxZs6QT6FNlx/X29op/NJlMYsGCBahUKujv74fdbpeJYjCMH+RLJpPo7OxEXV0dTjjhBMm2RF8wJxGDBQ+gDg4Owul0ora2FolEAqVSCZ2dnUgmkzLomUVp7ty5SKVSSCQSqFQqcLvdUvytq6sLixYtgt/vR19fn6ipPORJZbKpqQmFQgEjIyNVnmBgvJBcKpVCd3e3nBWg0kC1jkpTuVxGT0+PDEC73S4KJ5/+s9mseIBpS9mzZw+8Xm9VBh9+F3242ex4LRH2PdUy+rvp2SUhlMtlWSyQvAHIYWKqKCQxdbtaPcBeLpeFeJn9h/UjqJ4xaHPruVgsiteYY5e2CS5Oa2pqRAlhQU9aaLjYokLNsURiol2C5xK4cBsbGxPlb3R0FOl0WhY1drsd/f39kgY2Fouhvr5e5mS5XBYV1eFwoLe3V66jUqmI0sNsYCR7t9uNgYEB5PN5uSfWzuF5ANocbDYbBgYGpN/ZX7RysJAk1WsG73K5jHg8DrfbjWg0itHRUdTX18Nut4uKSfsGyVddUNASpWaTop2pt7dX+oexhv1JmwdVPSqAtEzQikKvPhVlZr9i3OPY5rgaGRmRxTCVN2a4YnuTSDkXON6pHBYKBVnc8ppo7SsWi3KGgO9VzwgVi0X4/X4Z/4xBBoNBxgznAz+Tc5FWHCqevEYuQDUOH5ojNUdqjtQcqTny+ODIqWyFBtoPPixaW1srt9xyS1VgMJkOpH0k2ajqnvrkzkZTf0fCoXLDJ0Y+efLG6b0m2ZBo+D18olaDIBUCHhak95dPpnwPO9tkGs+iZLPZpAAdn9y53UzPMWtWcHtcHQwWi0WUjFKpJClEGcyHhoakqBy3eqlYBQIB8dkODQ2JvYLBiRMwEomgt7cXJ5xwAnp6esT3TIKvra2FxWKROg0MPjysTMuF0WgUu4jqTaWlRfWsUhGhV5kWGYJ9pqob9OWzvdlvfB3HARVE/tC7rqp7ZrNZMv5w0nBi8X20ZqgHP9UtadVfry4qstmsXB8nIlVddfHJz2CQouWB38P/cjHE7W5eLxVhqsPq53NyU2XimOU5DdXrTAWKNhXeD/uSyh/POnCcUnVWrQAulwtjY2OSKtnpdMr92+12mVe03aiLOHVhSNVc3ZKnt5vBTC1QyOvl3zlPaaUhyXERSCWY8YZzmfGE5K1aqpLJpFh1OEZJqMx4pfrnqcxz0WQ0GsXnzver38fxQ4WM84aqLa+PcZI7Fmos5Pu5AKeSrcZJWqa4C8JYzvjIOEcCpS9fHTOMNeqcYBwgCfL3VA03b978h0qlsvQjEcdxBM2RmiM1R2qO1Bx5fHDkbbfdho6Ojkm9hR/5gWvp0qWV11577cAHTrixwwUbUkNDQ0Pjkwuj0agfuKYBzZEaGhoaxweWLVuG119/fdIHrhlJmkF1iE+N/D3JgQQz8d+qTQKAPO2rr/ljgtdLJZI4GEFOvJeJv5up61FVzoO9bmIbH63EPFn7qffycY2NjxsT+3iyv6tQx4A6fid+xlSfO7Hdj8SY4udO9tmH6uuDzbnpzsXDua9DLZInfueHba8jEUc+DA413jQOH5ojq9+rOfKjQXPk5NAcefDr0xw58zjYuJjqnj7yA5dqZxgZGcH27dtRqYyncpwzZw78fj8GBwfR3t6OxsZGNDc3V22nx2IxvPvuuwCAtrY2RCKRgzbk4XTQoV4z1d+5pQxAtmz5HhWTvZ/bk9PBdAfMdF7/cQ/GmcSxdC8fFodqg8n+PvF3h/Oayf6mLrJUWxRB64y6tT4Z+H71cw72OvU7ppp/kxEm/zvZtUwMkrwWvnaq6z/YtQIHzuUAkGufavE52edMXIBPXERO/NuhrmliG0z0lk/3XjU+HDRHjkNz5JHDsXQvHxaaI6v/PvHfmiM/eE1HgiOnev2M1OHixb/77ru44oorAIyntVy5ciWuuuoq7Nq1C//8z/+MzZs348tf/rKkjhwZGcF3vvMdPP744zCZTFi4cCG+973vYd68eVUHQicbEGpDqYNzssGqejonXrOKaDSK5557Dj
abDWvWrJEie5Pd62TXoP5+MhWTUN+jA6XGJxkcu8ViUc5hMEUvz/TEYjEUCgUEAgE5b3GwecNMUkzry4r2mUymaj6w5s/B5gcP99KfzrMCTEVbLpclDbUaTJmClwvaQqGAeDwuB6WdTue02sZgGPd6R6NRyUzGZAH8Tp59YVakgy1o8/m8HELnmRg1rtCP7nA4PvAZE2ObwTCe2YoJFyqViiyU+Xf6zyfrL42ZheZIzZEaxyY0R07dNpojD2BGHriIUqmE4eFhnHLKKVixYgUefPBBVCoVnHDCCUilUnKwkbaKBx98EPfddx8uueQStLW14eabb8bChQtxxx13oFwuI51OY3h4GBaLBXV1dVIscGRkRGogeDweDA0NoVQqIRwOw2g0oqenBwaDAY2Njcjlcujv70elUkE4HIbD4UAul8PQ0BCcTqccjLNYLHjzzTdx5513wu12IxgM4pRTTpFDhCoqlQri8TgSiQScTifC4bAMKlZzDwaDsNvtiEajcqiQFdFDoZBkCtKEovFJhUoA27dvxxNPPIHu7m4sWLAAl112GcLhMF566SU8/vjjyGQyWLx4MS677DIEAoGqcc3PyeVyePPNN/Hkk09icHAQS5YswcaNGxGPx/H0009LQcJ8Po+lS5di/fr1ci1q8CyVSti9ezeee+45+d5TTz0VBoMBzz77LF544QWMjo7irLPOwvr162G321EoFDA4OIinn34aGzduRG1tLbLZLJ599lk8+eSTMBqNWL58OTZt2iT1dg5HIcvn83j++efxyCOPoFQqYcmSJfjCF74gdVR6enqwZ88etLW1Ye7cuQf9vFQqhUcffRS/+93vAABnnXUWLrrooqqaMk8++SRcLhfOO+88Ie2J18MEB3v27MHbb7+Nyy+/HEajEYODg3jmmWeqiOTkk0/G/PnzdQz6I0JzpOZIjWMHmiM1R04HM/rAxSwobW1tuPbaaxGPx9He3o5QKCTpb/m6XC6Hp59+GiaTCTfddBPq6urwwAMPYM+ePSiXy+jr68Mvf/lL7Ny5E263G+vXr8fatWuxa9cuPProo+jo6MCiRYtw2WWXYevWrdixYwe+/vWvw2az4fbbb0dbWxuuvPJKPPTQQ3jhhRdQLBZx2mmn4ZJLLsHAwAC+973voba2Ful0GuFwGLW1tfjf//1f7N+/HzabDXfeeSduuOEGrFq1SjLIEHv37sUvf/lLvPfee4hEIrjwwguxbNkyvPnmm3jwwQcRjUaxcuVKnHLKKXjqqaewf/9+GAwGBAIB9PX14ZxzzsEXv/hFKdimofFJRaVSQUdHB/7t3/4N+/btQ1NTE37605/C6/XizDPPxK233gqr1YqFCxfim9/8JrLZLK677rqq+hXAuIq1d+9efP/730dvby8aGhrws5/9DE6nE3PmzEFfXx8ymQx6enrw1FNP4dprr8X69evl/IqqYvX19eH222/Hjh07UFdXh//6r//CddddB5PJhDvvvBORSATpdBrf/e530draivnz5+O5557Dk08+iSeeeAJr1qxBKBTCtm3bcPPNNyMSiSAUCuEHP/gB/H4/zj///EMGWCr0O3fuxLXXXouGhgbMnj0b//Iv/wKPx4OLLroIL7/8Mv77v/8bu3fvxvXXX485c+YAADKZDHbt2oVQKITGxkaYzWY88cQTuPHGG3HOOecgk8ngpptuQktLC1atWoVcLofnn38et956K0477TSsWbMGDocDsVgM7e3tCIfDiEQiMBqN6O3txcsvv4x///d/h9FoxGWXXQaDwYC9e/filltuwYoVKxAIBGC1WlFTU4P58+cf2QGkUQXNkZojNY4taI48eLtojqzGjD5wcVuOhRHnz5+PHTt2SEE2vsZgGK9P0N3dLQUaAeCaa66Bz+dDoVDA3XffjZ/85Cc444wz8MYbb6CzsxMejwcPPPAAXnnlFTQ3N+Pee++V7cOHH34Yp59+OhwOB371q1/hBz/4AZ555hl84xvfgM/nAwBs3boVjY2NqK2txf333w+LxYITTzwRZ5xxhqR5JSGq26yqLzSXy+GOO+7A448/js9+9rN48cUX0dXVheuvvx4//OEP8c4776C1tRU///nPsXPnTrz++uuiNhaLRRQKBezYsQPr169HOByeyebX0JgxqPN1x44d2LVrFzZv3owLL7wQf/3Xf43f/va3aG1txb59+/CP//iP+MIXvoDnnnsO9957L6677roPBGIqgLt378bXv/51nHbaabjpppvwyiuv4KyzzsI//MM/wGKx4Kc//SleeuklnHnmmQf1Tw8ODuKNN97AFVdcgeXLl+Pv//7v8bvf/U5S1958880ol8v40pe+hBdffBGtra3o6urCtm3bJCVzsVjEI488gkwmgx/+8Icol8u46qqr8Mwzz2Dt2rWSAvhghMIdiAceeAADAwP4zW9+g+bmZrz99tt46KGHcO6556K3txfvvfeeFDbl5w0NDeFnP/sZTj31VFx88cUwGAz4xS9+gbq6Ovz4xz/G4OAgVq1ahcceewwrV65EV1cX7rnnHkSj0arCw+3t7fj5z3+OtWvX4txzz4XVakU6nUZHRwd27tyJJUuWyPWz5s2mTZsQDAYRiUQwb948ANpO+MeE5kjNkRrHBjRHao6cLg5eEvmjfOj/+1ZZ7I5koR7yY3594EBF++effx4vvfQSxsbGcN9992HJkiW46667cP3112P+/PnYuXMntm3bhrVr1+I73/kO5syZg6eeegrz5s3DrFmz8Nhjj+HXv/41amtrsWHDBvz2t79FLBZDQ0ODWBrefPNNGAwGuFwunH322bjjjjtw5ZVX4rLLLsNXvvIVzJ49GwsWLMC3vvUtLF26tEphIwk+9dRTWL16Ne666y781V/9FSKRCF599VW8+OKLOOuss/BP//RPmDt3Lnbs2IFMJoN169Zh0aJFOOGEE9Dc3Iyuri4p3Kah8UlGpVJBLBaD0WhEJBKB3+/HiSeeiPb2dimO+vvf/x733Xcfstks1q1bJwUb1Z9sNouhoSHYbDa0trairq4Ora2t6OjoQDQalZofTz31FBobG/Enf/InyOVyUliRP/l8HvX19bjhhhuwYcMG8X47nU7E43E0NjYiEAhgzpw5aGhowGuvvQaLxYLNmzfj9NNPl3oaVBP5Wp/Ph4ULF6KzsxPDw8Ny74fC7t27UVNTg9bWVthsNqxevRodHR0YHR3Fpk2bcO65537gjEogEMD69euxePFiqT2yb98+nHjiiWK/Wrx4MV5++WWk02k8/PDDeOedd9DS0iL2rkqlgkgkgvXr12PBggWicLa1teHqq69GU1OTFHCsVCrYu3cvkskktm7div/8z//Er3/9aylIqfHHh+ZIzZEaxwY0R04NzZEHMKM7XMA4YYyNjeHNN9/Eyy+/jGAwiPr6ehiNRqRSKfT29koxwXnz5uGdd97Be++9B6/Xi3vuuQfnnXeeFITj4LFarRgeHkZjY6M0GovrZbNZ1NXVYenSpXj44YeRSqXwuc99DsFgEMViEQBw5plnwmKx4Be/+AXi8Tgql
fEK5aeeeiqWLVsmW5+sXM+B3d3djXA4jD179sDn88nTLgAZ5LFYDF1dXTIoWEiPT+pGoxHBYFAKz7Ea+sGyz2hofJIwUUEzm82or69HLBYTC9C2bduQSqWQTCaxatUqPPjgg9i+fTvsdrsEPofDgf7+fpjNZpTLZTidTgSDQYyMjCCTycBkMqGrqwvbt2/Hhg0bYLVa8a//+q9SuJRwuVy4/PLL8Wd/9md455138B//8R8IhUJYs2YNfvWrX0mBTZ/Ph2AwiO7ubpTLZSlmObGgLDBOGjabDTU1Ndi7dy9SqRTq6+sP2S4A5Nqo5jU1NcnZFZ6XUdV/APD7/bjgggvks0iIhMPhQH19PV5//XW8+uqrePTRR/H5z38e+/fvR6lUEgUvEomgoaFB3sdzNoxFKux2Oz772c9i7dq16OzsxJYtW7Bs2TJcfPHFVWnHNY48NEdqjtQ4dqA58uDtAmiOVDHjSTPy+TxeffVV9PT0IJFIYPPmzTAajchkMnjsscewY8cOAOPFwS688EK88MILuPXWW+FwOOB2u/Hnf/7nsFqt+OIXv4gtW7bga1/7GhKJBHw+H0455RS0t7fjySefxLZt29Dd3Y2LL74Ys2bNwjnnnIMHHngA2WwW5513HoxGI84880w8+OCDePHFF+F0OpHJZLB06Xi9znw+/4GAXl9fj9bWVrzyyiu49dZbsXz5clxwwQX45je/iTlz5uDuu++Gy+XC+vXr8fDDD+Oaa67B+++/j9bWVpx99tnYv38/nn32WezZswfxeByrV6/Ga6+9hnw+j7GxMVExWGlcQ+NogBqYisUient7EYlE8NZbb6G9vR3f+ta3MHv2bLS3t+Oee+7BX/zFX6C+vl4OswKAxWJBLBYDcEAFHx4eRjAYlMPxL7/8MlKpFM466yxYLBaEQiG4XK4PBFqTyYRdu3bhu9/9LoaHh/G3f/u3WLBggRAJAMTjcQwPD6OlpUUyH6n3wcQEJJWxsTEMDQ2hpqYGXq9XrvNQ4GeQqPbv349QKIRQKHTQ9xSLRYyNjcFsNgsxqNncaCWbO3cu7r//frS3t+OMM85AZ2cnCoUCnn/+eXzmM5+B3W5HPp8XAjkYSqUS5s+fjxtuuAEnn3wyXn31VWzZsgVDQ0OHvD+NmYXmSM2RGsceNEceHJojD2BGH7gCgQDWrVsHu92OhoYGnHzyyVi3bh22bduGNWvWiEWgXC6jVCphxYoVuP322/HII48gkUjg29/+Ns4++2yYTCb8zd/8DXw+H9566y0sWrQIGzZswLJlyxAMBlFbW4vOzk5ccMEF2LBhA2pqarBy5UpccsklyOfzOO2002AwGLB27Vrcdddd2Lp1KwqFAr7xjW9g7dq1SCQSuOKKK7Bo0aKqp/hQKIQvf/nLqKmpwcDAABYuXAiPx4NFixahubkZlUoFVqsVN910E5qbm7Fz506ce+65uPTSS/HpT38aNTU1eOyxx9DZ2YlLL70Uy5cvR2trK+bOnYv6+noUi0Wk02lEo1HJ8qKh8UmGwWBAQ0MDzGYz+vv7kU6n8e6776KpqQmZTAbxeByzZ8/G8uXLYbPZsH37dqxZswann3561eeUy2U888wzePbZZ7Fv3z7MmjULnZ2daG5uRjAYRDabxdatWxEOh3HiiSfCbrfj8ssvnzRddDKZxN13341UKoW/+7u/w4oVK1Aul9Hc3IxXX30Vw8PDGB4eRl9fHzZu3AiLxSJKOtV/k8mET3/603jjjTewb98+lEolvPvuu1i+fDlqamoOu32WLl2KrVu3oqOjA7NmzcJLL72E1tZWhMNhORStpr82GAzo6enBj370IyxfvhwbN26E3W7H4sWLsX37duRyOUSjUezYsQNf+cpX4PF4sHTpUnR2dqKvrw8GgwG7du3CihUrsHfvXtx3331Ys2aNEDDbSP3ecrmMu+66C4FAAD/60Y+QSCRQKBTk9Rp/PGiO1BypcWxBc+TU0Bx5ADPywMVgPH/+fHz/+9+HyWQSNc7hcGDVqlVYsGABSqWS+CWdTidCoRA2btyI5cuXo1AoIBwOS5BtbGzEV7/6VUkrGwwGYTKZsGTJErS2tiKbzcLn88HtdsNgMCAYDOKmm24CANTU1MBgMMDj8eCSSy7BGWecgVKphGAwCJfLBafTia997WsfqCdgNBqxYsUKtLW1IZ/Po6amBlarFTfeeKMcEDYYDJgzZw6uvvpqJBIJ2O12hEIhmEwmnHLKKZg7dy5GR0fh8/ngcDjQ0tICq9WKYrEoikGxWJQBqz3qGp9EqClmP/WpT2HRokXYsmULnnjiCbz11lu45ZZbMG/ePPzmN7/BjTfeiEWLFuGtt97C5ZdfDrvd/gELQKVSwZIlS7Bo0SL8+Mc/xkMPPYSenh6cf/75qKmpwa5du/CHP/wBp59+OmprayVGqGTCufPss8/ikUcewYIFC/D000/jf/7nf7B69WqcccYZ+P3vf4/bbrsNyWQSDodDFpaVSgXZbBaZTAbAuM1h06ZNuP/++/GXf/mXiEQiGBsbw9q1a+W8zFSWJs7bSy+9FPfeey+uvPJKzJ8/H52dnbj66qthsVhQLBblmtWij/Toh0IhObNz5ZVX4oorrsCXvvQlZLNZ2O12XHDBBZg9ezYuuugi9PX1IR6Pw2Aw4OKLL4bf70cikUBraysCgcAHCklmMhlRLc1mMxYvXow77rgDlUoFb7/9NiKRCJYtW1Z1LxpHDpojNUdqHFvQHKk5crow3XbbbR/pA7Zs2XLbVVddBYPBALPZDJ/PB6/XC5fLJU/NVqsVPp8Pfr9ffjwejzxher1eBAIBeT1/bDabfBY71mg0wm63w+PxfKDSvVpsjjCbzXC5XPD5fJJ212w2w263S1EzVcGjT93r9UrxN6fTKdmZCJvNBo/HA5fLJd5OZm7itZlMJthsNvHj22w2OBwOOJ1One5W46gAD8+3trbCYBivo7NhwwZs3LgRbW1tOOGEE5DL5TA2Nobzzz8fV199tRRDnfjjdrsxZ84c8XJv2rQJ5513HgKBAGKxGNLpNC699FLMnj27al6q87NcLqO9vR0mkwn19fXI5/PI5/NoaGjA6tWr0dLSIvWHNm/ejJUrV8JsNsNkMiGRSMDv92PdunUSExYuXIhEIgGv14vPf/7zOPfcc6vi0KHg9/uxZMkSDA0NwWQy4U//9E/xuc99TpSxdDoNl8uFFStWoLa2FgaDAU6nEwsXLkTr/x8irlQqaGpqQltbG3p7e+H1evHVr34Vn/nMZ+B0OuH3+2G329Hd3Y2Ghgacc845cDgc8Hg8OPHEE9HU1CR2CV7zwMAATjrpJKxevRoAsGjRIoRCIQwPD2PevHm45ppr8KlPfUrSeU8nFn3729/uu+2227ZMayAdx9AcqTlS49iF5sipcbxx5E9+8hNcddVV3550rBxOlpGpsHTp0sprr70GYPKMJeoT5eHiYO+Z6vcTv/9wP2NiIx7qWqd7bYf6LA2NTzLUYoHZbBb5fB52u10q3JdKJWQyGTmo6nQ6qxZ4Ez+Hh/iLxWLV5xSLRWQy
GTidzqpt/MnmdjablYryBD+Ln18ul+F2u2E2m2URODo6irGxMfj9fsnCVC6XkUqlRC1UF6iH2zblchnJZBLlchkOh0PuCYAkNXA4HKKm8T0Tg3ihUEAqlRICJ6mx/VOpFADA6/VWWSEmEm6lUkEymZQdDO4a5PN5ZLNZWWBPXEgfLoxG4x8qlcrSab3pOIbmyKl/f6jP0tD4JENz5KHb5njiyGXLluH111+f9A0z+sCloaFx7EENmmpmMf5NPYirprWe7HPUn4mvnSwwHuozCNUuwEPIE4Mlr39ikciJ3zud4MrrOFgb8G8Tf6eC1z1Z26jv4fvUtp/4OQStaRPbV/2MD/Ow9f/v1Q9c04DmSA2NYxuaI6dum+OJI6d64JrxtPAaGhrHFhjsJvq1GYgON13qZKreVJ8/1edMFgQPdT3q+/idBoOh6vUf5gHkUJ9xsHab7Hfq3yZrp0N9zmTfeaj+09DQ0ND48NAceejr0RypH7g0NDQOAwcLPNMNSB/1cw71uqn+Pp1gfLj4KNczndd+1HbWD1caGhoaRw6aI2f+eqbz2qOBI3VlQQ0NDQ0NDQ0NDQ0NjSOEj3yGy2AwDAHYPzOXo6GhoaHxCUdLpVKp/bgv4miB5kgNDQ2N4wYH5ceP/MCloaGhoaGhoaGhoaGhMTm0pVBDQ0NDQ0NDQ0NDQ+MIQT9waWhoaGhoaGhoaGhoHCHoBy4NDQ0NDQ0NDQ0NDY0jBP3ApaGhoaGhoaGhoaGhcYSgH7g0NDQ0NDQ0NDQ0NDSOEPQDl4aGhoaGhoaGhoaGxhHCR37gMhgMK2fiQj4uGAyGFQaDIfxxX8eHgcFgWHkUX/tnDAZD4OO+jg8Lg8Gw6ihu+88e5W1/+tHa9sDR3/4a08PRzJGaHz8+HM0ceTTzI3D0x+ijmSOP9rafCroOl4aGhoaGhoaGhoaGxhGCthRqaGhoaGhoaGhoaGgcIegHLg0NDQ0NDQ0NDQ0NjSME/cCloaGhoaGhoaGhoaFxhKAfuDQ0NDQ0NDQ0NDQ0NI4Q9AOXhoaGhoaGhoaGhobGEcL/AXSwPC8Odq4zAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [], + "needs_background": "light" + } + }, + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1wAAAFDCAYAAAAu+g+jAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nOy9ebRt61nW+X5r733uTSAhIQQxCpQIisoQG9oaKtIZE0mB1qgAgSDGLthCQEjRd1I0UqBVUgpaAhFBEUrpOwUdZQlosAFFHQgEpAmQGJKQ3HN2M+uPtZ69f+vZzzf3PvfsS+49533H2GOvNefXvP3zzXd+c66xLEs1NTU1NTU1NTU1NTU13Txt3tQMNDU1NTU1NTU1NTU13a/UF1xNTU1NTU1NTU1NTU2PEfUFV1NTU1NTU1NTU1NT02NEfcHV1NTU1NTU1NTU1NT0GFFfcDU1NTU1NTU1NTU1NT1G1BdcTU1NTU1NTU1NTU1NjxH1BVdTU1NTU1NTU1NTU9NjRH3B1fS4pjHGT44x3jjGeD3+nvUox/roMcb/e8P8ffQY43TH12vHGP9ujPFBV/R56hjjS8cYP7Xr9193399qd/4nxxi/MMZ4M/T5E2OM78P3ZYzxw2OMDY597hjjK29SvqampqamxweNMV4wxvjXO9z4uTHGt48xfs8NjPuVY4zPvSEerxxrh1+/spPjZ8YY//sY4+CKPlPZxxifuRvz+Wh/uDv2P4CvZYzxHmjzjmOM/jHapl8V6guupicCPW9ZljfH38++KZgYYxxOTv3LZVnevKqeVlVfVlVfN8Z42mSMW1X1T6rqt1XVH6yqp1bVe1fVq6rqPdD0oKr+4hUsPauqPuzaAjQ1NTU1PSFpjPGSqvrSqvq8qvo1VfV2tcWbD35T8nUP9K473HyfqvrQqnrRrOE1ZX91VX3WFRdur66qG7mwbGq6W+oLrqYnHI0xnj7G+JYxxi+OMf777vOvx/mPHmP8+BjjdWOMnxhjfMQY47dU1d+oqvfeVches2v70Bjjr+zuNr1yjPE3xhhP2p37/WOM/zbG+KQxxs9X1d9Z42tZlrOqellVvVlVvdOk2UfVFiz+8LIs/3FZlrNlWX5hWZbPWZbl29Dui6rqE2YXbjv6wtoCzOxCsKmpqanpCU5jjLeoqs+uqj+7LMs3LsvyK8uyHC/L8s3LsvylXZuHdjslfnb396VjjId254RlH7/bPfFzY4w/tjv3p6rqI6rqE3fY+M27488aY3zDDmd/YozxF3bH33I31vN23998jPFjY4yPmo21Rsuy/FhV/Yuq+h2PVvYdfUdV3amqj1yZ7quq6rePMd7nKr6amm6a+oKr6YlIm9pe/Lx9bS9e3lhV/2dV1W4b3l+rqucsy/KUqvofq+rfLsvyo1X14trdjVqWRRcyn19Vv6m2yf4dq+rXVdWnY663qaq33M31p9aY2lXW/lhVHVfVKybNPqCqvmNZltdfIeO/rqrvq6pPWGnzjVX12qr66CvGampqamp64tJ7V9XDVfX/rLT5lKp6r9pi2bvWdsfEp+L821TVW9QW4/54Vf31McbTl2X58qr6mqr6wh02Pm+3Vf2bq+rf7dq/f1V97Bjj2cuyvLq2d6O+Yozx1lX1JbXF2K9OY10l2Bjjnavq91bVj92D7FVVS1V9WlV9xhjjaNLmDbW9S/aXr+KrqemmqS+4mp4I9I/GGK/Z/f2jZVletSzLNyzL8oZlWV5X2+TJitVZVb3LGONJy7L83LIs/yENOsYYtb2I+rhlWV69G+vzan+b3llVfcayLLeXZXnjhL/32t0xe6Sq/kpVfeSyLL8wafuMqvq5a8r96VX158cYz5ycF8B82m6rYlNTU1PT/UfPqKpfWpblZKXNR1TVZ+92TPxiVX1WVb0Q54935493uyleX1W/eTLWu1fVM5dl+exlWe4sy/LjVfUVtcPGZVm+q6q+vrbb459bVX/6Ucj0Q2OMX6mqH61tcfHLJu2uI3vt+PqmqvrFqvoTK83+ZlW93RjjOXfHblPTvVFfcDU9EehDlmV52u7vQ8YYTx5j/M0xxivGGK+tqn9eVU8bYxwsy/Irtd0P/uKq+rkxxrfuKmiJnllVT66ql+uCrrbbEniB84vLsjxyBX/fv7tj9vSq+qbaVutqjPF2Ay/72LV9VVX92usIvSzLj1TVt1TVS1fafFtV/bd6dIDX1NTU1PT4p1dV1VtdsX38WbW/s+IVu2PnY9hFyxuq6s0nY719VT0Lhc7XVNUn1/b5KdGXV9W7VNVXLsvyqmvKQfpdu/k/tKres7Zb8Wv3Mgzh5kfU9WQnfWpt7/Y9nE4uy3K7qj5n99fU9KtGfcHV9ESkj69tZe49l2V5alX9vt3xUVW1LMt3LsvygbW9sPlPta3MVW3vCJF+qbbbEX8bLujeYvcgb036TGm3TfBjquqFY4zfuSzLT/FlH7tm31NVzx54A+EV9BlV9Sdru61jRp9SWzB88nV5bWpqamp6wtC/rKrbVfUhK21+trYXSqK32x27DjnO/XRV/QRw8WnLsjxlWZbnVp1vn//yqvrqqvozY4x3XBlrPumW/kFt5fv03bHnADe/pq4nO8f87tpuT/wzK83+Tm1fcvVHrstrU9O9Ul9wNT0R6Sm1vVB6zRjjLWt7UVJVVWOMXzPG+ODdBc3t2m6bONudfmVV/Xptv9u95OIrqupLdnvRa4zx68YYz360jO32t/+t2n8OjPSy2oLZN4wx3nmMsRljPGOM8cljjOeG8X6sqv5+Vf2FlTm/r6p+pKr+6KPlu6mpqanp8UnLsvxybTHlr48xtMvjaIzxnDHGF+6afW1VfeoY45lj+xMjn15Vf/eaU7yyqt4B33+wql63e2HUk8YYB2OMdxljvPvu/CfX9sLqRbV9wdNXj4u3A/pY16HPr6o/OcZ4Gz9xTdmdPqWqPnE22e5O32dU1SfdJZ9NTY+a+oKr6YlIX1pVT6rtHarvr+02QNGmql5S28req2v7bNfH7M7906r6D1X182OMX9od+6TaVsO+f7c98Xtqvq/9bvh77hjjt/uJ3XaGD6jtnbfvru1LL36wqt6qqn5gMt5n1267xQp9am1f7tHU1NTUdJ/RsixfXFts+9TaPqf001X156rqH+2afG5tX7b076vqh6vqh+r6r0D/21X1W/Gc9GlVfVBtX8DxE7XF2r9VVW8xxvjdOz4+atfuC2p78fXSNNY1Zfvh2j4a8Jcm56+S3dv/i9ri6hp9bV3/eeqmpnumsSz9m29NTU1NTU1NTU1NTU2PBfUdrqampqampqampqampseI+oKrqampqampqampqanpMaK+4GpqampqampqampqanqMqC+4mpqampqampqampqaHiO67g/JTW
[base64-encoded matplotlib PNG output data elided]
B+NRCJ2mgDzhv/g2yTHxAr8ii5Pu93OGE4SAvdEYLVamd279SvENUAKlo9rOp3aOhLfkDH51/HyMdLHSB8jfYz0MdLHSOKbG8vfvT7I4GOOot2gS4Bx3zwxFAIEjup5nh1DslmBQMBmSEQiEXvr5aEwfCQK7uLBRrHI5XLZdJ9ckUjEgls4fD+53ZVLzGYzA6H1em3tQzF0jpQTiYTp1jmWhllhM0ejkarVqiTZ/XBE/uTJE2NS3La+n3zyiW00z00w8zzPtKVID2j7SvDs9XpmJKvVyroT4Tx8P6wOnZkw3Gw2a0fUaL7L5bKk46R09o8AB5uITSARKRQKBsTpdNqOdefzuer1ujqdjh0tsxeS7PnYA9qHRiIRffrpp6b7pV0wrAOSGfZ5OByazMDV4NNVBsY0l8vZPBKY4lQqZVKO1WplhawkH8yFIKEBzN3vm81m+uSTT3R1daVyuWxH4DgqzMh6vdbd3Z3JXQaDgVKplMlXMpmMIpGILi8vzdaxWWQjnudpsVjo9evXFtgBi+l0qnw+r0KhoOFwaDbktktGHuPWMsBsAX749KNHjzSZTE5kFbFYTN1uV9VqVZ7n6W//9m/V6XS02Wws6FIoXqlU1Ov1jGEjMJPI4W8kIjDnsLqe55l/EXN2u2PtSTabtUL0ly9f6uzszFrfArB0PXJlFjQi2O/31goYVhGb4J7x5+FwaEkgiRDACyiRrEwmE4tZ2+1W7XbbupIdDgdNp1Nra0stSygUOplrA1MJk86+UQwP402xNWtDYs+zoafnM/lvmDvWmPhOgudf3+/yMdLHSB8jfYz0MfLjwEhe5r/t+iBdCmG86GSEBtI9UoaBgvFwdZC8pWKwqVTK2Jh3u4FwJI2MgqI2mDc0rbCJ2WzW2DTkB6FQyIyQY3OYHBwIFgpAgt3liFKSKpWKSSFgRXhOCm35u36/b8PjqtWqvv76azuaXK2O7UEbjYZ6vZ4F3FwuZzINtOtuUPW8YxEr8zc8z7Mj2OVyaVph5BLIOSiopggUMHADL8fJlUrFGLh2u61IJKJ6vS7P82yoIFpavpvgEIvFlEqlbJDdb3/7W3322WfGlsFAZrNZ3d3d6cmTJyfdlNhz9t/zPBWLRSsG51gYYIrH41ZsigxkuVxa8MYmAZrpdKp2u62bmxv98pe/tJklsMAwrwRpjpphZUhwYJDRES+XS5vUHgqFNB6Pbd1JBGgjTNAIh8PGCj1//lw3Nzf66U9/qkKhoPF4bHY+n89VLBZtvdGwU5iODIJ1xa7H47EKhYLOz8/V6XTU6/VUqVQkyeQXgDOASHtcGB8X3JPJpMrlsqLRqB4+fKh2u610Oq1IJGKSgn6/r2w2q1wup1arZZ22JpOJsZJIM66urtRut5XL5czX6NRENyV09t1u1zTpweCxMxXzN6LRqA1nZLjkmzdvrNXw73//e+33exWLRQNpbAS9PgkVMSiVShmweZ6nVqul7XarZ8+e2doCpJvNRhcXF+r1emYnMJ+cFAB4sVjMZg553rHrW6fTsecCKCKRiNU/UNPgAkSxWDSfo6kAzBsnFdg/LaSl++J9mMx3E3XAAw27y+z51/e7fIz0MdLHSB8jfYz8uDEy+MUXX/wgIPmf//N/fpHNZk2zzc3glAR8juhg76R7LTrHnAQQgg+BZbPZWICloA8A4Ahcui/oRrOMMS2XSws2LgPBMTfFjHQNApBKpZJ6vZ7pVgmykuz7OQJ+/fq1gR3H6zCBg8FAkuwNP5lM6urqyob1MWjx+vpauVxO2+3WWJV8Pn/CMMLGuVIHgux2uzUmiEJU960cXTqBo9Pp2GRygJLCYOnYOjUYDOrm5kZPnjwxXThH9AyhWyyOA/VCoZAqlYoxXXzXYDBQJpPR2dmZBdJYLGYFl/v93oqgAXlJZrzL5dLWDoYFEIapA7DRepMEkMDQfhVQIFjChsEuxuNxY6BGo5FSqZQNfzwcDtYVSJK1QgWEYEvoWEViEI1Gja3Z7/fK5/MnxZp0z5pOp5pOp6Ztv7q60uPHj/Xo0SM71icoDIdDk73k83lrw5vP57Xb3Q/i5F7xC2Zf3N3dqVKpKBwO2xo+ePDAkjWSQORMMI0Ex1Qqpclkojdv3phEZTab2fDSly9fGqDT1hVmut/vazgcKhaLqVKpKBAIWHEzzB12AEA/ffpUv/vd77TZbFStVtVoNCTJWDfuebvdajKZWE0DcafdbpukQpIxXwRufJ8YwfwcOrkRq7CdWOzYXppWwjxjNptVu93WdDo1aUQ6nbZ7GY/HBtjhcNhkF0iKZrOZFeuToIRCx2Jo2Dm3W1ogENBgMLDYg5Zcui9UR7KRTCYtQQDQiKHIuwCWdyUVyNOCwaB6vd7dF1988b9+EHB8RJePkT5G+hjpY6SPkR8HRo7HY/3qV7/6H9+GBT+4LfzhcDh50yVgccRJUZurBWZT3SC52+3sv3FOz/NM84kRcJROYIVRkGRAxlGiexQIA8S94RCAWzKZVLVaNbaJAEfwpJCUxQZM9vu9bm9v7TNgcjzPM/mCe1xK8ShOt1qtVKvVrEA2Eomc/D2a1FAoZB1vRqORHdFyX51OR7e3t3r+/LlevXplhsYRLgAOG4oEgOJAGLFCoaBCoWBv/ePxWE+fPtV2u7WWsjjBdDrVcDi0gIWDhcNhc0aKKBuNhklpYOdgIQmo3W7XNMqhUEjFYtF0+WiKh8OhDahcLBYaj8fmkLC4SGP4O+yTI2c3eSSQZjIZk8nwd0+ePJHnedaeFgYDVhd7x14Gg4Fms+PAvv1+byBxcXGhm5sbffPNN5rNZnrx4oWi0ajOzs4Ui8WsjSxgmEwm9cknn+ji4kL/+q//qkajYQW+1EdQnEkg3Ww2VpCNzIfiXoIHXYkAzdvbW/sst7gdvyYI7nY7A1GSH5KHhw8f6u7u7iTZ+vrrrw1cZ7OZEomEnj9/rs1mo1arZb7BfWy3W5XLZbNF/JlEo1qtmm38/ve/N4aZpBVWHr+DMUS/PZlMVCgULMGJxWJmI7RlJumDzefkYLfbaTAYqNlsqt/vmx3CnkYiEWWzWdVqNfM3WGWYTuyckwn+Qct+OBzU7/dt/fh54qckDQYDq1XZ7XYGaDCK7BuF6fgUrGK5XLaTFex/tzvODeJz+c535W7EXpex9q/vfvkY6WOkj5E+RvoY+XFg5F+0hku67/rC0ZsLHrAWvIWiY+emeWPE+Hlr5I0UxoUjVj4HDTXfEQgcOxPxlprL5RQOh3V3d2eyCY6QYTVIIJB1sHEUPHK8S/A7HA7K5/NWaIiDAqiwRbB2vBlXq1XT43Kcmc/nFQ6HrasRhYcEs/F4bEPp0OhzpMrGw4xkMhmVSiUtl0szoO12q1qtZsaPXAQ9NvuD/pfgN51OjQGA+Wu321ZAOhgM9PjxYws4tK0NhULGkvI58/ncjr73+2Ono4cPH9qxbCwWs4F/FCuu12tls1kLBDwPXbQAF/Syd3d32mw21jY0Go2arCOfz2u1WhnYwKoC6gSe8/NzjcdjPX/+/IRZ7nQ6JvtgOCDMCQGaGRuwM6lUSg8e
PDAdNfrgUqlkgbXZbOrf/u3f9LOf/Uyed1+QiywGEOYzqdPALqfTqcLhsM1lga0ZDAamPw8Ggyc+lUwmrX4gkUjoRz/6kRWJwzh2Oh3tdjvV63VLxlirRCJhQR4GKBqNajAYKBQK6de//rUeP35sdRrVatXW2/OOc2Emk4kxprBz8/lc19fXxtjhj9lsVtfX16avv7q6Ur1et9oQai6kI5MZDoftmYkDaPsTiYTq9bpev36tfD6vwWCgxWJhDCTJoXSUjvz4xz9WoVBQv9+3tQbIYfnv7u6MKd9sNiZ7qFarVt9AB6bdbqdms2mBnXWB5aaexk2uOSUgYSUGEof4N/U9xA7iB7EM/yTJgyXH5wFSt+sSSdJ0OrXkl5/hvvzr+10+RvoY6WOkj5E+Rv71YyQ/823XB5nDRcDgWC4Wi9n0eY7aCNiwJxgDUgekC5IMgHAA9OQc6fEPQRwACYeP8yiYq7BcLm1DeTPt9Xr21g+bRGHgcrm04lWOs91j2P1+f1L0zNs7x4swIBTuuV1t3j2WvLi4MFC7uLjQv//7v2u1WllBM0MsAZD1+n7OCDr1/X5vxdOSVK/X1Wq1rM0qb/XB4Gm3JQb2cdQeCAT09u3bk6nn6OwpcsW4nz59am1zKax1nQB7gNWAOUPWMZ1OVa/X1e/3NRgM5HmezTZJJBJqtVrGMMEARaNRG2IJ24IcIpfLqd1uK5lMajKZmGyCpIUagsFgoNXq2Po0lUrZXI9er6d2u20M3WeffaZgMGiT5T3Ps1ku6PhXq9XJ2tKulO+BxYOJwdmDwaAajYZ+8pOf6O7uzoIhGm2m25MU7Pd7PXjwQMlk0iauS7Li9P3+fp7PZrNRrVZTNBq1QIJWHFa5XC4rm82aXv5wOBj7gzaeI31kF/gHjDs2jN8lEgn1+301m01J0ueff66f/OQnljBkMhl99dVXKpVKNtTwm2++UTgc1qNHjwx0uCcCO3pxWH008rPZTN98843K5bIxmAAmwI8fEleKxaIVx4/HY9VqNdP8c4qw2WxMYjIcDhWNRq0AmORlPB6bnzPUdTKZ2IkCa0XdxrtJNWxqLpfT3d2dFd0DaPiNm3ijLSfmsO6w8nRucllp4iCSGJJ94ibSNr6HWg9+hueGgST2EO/86/tdPkb6GOljpI+RPkZ+HBj5vusHSwo5WvY8z47i3bc8dOD8DBvH2yZ6cz5H0okRu5vHkSALiWGj5YSlIcjBAFGkOZ/PrcCT7xkOh+p0OtYJRZKxJ5vNxtqCZrNZ1et123CCOUwBz7NarexeYPgwhMlkok6no/1+r0qlYg4wmUxMmwvrhVPzxg/T1uv1jCU8Pz+3CfZoqWHheINnQCGdX5BfcIyLhKXRaJiBX11dWYLAETLyCSQKjUbDWBCSCFisfD5vx7MEYuk4p0WSbm5uzHhxBIALRpSBjBRFo89tNBo6HA5mYxw7JxIJnZ+fG5NYKBTkeZ46nY4xp2dnZ2Zr7DWdp3K5nGq1mkKhkL7++msFg0Fls1mdnZ3ps88+U71eN6kKe5pIJGwuyH5/310H3TyF1KvVSs1m0wD4yy+/VCKRUC6XUy6XUzKZVKvV0ng8tn1BonN3d2f6aBIjCmlhv7D5/X5vTBzMUDx+HEBZKpVMbhMKhUyXXalUtF6v9fz5cxUKBZs4T41Du90+Bor/DlaAvCRruxsMBnV5eal2u61/+Zd/sQn3HOfX63X7bqQciURCpVLJfODx48f6m7/5G6XTadXrdXv2aDSq29tbC/4wW26coeifpI6EjTav7XZbf/7zn3U4HGyPaIMLe43cI5PJaDqdWtcotOckidQukHym02kr/id5giWPRqPGtrvyCmITe5DL5UyCQhxBakO8m06nJ3NHJFkyACCgq280GhoOh8YaYvPSfftn7hOgIw5QxM9LABKi97F2/vX+y8dIHyN9jPQx0sdIHyM/2AlXMBg0J4L1cIvveIt1tcncKIvBQ2G4bvEuoEJg4884ygSYDofjXIxOp2PBEqaKAMCCUizKUbw7eHG5XGo2m+nBgwfGGEmy9rY4/Wg0MvaAdeA50Mi6bV5dfTaFrIFAwAoFw+GwyuWyyUGm0+lJEd9yudRgMLDPCwQCmkwmSqfTWi6X1sUI1qLValnBIUxbLBYz9ovjXrdAlb3B+GDw0EDjrNzXfr8/GWrYbDY1HA6PBvbfgX2z2VgQcOsY+v2+ra/neapWq3aED2u63W7VbDZNtjAYDJTNZs1h0DnTqQeZAAH9j3/8o6bTqeLx40BFnItjY4Kly/IiX+G4mmBCUEYeQ2K02WxMUuAWkx4OB3W7XWuN3Ol0DFgKhYJSqZQxne12W51Ox7obkTjc3NyY3VFbQeEzviDJpDEEM9gsSZrNZrq7uzMQ2+12evPmjdknrHUymbQaAHTgJE8AGAwR94OfXl5eqtvtajqdKho9DmlcrY7tnj3Ps3a+l5eXWq1Wmk6nxn4vl0uzn1AoZH5HYwDXJvP5vMUQCvb5ffw+lUpZkH/79q22262q1ar+8Ic/aLPZqFgsnszb2O/3J4nmZDLR+fm5FQTPZjNjRymC3+12KpVK9juw++1222LTfr+3mgsKywE0/JAkajQaWR0A3aQoFEerjnQKho3ECt0980roNMf6kVRvt8chtzw7iR41KzDSyL6IbawDP+9f3/3yMdLHSB8jfYyUfIz8GDDyfdcHqeHiS3BQ3ghZJAyc4BIIBEwOsFqtjFHY7Xb2MOFw2Ipv+W/3LRtAcHXlHLPzpo+js4iuwy2XS1sggkcoFLKZAKHQsZsQDkFgYvFxLrT2gCBvwEgMUqmUMQ2BQEC1Ws2YhlQqpXw+r2Qyqd1uZ//meZF7cPQsySQls9nMDIfp4Mgn0Oh2u10r1sSxKHLkODkUCtkxPEHx8vJSh8OxSBEGUJKxVgAH7AU1CbANBM1o9Dg/g9/nKJsOM7TxJUgRVFxmFmYOGU4+n9ft7a1Go5Gi0ai1a+VYHk0++4tdwPBSd5DNZq3D18XFhUajkUlXarWa3rx5Y8EKFgZGhWNvAhhBz2WSO52OdfDC/nq9nsLhsD799FMLghzPJxIJPX361PYWrXMwGFS/31er1TL2mIJU5r7Qkpk14sh7s9kYaxaJRFSr1SwRoXNQMpm0RKHVahkDBYv89OlTA0UKskm8+v2+fT5FvaVSSf/1X/9l9jEajRSJRDSbzZTL5YytC4WOhfv1et0+CyChwL5arSoej2uxWGgwGFjw5DsLhYLy+bzVtzArhiRnu90aq1wqlYz9kmRsG8Ga76aTEknafn+c64OPE3uQAFFITMJGPKC1LHU7yC+4LxgxZg1xWgFDCfOPf5GQcxLiyhiQs5Agk3CQkGazWVs/Bt2iu8duKIJ2mUFiO/UYoVDIfMq/vt/lY6SPkT5G+hjpY+RfP0by9992fZATLt4q2SSYEP4M5g59JIwJgYO3WAoPcWICExvE273LknEsytC7w+FgWmTYCIIkcgRYQ1eu4Wp9ATAK4tgsdO/ICNiwRCJhszzQWS8WC2sJu9/vTWcNi4YGnTajmUxG2+1xJol
0f0TKPaLzhiljMB+OeXl5qdFoZEzeer1Ws9m0YIxWni40sEEEcoIUmmIYJgwZoz8cjp1pYKMCgePwTQAUx6BVLAAN+wGgjUYjnZ2dmePDlNEJCcmK286T+6BYEeaLdqaunIHfoWMRjMd2u1W9XreCW4JmJBJRsVjUZrPRgwcP1Gw2jYWRZCxMoVCwI20CFfdBAML22JtQKKTXr1+btOLt27cnbCmJ1bssIFKLRCJhE+Q3m41evXqlUql0wnZjM9igK7NxWZhKpWIBGOkLzwcTtFgs1G63VavVTGLAz2+3x05c2CU+2u/31W639fjxY52fnyudTms0Gllig89yP8QH7jkajVpxNbNFrq6uVKlUjM3r9/uWZPR6PR0OB9VqNUlSrVazfYnH4xoMBjaPJJFIWIcvpAkwkm/evFE2m1WxWFS/3zdW2WVF8WnABYCEwQfgAoGAhsOh1U8gecE20NMDfvF43GIOCRMsNv/gT8RRZD40X8AfYeJisePcEp4nHA4rHL6fjUMtD0lRPB43aRUxFvmTW4fBiQzf71/f/fIx0sdIHyN9jPQx8uPASPzh264PUsNFIOItj0DNYrrAggPwdzAN7s+9+2D8g76cQEMHGd6+g8GgsW3odnFuSabl5l54A+cIkns7HI6Fh7ztMynd8zzl83nlcjl7FhgqW9DAsei42+2atANd6na71VdffaVWq2XH+64Ug6JRGAKOL5fL49C529tbAxACZiAQ0NXVlX338+fPlU6nbcYBXW24FxfYmVjuHlW7E8rRESONuLy8NEeHoSBAbLdbk3yw7hwLS1I6nVa73T5xTNepAFGcEGYVJgKpBjM9CoWC3rx5o1AoZAEwl8sZe0H7YHevJVnNAsfFTFePRCLGFL148ULPnj3T5eWlyVAAdhIDEhfsHemEW4hMUJrP5yqVSpJk0+sPh2M3MJcVxG8AEzplrddrVatV2/disWjsMfIkEiAYILcWgVoJmF/+PhQKGfMLw0lCw6yYVqtla47siOQmmUzaUTqsNf8dDoctIQyHwycFxiSV4/FY/X7fBnW6x/ee55mNuENCAaBAIGDrAXtIEotP0m6ZTlyu3hvJE/YM8HFPweCxhXWpVDIwdBNiQPxd9gumj//nZ4g13Cf7iu8TC5H+cILBfCJkL9S+AFIk5q5Mq1gsmnSDgn8SaNaLpB4fc++JuILvAWqbzcZiqX9998vHSB8jfYz0MdLHyI8DI3kJ/bbrg7xwsYnIHfhC/g1A4IiwbjgVhs1/EwgJUgROWB6OIWElmLSNk7iTqQOBgKrVqg1LpK8/hlSpVJTJZOR5ni2W5913ZsKo0JYHg0F1Oh3FYsehcblcTtls1qQNSAxgzdhwt/CQ2R3o1jkKpcvOfD43Zgd2jTVhiCSsByB8d3enTCZjPzscDm29cKTNZmN/B1DCCjI0sNfrabc7dpshSCcSCUmyOSHFYlGRSMQKU3Fed4/cAlAYJZhSjoRhFJFuUDuw2+3MoWjDyjPA3nH8fHNzo1KppEajYYCfzWaVTqdtXQlUrEW325XneRakisWirQM2eXNzo0qlosvLS+uuROAZDocmqSFpcgMLGuT9fm8ynFKpZGtar9dVKpWMTUTzHA6HLahjj/F43KQAFIgDQHw3687zI/3i+VzGm5+DmSHgMN2e7+YfEjbpXurEvnIc3+l07OdXq5XK5bL2+71JdmD9VquVsYXITAhqDNmU7ot0SeI2m43+9Kc/qdPp2Pq60ixXhgMzDlgRGNGH48f4OpKM8XhsQZfGBujVSQj57mQyacXtbn0GySxtsd3ACxDCMBLsAQKSOmQ7ADKgSYIFM4utuuDssrlIOJBqEWPd1tokuLCAgCX3x37znNiAf32/y8dIHyN9jPQx0sfIjwMj/3+RFPIgrhxCuj+G5c/ct0dXC8nDusGGQOvq3/kMlzU8HA6aTqc2ZwMJw36/V6FQ0MXFhTqdjrXP5egyFDoWusLMsHmwHGwe+txisajD4dgCk85MkkxLyr2xaQASQQZZRyAQULvdVrVatful4NTt2gSDQytOjt5xaI6GF4uF/vjHPyoej+v8/Fy9Xs82P5FI2JHocDg8cRruHdYBdgOGDR09OuvZbKYnT55YhxZYD+6XI19AtNVqGXNBkMEmCGiwcoAkzkNAmM/narVaSiQSNv8EGUY6nTb9PZIb7A3nJ3kg8MHg4lTMTqEAGUZuPp/r5cuXikaj1uFnt9vZ2kQiEfvORCJxsnYwhSRIPMdoNLJZI7RhJRjBZKZSKfOB6XRqzBN7jS1lMhlLypArwXYD8tgt6xYOhw1EeIb1em3tfdk/ZsO4DBQSFxgm/BE5QiAQsGcB9BOJhM7Ozkw3DqiTSOAzm82xCBu9PW17KQTGRtwj/0wmY/ZJgTfafNrRAiQwo0gPeE46XsFsbTb3HddIVjgZQG5FLQbsFiCfTqdt8Kekk+fj80kQWCfiWjAYtCJyfAn2lXoaTi+ILyRoxE6e0fOO84nm8/kJY8fv8TvIYN79MzdOE1vR6/Ps/vX9Lh8jfYz0MdLHSB8jPw6MfN/1QehKV+YASMDWvHsRSDBSNka6n/Au6cQYCATu2zBv7+738sb+8OFDO2pMp9OKxWJ2RL/b7UwDPRwONRgMtN0eiwSXy6UtKMaDpjSRSJzM2iBIwHBhONFo1IxuuVwqn89Lup+DMJ/PlcvlTONeLpfNWfhMVzMKCFH06epmAUOOOa+vr/XJJ59oOp1ay1lX24o0wz3uxpnS6bQdMcOcMLsDVo0BiwAPTkEwJKiNRiNrubrb3Q/O3O126na7pu0laXD3HDCB5WWdCJ4wR8hhkBGQSMCIMvsBO8tkMsZ8wLTAkri2w97CLHa7XasHwMlJCGBR6LYEiyTJAhfafKQnsGoEYIJHLBbTcDi0+TCdTkez2Uy9Xk/5fF6NRsNYHoIlXaWwfwJdIpEwDTnF19j0dru1GRqBQMAAwk1oeBYSFrr7AHBo6Hn+s7MzYyvX67W1ZGa+CYkHyQNF6YAdyQSsIywYvh2Px3V5eWn7yXBL9PzIo9DyI6EJBI6FwJ7nWfE3CWs4HD6Rb7APAJhbvwDjh1QDhg/7Xa1WNisE1o/kgIu1J/AjbUG2gx0hi5hMJjo7OzNQ4n7xEf7BB5EFSbK1o1Cd5J1khEJ04i8+wn64QAf4Ay7fFtP96/98+RjpY6SPkT5G+hj514+R77s+yAsXgY8vdt8ocRgWAjaO/+fiLRtmCgcHeHiT5K0Zp93tdhYAu92ums2mzs7O7Lic4+rD4WBOxFst2k9XqsE9SvfAgPNjiOi5cSqMhvs7HI5dZnBQmAtYpv1+b/NAYrGYTX1nPgRv4awVBoCMAqYmGo3agDrP86xgmcCRSqWs3SigQSBEeoEEgPapOGgwGFQsFrNZKNls1oJboVA4YQWWy6UVNxI8ATzadVJcTDABGGCG4vG4zaSAiWDNkHmUy2VNp1NzVtgrd50lWcDkGWFCsS8YCmwNBpfgicYZlgigcTv2pNNp+zmYEQI43ZX4XqQIpVLpxE+wBRIYJAX9ft8YuOFwqHQ6bf
YCswxzBYABvjwTReFIFUiKqB8goSJ4sPboufkel82mQxhBzAUVgic2Wup3AAAgAElEQVR6+FqtptFopFevXlliQxAm2cLP+F2SOe4XqQcdtbCRRCJhXatcfT4ADdNNAplMJpXP5y2I0pKXBMcN0nwnbB9BlcLy+XxuPsapBdIVbBH/lY7MHf+NBp19hOGnJkQ6AgHsoStlIB4RC7kAOP6M/efkAIYO4GCtYc+5X+piXDaY9eGe/Ov//vIx0sdIHyN9jPQx8uPGyA8mKeRm+DOCCSwcIENg5MFhUSSZw8N2uG+77zJ+OCH/5q252+3q5cuX+ulPf2oGQIvH6XSq8/NzY9E4hoZZcZkc3tbZSDe4wcrc3d0pm83a38OusPHuBoXDx/aSaKO3262ePHmi/X6vwWCgzWZjgwthOVhLWDhXfrHZbIz9W6/XVtBIpxkYqz//+c9m+O6RMY5dKBQMfGAuACcAi7f39Xpt3ZtwPJ4NwyMo8z2wfQRbPsOVfLDX6Gb5XJw2k8kYu1GpVMwJ3ARgPp8rlUrZXvDdFErCkASDQQuy7hR26X4AKWyJ53knjg2YYxuBQMB0xxydA8qApcsiAtLYCI4LKNDFh45SZ2dn6na7ms1mKpVKdm/7/VH/TvcqSdaGmEBG4JNkQHs4HKxVLglQqVRSu902cDwcDicaaHwBMCBw4rskQSQEdLRqNpvG9rIebqLJehD0YaoJ3m7Cw73OZrOTGgKkJUgD0ITjd3TnikSOM1xarZbm87kBlSuxQcJBfCF4chIRDodNtoBsg/VForBYLEzeAAPvFu5yb3y+C+bcL7bKM2OL/AMYwLZh48Qt4hXSE9aS5+Iz3T9zYymgSDwmBvGcnFL413e/fIz0MdLHSB8jfYz8ODDSPUl79/pgc7hwLv6bm2OjXMbE/TsewH0bJkDxRu8+ND+P0QWDxynUHPFnMhnd3t6qXC7r7OxMnU7HAAsnAcwAGoouYQsJlrS1TaVSFqxCoWObzLu7u5NBidwvaxAKHQtdOXaPxWKmu5ZkRaW1Wk3tdlvD4dCOqXO5nILBoLrdrj0vQdntviQdWZxCoWCshHRkCer1uiaTibEdMIoYI2vCEXI4HNZ8PjcZBIZD5x7WPJ1Oq1Qqqd/vG9Ox2+3sed51RI6uPc9TOp22SeE4EsfD3HswGLSADIvjecciX5d5oHVxPp+3ID+dTk2Kwr7xPQREt0YB4N7v9xY4GSqJxnw+nyuTydj9wLS4rXhXq9WJlCOVShmD1u12NZ/P9eDBA7VaLW23x+GCh8PBWirDlMKu8HzBYNA03bFYzLqNVSoV9Xo99ft967AD4wm49Ho9zedz67603W6tDsOVdaB/BwiYvYJ0hCN2GErkN9j4crm0AnDpfognXdLYP+k+iOHnFNUCcgRjpEzEicXiOMDy9vZWmUzGkn58gGGISJIIukiD8C0XqIgBsHaw8Tzjfr8/6TiFLIHfJ9jjQxQcs1Ywjcx0cb/XPaGAMSd+BQIBK/x2pTj8DicHACZrS9KEhCMej6vb7Ro4STqxDxd8id3uhY8AVO8DEf/6P18+RvoY6WOkj5GSj5EfM0b+4BcuNoKNIYDAMBG4WGRYGVgh2BxXP4p2k0DlHgPDlBAk0JtzRPjgwQO1220LuslkUqPRyDao1WpZQSsBjm4/MGUwb2jC0WN7nqeXL1/qT3/6k4rFommJ+axIJGJyA4IDrBdBOhKJGHN4dXWln/70p7q7uzvpKIVhUASL4fH3sH90bEEmsNkch+795Cc/ked5ev78uYG4yyDAnnH0j1ETqAEQANXzPANsgkuz2bSABOPG3uCoBN9sNqtCoWBrPJlM7GcqlYqtEwYLu8jQQOwjGAzaDBXkF9idGwRLpZLS6bSxJ+w3rBWfDwtGETNFpgQDV64C0wggkVy4bMd2e+y8lclk1Ov1FAgEVKlUNBgMFA6HVavVDOBxeBIc1pJuO9Q3IJVg3fr9vj777DNtNhsrtEVvj61sNhtjYGGekCjNZjMtFgvTeMNAep5nAZ3vRipEkoHUwS3ahe2TZImS689owLELbBC/Ho1GCgQCyuVyluzAugNy+DL2MR6PVS6Xre01n+V2F3KLzOPxuG5ubhQOh00v7zLak8lE9XpduVxO4/HYJAokr9RnwArzHMViUaPRSJJMUkSbX/YUQIK9RN/uAhbxDQAhDhEb8WHiDD9HzKAFNQxkOp02cHXtl3t3k49vSwBZZ2yDuCPdA7h/fffLx0gfI32M9DFS8jHyY8DI910fRFIIQLgsHQtCwOcN0D0uhZHjaBvH5LP2+70NzHOLT1k4jv9pIwvzlslkFIvFLOAw7BCGqt1uW4emWCxmzoDTYZQ4SCwW0+PHj9XpdPTq1SubVM+molHGEGDZ3KF8BFI2ttlsqlarmaYe/THHxhwnHw4HC+wwOAQJWohWq1WbFzKZTPTpp5+q1WqZlGCxOA7pQ0KB8Uqy73DZS9hTHJMhlTCEg8HAZB+AvutoLgOYz+eVTCatmw0OkMlk7JicwIuh0+6TZ2I/IpGIbm9vNRqN9OzZMytgJejheLBG7CtgHA6HbSo5TB8TxpG9uEfTSCRwSNfZsL/xeGzT4ZkJwvE8gydJqBKJhH0PGmn8gt9jzwjag8HAkhaCN2B2OByLY9Eak7jAZsKwIe9hT6bTqa0xxddIUNLptO0f97ndbm04qsuc8XPsHe1x8/m84vG4UqmUhsOh1Y/A5FFz4N4nP0PgLZfLJ4DNoEJYPRht2EhsF+kGDOV2u1Uul1O1WjVJkctGAXjuuvPfMHvEK35uNBpps9moVCoZ0+4GeJJK1ot1gj2LRqNmF/wZfk98A6Q5DUBq40pL3FMS/p/1RXJGAkadDv5O8sLvSDoBmXfjtbue/vX9Lh8jfYz0MdLHSB8jPw6M/ItKCgn6HOuxGGwATsjivMv0EezQ1mIc7mfxHSwqgMVxciAQMAdJp9PGDsbjcb169cr+H1aBzyEwwNLANMDYhULHGR6PHz9Wv9/X9fW1UqmUdeNBykGRcSQSseJSgiLguFwurT2mG8yYBcKAP8CNFpho0mECADzYLNY9kUjo7du3NvDt9vZWg8HAinfpkAOTxb3DpgAoFP/hnLAF8/lciUTCfq9QKJwE1Wg0aoMOmRlSq9W02+3UbDYNUMLhsIrFohkvGu7D4WCyBByAz06n05rNZlYom0wmrZibQYSwEOwjNgd44GwApCvrQS8Oa4e+3+0UBDCx5gQs1s7dj/V6bXrzYPA4owYGGLDmHgFpGDSCH4wJBeeLxULlclnhcFhXV1eqVqtqNBpWT0DAms1m2mw2JisAaGjdCuC8evVKz549M/lHr9fTfr+3drckGxQUs6fM0yHBo0MU3c1IskqlklqtlgE92nISPtaAQArDR+EvCRiMfj6fN99xazg4JVgul8aEvbvOnU7HZrwQc2CbD4eDsXPRaNTYbAIviYVrI8yKQfZBzQryBPzHZQFJdABh6b4tLhIZ1o/EhPsHNCORiMlSttv7lsbEUGQX7okKMg5+ljXGlqkrgekjGSSBcu/JlZz41
3e/fIz0MdLHSB8jfYz0MfKDnHDBPPBlLoC48glu0t1owMIFHx7M3RTeKDm2ZRHC4bB1LeINNZVKKZ1O65tvvjF2jTdUCie5Z9gN7oGN4Tg7l8tpOp3qzZs3yufzCoePRZHoTt1gHIlEVKvVtFqtTBPLRGsmc9Olh9kWo9HI2B66ISEHgaEDPGFA4vG46eoZZjmbzTSZTIwhu76+PjlOh+HL5XJqt9taLpdWBM1aRqNRC/oc8yYSCQNdZA7xeNxA2PM8c3wkCm63oNFopFQqZd17kJKs12vrolOtVk3vznMSgNFvM3iP9Qc4CLSpVMqA1S2sdFkX2F6chkAbi8WUTCYtKdlut8Ya0bKY+gKOsFOplK0HkoRw+Dg5ngCIs0qyQE9Q4Ls5Pqeodb/fW0Ct1+sneniApdfr6ZNPPpF039WHNQUcsXckFIA2xarz+VyvX7+2CfH9fl+bzUbJZNIAhfoLQJ9kCxlMMBg0KUgkcpwpUyqVNBqNFIvFdHZ2pn6/b7IU2KrBYGAJGEf7MIT8XD6fN9Dgubh/dOAkfu6RP2tENzIaAAAe/X7fah0IuNgPn+UWFi+XSw0GA4srgcBxFgpBHaDOZrP2uQA7ci33ZIIkERYbwInH40qn01oulxY7eBZsgn8TP5HwsDfBYNAGZUr6f9s78+ioq/P/vz+zr5nJzGQySQhZCESImBZCIkT8gSAQZXEDrFvP8QC2WtwVteJXbautx6W21T+0at04RywgqEBFlnoUEJQtlE0gewJZJpNZMltm5vdH+lw+CUkgEizL8zonR0nmc+fzuZ97n/e9z32ee0UYCq0qULsnDxz1D3kMO+XIUL8hG0w2jtozc/qwRrJGskayRrJGXhwaSe2qJwZEPSl2m5b4qfPSA9BN00un/6dGT/9OJBKiM5NgUGOmCgROHBRJ4kMzVDLiKSkpUCg6E+tsNht8Ph8UCsVJXgiqQPL+kEeCngcATCYTDh8+LJZvyXMGQHQs8vI1NzcLUWhubobP50Nubq4QPGrUgUAAeXl5cDqd+P777xGJRBAOh8WLJy8neYjI+0QJodRxIpEI9Ho9fD4fnE4nKioq4Ha74ff74fF4RHIshS1Q/VIyLQBhIOgeKOyAlnupo5HRoXhums0DEEuz5BWljkrvnwwxCQDlClBnpTokrwKdtUADCrfbDZfLJWL2ybsCQAhSa2uriMmlU++DwaDw8kWjUbFrkbwz0XbEciMCQGxxS4aYhCiRSIhdrlpaWuByuUT7MZlMcLlciEaj4j0oFApx8KbFYoFKpRLGlNoReWapU1OoAXmD2trahIAHAgEMGTKkizGk9kXvUq1Wi3umPub3+6HRaMSAiNo6eY8zMzOFR4z6gtfrRSLReQaIfECYlJQkRMnpdIqEb6vVKsIfqqqqkJ+fL0TT6XQCQBfPJp16r9VqhWeKzoiRx2lTTL1arRbeMjKgFJrU0dEhknB9Ph8sFgsMBoNo4+3t7WhuboZWq4XX60VycjI8Ho+I26ZwEaoXGlxSfUiSJHJBaCWC6hHoPBOE+isJlzwcjPpS91APACJmnsKfKKxEpVKJXBHqK/Ss8nySeDwuwjbUarXw1NIzUPsjz6lSeWJr22g0KgaO1PfIFlP/lSRJDASYHwdrJGskayRrJGvkxa2RAzLhophLmmFSJXef8SUSCdGY1Gq1qDTyFpDXhZbvKBGRZpNkBKjTSNKJU6lVqhMnSwOdy4R0PgR1UAqVoPsj40HfTTsxUcMkjxHtKEOeGgq/oKVDMjL0zGazGXa7XXhSjEYjMjMzcfToUTgcDvH/lPio0+lw7NgxEYsaj3cmClK8r8ViEfXkdDrh8/nECx80aBAUis4DGh0Oh+gU1AG02s6zMpqbm4VBo7hvhUIhPH5yDya9x2g0CpvNJpbFyUPX1NQkEippMEBeABKPSKTzvBESAPJWULkUCmAwGIQH0mg0ijZB75TOsKBwGLo3Ohyzo6NDlEPCSQJF3hyFQiG8lXTuRFtbm+h8tPRMuQjUXmgnJhpcRKNRJCcnCyOWkZEhRIt+qF3FYjFYLBa0tbUJzxvFiVPHJoNPXmgAovMCEInAlEBOBoPaHG25SsJNBpIMDuVskDecjBZ5pqk/tba2oqmpCZdddhmam5sRjUaRkZGB6upqWCwWEaoQCoXEO6A8B/L6ktElg2+z2VBRUQGr1Qq3241AIACNpvMQQ/KCyQeRclE5fvy4GBBQX1Or1aitrRVtgbzctApgMpnEM1GeBu1WRUJECd/UTuPxuMhlIVGlwQoZbhIE+i55fgUNqmiQ1dLSImwT1Q3ZIhooUDgF9VMKuaEwJ/obCUUkEoHX6xV2guqI7CzZL7LDNCAjWyu3w/Q5amfU9smO0bsg+0P3CpzI8aD2x/QP1kjWSNZI1kjWyAtfI/tiQEIKyWMnb6B0cxTbCEBUEH2WvAUELbFT449Go6LjAScOX6TOQEazpaUFKpVKCJBKpRJx6pRAS4f7kThR3LB8ydLj8QgDRkbFZrOhvb1dbJUaDoe7xI1T47FYLCKGValUwmazCY+dSqVCamoq6uvr4XK5kJqaiv3793cJy6CGodPpkJKSgmg0Ks4eoTCJvLw8sYRN3gu3242cnByEQid2fqK60Ov1sFgsOHbsGBKJBNLT04XXRKFQCMMqSZ07UdG9U7y4wWAQHYZi2VtaWrokN9LSNTU0tVot3ht5dGKxzkRNipPtHo+clJQkDCHVAbUHWmamEIRIJCK8CSTUFHoijweWx6VTrDF5TCiuWf5vOjciEAgIL408fl/u0aVEX4VCIfIYkpKSRNsgAwEAVqsVQKc38NChQ0J8KHGWvGDyMz7IG63T6dDS0gK1Wg2r1SoEgd47eQ1pWZ7O0klKShJL9vL3ScbVYDCgoaFBtGES3La2NjgcDjidTpHorFKpkJmZKXYxk+cQKBQKMdghg9vW1oa2tjZkZWWJsAB6ZrIDFH9Pu3mRcHu9XpF7YLPZkJKSIkKqyLOm1WrR2NgIlUoFj8cjPMLkUaSQCgqHOX78ONLS0sQAgHIL5MnbNDgim0Mec3kb9Pv9ov1Se6QBH7UPh8MhhIE8+z6fTwxE9Xq9WNWgQQF5CElcyZ6QV44GfWRH2traxOAXOLH1MH2evI8U4kL9jtqKPPSM7DfZamrrZHNogEliQ8LK9A/WSNZI1kjWSNbIi0Mj+2JAtoWnLVfpwehmqUNSR6SZJQARixyPx8V1dC1w4mRteaIkzeLJ00ENJisrCwqFApWVlWLpWafTiZAIiiMmDxsA4XEAIGJT6SXSzDwWiyElJUV4A+hz4XBYVLT8hZtMJhFTTmEBdF4G0OllCQaDOHz4sNh+NSsrC1VVVdDr9cjJyekSv0uCplKpkJGRAYvFgsOHD4v4XfK21dfXw+l0wmq1iu1PqSM2NjZCkiQ4nU6o1eouyYDUMVpbW8USss1mE/VPUCOnpe9wOAybzSaMqVarRXJysoidpY5KoQEU8jB48GCxUxA13GAwKHZgisfjSElJEcvL1Eno/dOggmLUvV6v8GYcO3YMdrtd7K4VDAZF7L/Z
bBbL5JFIRLxzGhyQx5K8gSTYtFQvT7z0er3wer1wOp3i0EK5MFFCMD0ftZdoNIrBgweLLVJDoRC8Xi8MBgPMZrMQYUmSxG5HiUQCKSkpoj/5fD7U19cLbyglR1McfyAQEN4/Ct9QKDq33VUqlaiurkYikUBNTY0wJnq9Hunp6aipqREGk06dlyQJHo9H9KWmpiYRilBbWyvip9vb20VcOoWB0M5jHo8HFosFPp9PbEFLHl9qh+Slb21tRUdHh9i+mNqHXq9HY2Oj8NqSZ93hcAhhJuNOce40SLRYLKipqRGeVjLmFG5D7ZTivEn45J5mCu0hD25dXZ0YtFD+Au2YRZ5Eqn8K5aFEbXp2EhOyMzSoJJGlnBMAXdoS2RoSIbJn1EZISCm3hHbqorwcugcK/SK7RIJFXnUScOr/ZCuoDzKnD2skayRrJGska+TFoZF9TbrOeMJFs1v6IWi2TTP01tZWsWwpv0lakpP/P71gmk2TR41ihcnwK5VKcYZFcnIykpOTEY/HReKlVqsVW8rSC2xsbBQdm66nOFwSm7q6OlHZHo8H+fn5kCQJDQ0NwhOk1+vFUq7X60VVVRVcLheGDRsmGjh5u8iYyQ2ZXq/HsWPHoNfrYbfbxZIo7aJEXri2tjYYDAZkZWWhra0NjY2NSCQS4uyK9vZ2HDx4EMOGDYPFYkFDQ4O4d1r6Bjq9R2lpaejo6BBbiSoUnbsMUdyz3+8Xz0j3Sp4KanAUvtDQ0CCEnjxfVI/kNbDb7SI3AAAOHz4sOgqJnlLZucVmKBSCyWQSMd0AhIB5vd4uxpFEhQYxra2tUCg6z0OhgwLJU0OeYOpsJAz0eyovFouJbXzpe8i4qVQqYeBI5KiTUhw/tWEK/aBYYAorUCqV4qBGADAYDMIwUOyyWq0WOxlJkiS8mJSXQAYiHA4LrymFBVB71+l0OH78uLi2tbUVDodDPGsikRBhF3q9HvX19aINUHgNxfFTjgC14YaGBhFCQ57XWCwGs9ksPMF0H8eOHRPecvKgezwekZROniT6O3npSXQaGhpE3oVer4fH4xGhNElJSSKngjzc1JaDwaAQHmp7arUadXV1YvADdBpoaiPUnymBHYBow5RwnZycjGAwKN4viRaFWSQSCdFGyMZ5PB5hpyjRmtoyhbhQyAuJm0KhEO+aBIXyVuS5PzRgoIEMiZ3P5wMAEYNOA5NAIACz2SxCxshG0nXkgVQoFKJOKGyI7o9DCn8crJGskayRrJGskReHRsojGLoj9fXH0yE7OzvxxBNPdBb236U18iCQ5448LzTzk98UiQd58eQVSzHGJDLyiqQXRgJmMBi6xEwrFJ2JmGRoyPPmdrtFeAAtKZMQJhIJEVtNzyBJktgNhmKaCTKufr8fgUBALPvTC6FnIO+Mx+MRgkcNnYy02+2GyWQSRoq8QuTNSEtLg9/vF0u7tD0neZRMJhPS09NRX1+PoUOHio5Py7YAxJaplCRLIQe07EzCQXVODZoMJYW/UFw/GQ7y0pIHhIwWlUkxzPRsZOhoubp7GATVLV1Lngt5oiQZEI1GI5an6VnJq0vX0RK0RqMRQkvvl9ohefLIwKnV6i5bqOr1etFWSGjJIJBXg+pHqVSKTksGlepCkk4c1kceZOoPFA5Ev6O2QQaHciMonp08cPLwDwovIANEhguA8DqTF5d2r5K/U6pbMn506CKdbUFtigZ1ZMRpYEZhMvK+Te+MPPYUzkH1S6EU5HGTx49TLgudkUI5BpT8T6FXJBrkvacBB3nF5B532smLnofaJAk+1avcTun1euF9p0EetUcaPNA9yEMY5PkH9Fm5kJJHkN4n9X2yM9THqH+R9xFAl++lZ6UyyO7KQ55oNYAG6eRFl7d5sgHUH6kN0juNxWKYP3/+94lEoqi/WnGxwhrJGskayRrJGnlxaOQzzzyDysrKHpe5znjCVVRUlNi+ffuJAqUTyb/9Qd6xGYZhmHMThULBE65+wBrJMAxzcTBmzBh89913PU64BmTTDJopyz1wFH7QPZ5R/m/yfMiT4+QzxTOlvwJFn6UZbvd77qms7n8bqHvvXmZfz9K9ns9nYe5ef92fYyDr92zSU9s/298nR94G5G20p/rt7T7lZVJ5A/1c8nJ7+ltf9Nbn+tMXT7efdK/D7td1/84f2wcH0o709r5Pp/yzYc8uVlgjWSMHEtbIH/99clgjWSPPhkb29UxnPOGi0AQAaGtrw549e5BIdO7ek5OTA6vViqamJlRUVCAjIwOZmZldQg7cbjf2798PAMjLy0NaWlqvD3o6L6j7Z3oSs97KoKVCAF0OxjudCqUQgf7Q3wbTn8+fL0b3VJyvz/FT33dP39fXQK6v33X/m3yQRQZKfh0t0ctDjHqCru+tHPnn5IPKvvpfT4JJ/+2p/J4MLIWxyH9OF/o++WYS3UNxiL7KldcLfbb7ILL73051T92ftXu99/dZmR8Ha2QnrJFnh/P1OVgjT4Y18vzXyL4+PyDncNHN79+/H7fddhuAzhjUsWPHYsGCBThw4ACef/55zJs3D3fddZdI5PR4PHj22Wfx+eefQ6lUYvjw4Xj55ZcxdOhQESPa2wxUXin0bzLo8n8DEGV1r9juNDc3Y+PGjdBqtZg4caLYJrenZ+3pHuS/7+7FlCO/5nw1lszFAbVd+fatJpOpS3K22+1GNNp5/oo896CncqLRKPx+P8LhMEwmk9gVibaHpusoubm3/kG7ZtGOWZTXQAcjUhIvxaQDnXaAzjKhAS1tLR2Px7scBHm6dSNJnfHlzc3NiMVi4gwf+Xd2dHSImHWqm57sD50FJEkS7Ha7SNSm76JduSguvrdBM91XMBjs8jx0n0qlEna7vcsW4szZhTWSNZK5MGGN7LtuWCNPMCATLiIWi6GlpQWjRo1CcXExli9fjkQigUsuuQQ+n08khFJYxbJly7BkyRLcdNNNyMvLw+OPP47hw4fjhRdeQDweh9/vF2csOJ1OkUDZ1tYGv98Pq9UqDhmMxWJITU2FQqFAXV0dJElCRkYGwuGw2IEpNTVVJAc2NTXBYDCIhEC1Wo2dO3fixRdfhMlkgs1mw6hRo8TBenISiQRaW1vFCe6pqamiUdHJ4DabDTqdTuyCJEmSONvA4XCIMz1YUJhzFbkA7NmzB6tXr0ZtbS3y8/MxZ84cuFwubN68GZ9//jkCgQBGjhyJOXPmIDk5uUu7pnLC4TB27tyJNWvWoLGxEYWFhZg5cyZaW1uxbt06kUQfiURQVFSEsrIycS9y4xmLxXDw4EFs3LhRfO+4ceMgSRI2bNiAr7/+Gu3t7Zg8eTKmTZsmzgxpbGzEunXrMHPmTKSkpCAYDGLDhg1Ys2YNFAoFiouLMWvWLJhMJvGdp6qbSCSCTZs24ZNPPkEsFkNhYSFuueUWWCwWRCIR1NXV4dChQ8jLy8OQIUN6Lc/n82HlypX46quvAACTJk3C9ddfLw5ldLvdWLNmDQwGA6699loh2t3vh7bCPXToEPbt24e5c+eK+/z000+xdu1aGI1G3HTTTRg3bhzvOvgTwxrJGslcOLBGskb
2hwGdcNHONnl5eXjggQfg8XhQUVEBh8PRZRatUCgQDoexbt06KJVKLFq0CE6nEx9//DEOHTqEeDyOhoYGvPfee9i7dy9MJhPKysowZcoUHDhwACtXrkRlZSUuvfRSzJ49G+vXr0d5eTkeeughaLVaPPfcc8jLy8P8+fOxfPlyfP3114jFYigtLcWNN96IxsZGvPzyy0hJSYHf70dqaipSUlLwxRdfoKqqClqtFi+++CIeeeQRjB07tsuBfgBw5MgRvPfee/jhhx+QlpaG6667DmPGjMHOnTuxbNkyNDc34/LLL8eoUaOwdu1aVFVVQZI6d3JqaF7AP9kAABNwSURBVGjA1VdfjVtvvVXsLMUw5yqJRAKVlZX429/+hiNHjiAzMxNvvfUWzGYzJk+ejMWLF0Oj0WD48OF48skn0d7ejgcffLDLzkBApxfryJEjeOWVV1BfX4/09HS8/fbbMBgMyM3NRUNDAwKBAOrq6rB27Vrcf//9KCsrE/krci9WQ0MDnnvuOZSXl8PpdOLTTz/FAw88AKVSiRdffFHsVvb8888jKysLw4YNw8aNG7FmzRqsXr0aEydOhMPhwK5du/D4448jLS0NDocDr7zyCiwWC6ZPn37KQR556Pfu3Yv7778f6enpyMnJwR//+EeYzWZcf/312Lp1Kz777DMcPHgQDz/8MHJzcwF0bsd84MABOBwOZGRkQKVSYfXq1Xj00Udx9dVXIxAIYNGiRcjKysLYsWMRDoexadMmLF68GKWlpbjqqqug1+vhdrtRUVGB1NRUpKWlQaFQoL6+Hlu3bsXf//53KBQKzJkzB5IkYceOHXjqqacwevRo7Nu3DzU1NRg+fDgcDgcPaH9CWCNZI5kLC9bI3uuFNbIrAzrhotCAjo4OJCUlYdiwYSgvL0dzc/NJMZOBQAC1tbViq0UAuPfee2GxWBCNRvHaa6/hzTffxIQJE7Bjxw5UV1fDbDbj448/xrfffovMzEy8//77YivUFStWYPz48dDr9fjwww/x5z//GV9++SUee+wxWCwWSJKEL7/8EhkZGXA4HFi6dCnUajVGjBiBCRMmiK07SRDl8enyuNBwOIwXXngBn332GSZMmIBvvvkGNTU1ePjhh/Hqq69i3759yM7OxjvvvIO9e/fiu+++E95G2uqyvLwcZWVlSE1NHcjqZ5gBQ95fy8vLceDAAcybNw/XXXcdfv3rX+Orr75CTk4Ojh49it///ve45ZZbsHHjRnzwwQd48MEHTzJO5AE8ePAgHnroIVxxxRVYtGgRvv32W0yaNAm/+93voFar8dZbb2HLli246qqreo2fbmxsxI4dO3DbbbehuLgY//d//4evvvpKbK38+OOPIx6P44477sA333yD7Oxs1NTUYNeuXQiFQmI72E8++QSBQACvvvoqYrEYFixYgPXr12Pq1KldDvLsCVqB+Pjjj3H8+HF89NFHyMzMxH/+8x8sX74cU6dORX19PX744Qd4PJ4uoQlNTU14++23MW7cONxwww2QJAnvvPMOnE4nXn/9dTQ2NmLs2LFYtWoVLr/8ctTU1ODdd99Fc3NzF69dRUUF3nnnHUyZMgVTp06FRqOB3+9HZWUl9u7di8LCQvHZZcuWwWQy4f7778cXX3whVjSYnxbWSNZI5sKANZI1sr8oTv2RH1Hof+NWw+FwF7GQJ/nRAWIAREVv2rQJW7ZsQSgUwpIlS1BYWIiXXnoJDz/8MIYNG4a9e/di165dmDJlCp599lnk5uZi7dq1GDp0KAYPHoxVq1bho48+QkpKCqZPn45///vfcLvdSEtLg9PphCRJ2LlzJxQKBYxGIyZPnowXXngB8+fPx5w5c3D33XcjJycH+fn5eOqpp1BUVNTFw0YiuHbtWpSWluKll17Cr371K6SlpWHbtm345ptvMGnSJPzhD3/AkCFDUF5ejkAggGnTpqGgoACXXHIJMjMzxYnl7LljznUSiQTcbjcUCgXS0tJgtVoxYsQIVFRUiMNRN2/ejCVLliAYDKKsrAzRaBThcLjLTzAYRFNTE7RaLbKzs+F0OpGdnY3Kyko0NzeLM0jWrl2LjIwM/PznP0c4HEYkEulSTiQSgcvlwiOPPILp06eL2G+DwQC3242MjAwkJycjNzcX6enp2L59O9RqNebNm4fx48eLs2nIm0ifpeeqrq5GS0uLePZTcfDgQdjtdmRnZ0Or1aK0tBSVlZVob2/HrFmzMGXKlJNyVJKTk1FWVoaRI0eKc2oqKiowYsQIEX41cuRIbN26FX6/HytWrMC+ffuQlZUlwrsSiQTS0tJQVlaG/Px84eHMy8vDwoULMWjQIHFOSCQSwebNmxEMBvH6669jy5YtSE9PFwegMj89rJGskcyFAWtk37BGnmBAV7iATsEIhULYuXMntm7dCpvNBpfLBYVCAZ/Ph/r6emg0Guh0OgwdOhT79u3DDz/8gKSkJLz77ru45pprxKF81Hg0Gg1aWlqQkZEhKo0OYgwGg3A6nSgqKsKKFSvg8/lw4403wmazicMMr7rqKmg0GvzjH/9Aa2srEokEjEYjxo0bhzFjxoilT6PRCJVKJRp2bW0tUlNTcejQIVgsFgwdOlQ8J3UYt9uNmpoa0SjoYDqaqSsUCthsNnHwnPxQQYY51+nuQVOpVHC5XOJw1OTkZOzatQs+nw9erxdjx47FsmXLsGfPHnGIICWxHjt2TJzGbjAYYLPZ0NbWhkAgAKVSiZqaGuzZswfTp0+HRqPBX//6V3GiPGE0GjF37lzcfvvt2LdvH95//304HA5MnDgRH374odhNyWKxwGazoba2VhwOSSfLd3+uRCIBrVYLu92OI0eOwOfzweVynbJeAIh7I2/eoEGDRO5Kampql1APusZqtWLGjBmiLBJEQq/Xw+VyYfv27di2bRtWrlyJm2++GVVVVcJDCQBpaWlIT08X11GeDdkiefm1tbVQqVQYPXo0du/ejX/+85+YNm0aCgsLOaTwJ4Y1kjWSuXBgjey9XgDWSDkDvmlGJBLBtm3bUFdXB4/Hg3nz5kGhUCAQCGDVqlUoLy8H0Hk42HXXXYevv/4aixcvhl6vh8lkwi9/+UtoNBrceuuteOONN3DffffB4/HAYrFg1KhRqKiowJo1a7Br1y7U1tbihhtuwODBgzF58mQsXboUwWAQ11xzDRQKBSZOnIhly5Zh8+bNMBgMCAQCKCoqQiKRQDgcPsmgu1wuZGdn49tvv8XixYtRXFyMGTNm4Mknn0Rubi5ee+01GI1GlJWVYcWKFbj33ntx+PBhZGdnY/LkyaiqqsKGDRtw6NAhtLa2orS0FNu3b0ckEkEoFBJejHA4LBo1w5zrdDdM9fX1SEtLw+7du1FRUYGnnnoKOTk5qKiowLvvvos777wTLpdLJLMCgFqthtvtBnDCC97S0gKbzSaS47du3Qqfz4dJkyZBrVbD4XDAaDSeZGiVSiUOHDiAP/3pT2hpacGDDz6I/Px8ISQA0NraipaWFmRlZYmdj+TPQRsTkKiEQiE0NTXBbrcLr9bpGFgqg4SqqqoKDocDDoej12
s6OjoQCoWgUqmEMMh3c6NQsry8PCxduhQVFRWYMGECqqurEY1GsWnTJlx55ZXQ6XSIRCJCQHpDpVLBZDKhoKAAd955JzZt2oT169dj586dQkyYnwbWSNZI5sKDNbJ3WCNl3zMgpfwXm80mdjxJT0/Hz372M0ybNg27du3CxIkTRYhAPB5HLBZDSUkJnnvuOaxcuRJutxvPPPMMJk+eDKVSid/85jewWCzYvXs3CgoKMH36dIwZMwY2mw0pKSmorq7GjBkzMH36dNjtdowdOxazZ89GNBrFFVdcAUmSMHXqVLz00ktYv349otEoHnvsMUyZMgUejwe33347CgoKusziHQ4H7rrrLtjtdhw/fhzDhw+H2WxGQUEBMjMzkUgkoNFosGjRImRmZmLv3r2YOnUqZs+ejdGjR8Nut2PVqlWorq7G7NmzUVxcjOzsbAwZMgQulwsdHR3w+/1obm4Wu7wwzLmMJElIT0+HSqXCsWPH4Pf7sX//fgwaNAiBQACtra3IyclBcXExtFot9uzZg4kTJ2L8+PFdyonH4/jyyy+xYcMGHD16FIMHD0Z1dTUyMzNhs9kQDAaxfv16pKamYsSIEdDpdJg7d26P20V7vV689tpr8Pl8eOKJJ1BSUoJ4PI7MzExs27YNLS0taGlpQUNDA2bOnAm1Wi086eRNUyqVGD16NHbs2IGjR48iFoth//79KC4uht1uP+36KSoqwvr161FZWYnBgwdjy5YtyM7ORmpqqkiKlm9/LUkS6urq8Je//AXFxcWYOXMmdDodRo4ciT179iAUCqG5uRnl5eW4++67YTabUVRUhOrqajQ0NECSJBw4cAAlJSU4cuQIlixZgokTJwoBpjqi7yWPXlFREXbv3o1oNIpQKAQAYutxXt366WCNZI1kLixYI/uGNfIEAzLhopsZOnQoXnnlFSiVSuGN0+v1GDt2LPLz8xGLxcTWlQaDAQ6HAzNnzkRJSQkikQhSU1OFkc3IyMA999wjtpW12WxQKpUoLCxEdnY2gsEgLBYLTCYTJEmCzWbDokWLAAB2ux2SJMFsNuOmm27ChAkTEIvFYLPZYDQaYTAYcN999510noBCoUBJSQny8vIQiURgt9uh0Wjw6KOPigRhSZKQm5uLhQsXwuPxQKfTweFwQKlUYtSoURgyZAja29thsVig1+uRlZUFjUaDjo4O4THo6OgQDZYHO8y5iHyL2csuuwwFBQV44403sHr1auzevRu//e1vMXToUHz00Ud49NFHUVBQgN27d2Pu3LnQ6XQnhQAkEgkUFhaioKAAr7/+OpYvX466ujpce+21sNvtOHDgAL7//nuMHz8eKSkpwkbIxYT6zoYNG/DJJ58gPz8f69atw7/+9S+UlpZiwoQJ2Lx5M55++ml4vV7o9XoxsEwkEggGgwgEAgA6wxxmzZqFpUuXYt68eXC5XAiFQpgyZYrIl+krpIn67ezZs/HBBx9g/vz5GDZsGKqrq7Fw4UKo1Wp0dHSIe5Yf+kgx+g6HQ+TszJ8/H7fddhvuuOMOhMNh6HQ6zJgxAzk5Obj++uvR0NCA1tZWSJKEG264AVarFR6PB9nZ2UhOTj7pIMlAICBya+ia1atX484778Tx48eRk5ODkpKSLs/CnD1YI1kjmQsL1kjWyP6ifPrpp8+ogDfeeOPpBQsWQJIkqFQqWCwWJCUlwWg0ilmzRqOBxWKB1WoVP2azWcwwzWYzrFar2BaXfrRarSiLXqxCoYBOp4PZbD7ppHv5YXOESqWC0WiExWIR5atUKuh0OqjV6pNO/qY49aSkJHH4m8FgELszEVqtFmazGUajUcSo0s5NdG9KpRJarVbE42u1Wuj1ehgMBt7uljkvkCQJRqMR2dnZkKTOc3SmT5+OmTNnIi8vD5dccgnC4TBCoRCuvfZaLFy4UByG2v3HZDIhNzdXxHLPmjUL11xzDZKTk+F2u+H3+zF79mzk5OR06Zfy/hmPx1FRUQGlUgmXy4VIJIJIJIL09HSUlpYiKytLnD80b948XH755VCpVFAqlfB4PLBarZg2bZqwCcOHD4fH40FSUhJuvvlmsftS97j83rBarSgsLERTUxOUSiV+8Ytf4MYbbxSeNL/fD6PRiJKSEqSkpECSJBgMBgwfPhzZ/00iTiQSGDRoEPLy8lBfX4+kpCTcc889uPLKK2EwGGC1WqHT6VBbW4v09HRcffXV0Ov1MJvNGDFiBAYNGiTCJeiejx8/jksvvRSlpaUAgMzMTGRnZ6OxsRG5ubm45557MGLEiC7283R55plnGp5++uk3Tr8VXdywRrJGMhcurJF9c7Fp5JtvvokFCxY802NbOdPYxKKiosT27dsB9LxjiXxGebr0dk1fv+/+/adbRvdKPNW99vfeTlUWw5zLyA8LDAaDiEQi0Ol04oT7WCyGQCAgElUNBkOPBorKoST+jo6OLuV0dHQgEAjAYDAIQywvQ963gsGgOFGeoLKo/Hg8DpPJBJVKJQaB7e3tCIVCsFqtIpQgFovB5/MJb6F8gHq6dROPx+H1esX22/RMAMSmBnq9XsTJ0zXdBSsajcLn8wkBJ1Gj+vf5fACApKQk8UzycuR15fV6xQoGEQ6HRfI1DbpPVzTlKBSK7xOJRFG/LrqIYY3s+/enKothzmVYI09dNxeTRo4ZMwbfffddjxcM6ISLYZgLD7nRlO8sRn+TJ+LKt7XuqRz5T/fP9mQYT1UGIQ8XoCTk7saS7r/7IZHdv7c/xpXuo7c6oL91/50cuu+e6kZ+DV0nr/vu5RAUmtZd0Hsrvz/whKt/sEYyzIUNa2TfdXMxaWRfE64B3xaeYZgLCzJ23eO1yRDJt6Q9VTny/56q/L7K6ckInup+5NfRd0qS1OXzP2YCcqoyequ3nn4n/1tP9XSqcnr6Tvmz9lU+wzAM039YI099P6yRPOFiGOY06M3w9NcgnWk5p/pcX3/vjzE+Xc7kfvrz2TOtZ55cMQzDnD1YIwf+fvrz2fNBI/lkQYZhGIZhGIZhmLPEGedwSZLUBKBqYG6HYRiGOcfJSiQSKf/rmzhfYI1kGIa5aOhVH894wsUwDMMwDMMwDMP0DIcUMgzDMAzDMAzDnCV4wsUwDMMwDMMwDHOW4AkXwzAMwzAMwzDMWYInXAzDMAzDMAzDMGcJnnAxDMMwDMMwDMOcJXjCxTAMwzAMwzAMc5Y44wmXJEmXD8SN/K+QJKlEkqTU//V9/BgkSbr8PL73KyVJSv5f38ePRZKksedx3f+/87zux5+vdQ+c//XP9I/zWSNZH/93nM8aeT7rI3D+2+jzWSPP97rvCz6Hi2EYhmEYhmEY5izBIYUMwzAMwzAMwzBnCZ5wMQzDMAzDMAzDnCV4wsUwDMMwDMMwDHOW4AkXwzAMwzAMwzDMWYInXAzDMAzDMAzDMGeJ/w9aiLv/t+87XAAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [], + "needs_background": "light" + } + }, + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1wAAAFDCAYAAAAu+g+jAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nOy9ebht21nW+Y3dnHNvSEsSQqNRkBIsKEQEAS1ERR+KVAC1RCwCCFEioKKGLtIEiCARpC2waKURKjRGKXrFQvQBCRRYYBAsH4qQgIQQEiMhhHt2M+uPtd69f+vd75h7n3P2DbnnfN/znLPXmnM0X/+O+c0x5xrLslRTU1NTU1NTU1NTU1PT9dPe7zQDTU1NTU1NTU1NTU1N9yr1BVdTU1NTU1NTU1NTU9PDRH3B1dTU1NTU1NTU1NTU9DBRX3A1NTU1NTU1NTU1NTU9TNQXXE1NTU1NTU1NTU1NTQ8T9QVXU1NTU1NTU1NTU1PTw0R9wdXU1NTU1NTU1NTU1PQwUV9wNb1R0xjjF8cYrx9j/Cb+veUdjvURY4wfvmb+PmKMcbLl6zfGGD89xnj6JX0eO8b44jHGy7b9/r/t9ydtz//iGOPXxhhvgj5/dYzxQ/i+jDFePMbYw7HPHmN8/XXK19TU1NT0xkFjjA8ZY/zEFjdePsb4vjHG/3gN4379GOOzr4nHS8fa4tfrtnL8lzHGF44x9i/pM5V9jPGZ2zH/ItofbI/9XvC1jDH+CNq87Rijf4y26Q1CfcHV9Eig91+W5dH49yu/E0yMMQ4mp350WZZHV9Xjq+ofVdW3jDEePxnjRlX9X1X1DlX1P1XVY6vqPavqVVX1R9B0v6r+1iUsvWVV/aUrC9DU1NTU9IikMcazq+qLq+rvV9VTquqptcGbD/yd5Osu6A9ucfO9q+qDq+qZs4ZXlP3VVfVZl1y4vbqqruXCsqnpdqkvuJoecTTGeMIY47vHGK8cY/zX7effhfMfMcb4hTHGa8cYLxljPGOM8Qeq6iuq6j23FbLXbNveHGP8w+3dpleMMb5ijPHg9tyfGGP88hjjk8cYv1pVX7fG17Isp1X1T6rqTarqv5s0+/DagMWfW5blZ5dlOV2W5deWZfl7y7J8L9p9flV9wuzCbUufVxuAmV0INjU1NTU9wmmM8biqel5V/fVlWf7ZsiyvW5blaFmW71qW5RO3bW5ud0r8yvbfF48xbm7PCcs+frt74uVjjI/cnntWVT2jqj5pi43ftT3+lmOMF25x9iVjjI/bHn/T7Vjvv/3+6DHGz48xPnw21hoty/LzVfUjVfXOdyr7lr6/qm5V1YeuTPcNVfVOY4z3voyvpqbrpr7ganok0l5tLn5+T20uXl5fVV9WVbXdhvelVfV+y7I8pqr+aFX91LIsP1dVH13bu1HLsuhC5vlV9ftrk+zftqreqqqei7nevKredDvXs9aY2lbWPrKqjqrqpZNmf7qqvn9Zlt+8RMafqKofqqpPWGnzz6rqN6rqIy4Zq6mpqanpkUvvWVUPVNU/X2nzqVX1HrXBsj9Ymx0Tn4bzb15Vj6sNxv2VqvryMcYTlmX5qqr65qr6vC02vv92q/p3VdVPb9u/T1X97THG+y7L8ura3I366jHGm1XVF9UGY78xjXWZYGOMt6+q96qqn78L2auqlqr69Kr6jDHG4aTNb9XmLtnnXMZXU9N1U19wNT0S6DvGGK/Z/vuOZVletSzLC5dl+a1lWV5bm+TJitVpVb3jGOPBZVlevizLf0yDjjFGbS6i/s6yLK/ejvX3a3eb3mlVfcayLA8ty/L6CX/vsb1j9ttV9Q+r6kOXZfm1SdsnVtXLryj3c6vqb44xnjw5L4D59O1Wxaampqame4+eWFW/vizL8UqbZ1TV87Y7Jl5ZVZ9VVR+G80fb80fb3RS/WVVvNxnr3arqycuyPG9ZllvLsvxCVX11bbFxWZZ/WVXfXpvt8U+rqr92BzL9+zHG66rq52pTXPxHk3ZXkb22fH1nVb2yqv7qSrOvrKqnjjHe7/bYbWq6O+oLrqZHAv3ZZVkev/33Z8cYjxpjfOUY46VjjN+oqn9bVY8fY+wvy/K62uwH/+iqevkY43u2FbRET66qR1XVT+qCrjbbEniB88plWX77Ev5etL1j9oSq+s7aVOtqjPHUgZd9bNu+qqre4ipCL8vyM1X13VX1nJU231tVv1x3BnhNTU1NTW/89KqqetIl28ffsnZ3Vrx0e+xsDLto+a2qevRkrN9TVW+JQudrqupTavP8lOirquodq+rrl2V51RXlIL3Ldv4Prqp3r81W/Nq+DEO4+Yy6muykT6vN3b4H0sllWR6qqr+3/dfU9AajvuBqeiTSx9emMvfuy7I8tqr++Pb4qKpaluVfLMvyZ2pzYfOfalOZq9rcESL9em22I74DLuget32QtyZ9prTdJvgxVfVhY4w/tCzLy/iyj22zf1VV7zvwBsJL6DOq6qNqs61jRp9aGzB81FV5bWpqamp6xNCPVtVDVfVnV9r8Sm0ulERP3R67CjnO/VJVvQS4+PhlWR6zLMvTqs62z39VVX1jVX3sGONtV8aaT7qhb6uNfM/dHns/4OY319Vk55g/UJvtiR+70uzravOSqz9/VV6bmu6W+oKr6ZFIj6nNhdJrxhhvWpuLkqqqGmM8ZYzxgdsLmodqs23idHv6FVX1u7T9bvuSi6+uqi/a7kWvMcZbjTHe904Z2+5v/5rafQ6M9E9qA2YvHGO8/Rhjb4zxxDHGp4wxnhbG+/mq+taq+riVOX+oqn6mqv7ynfLd1NTU1PTGScuy/LfaYMqXjzG0y+NwjPF+Y4zP2zZ7QVV92hjjyWPzEyPPrapvuuIUr6iqt8H3H6+q125fGPXgGGN/jPGOY4x3257/lNpcWD2zNi94+sZx/nZAH+sq9Pyq+qgxxpv7iSvK7vSpVfVJs8m2d/o+o6o++Tb5bGq6Y+oLrqZHIn1xVT1YmztUL6rNNkDRXlU9uzaVvVfX5tmuj9me+8Gq+o9V9atjjF/fHvvk2lTDXrTdnvivar6v/Xb4e9oY4538xHY7w5+uzZ23H6jNSy9+vKqeVFU/NhnvebXdbrFCn1abl3s0NTU1Nd1jtCzLF9QG2z6tNs8p/VJV/Y2q+o5tk8+uzcuW/kNVvbiq/n1d/RXoX1tV/z2ekz6pqqfX5gUcL6kN1n5NVT1ujPGHt3x8+LbdP6jNxddz0lhXlO3FtXk04BMn5y+T3dv/SG1wdY1eUFd/nrqp6a5pLEv/5ltTU1NTU1NTU1NTU9PDQX2Hq6mpqampqampqamp6WGivuBqampqampqampqamp6mKgvuJqampqampqampqamh4m6guupqampqampqampqamh4mu+kNyUxpjLPb97P
OyLGff08s5vO3a+bXxxhgXvrMNz8/4uR1eZuP6HN428e3yzc41PbxE32q6XlJsePxel77X4s6Pr/UnrcXzLG6TbJflh8Rryk1r+e2y8a8i64zvCf36sixPXmvQdE6NkY2R9wI1Rj581Bh5b2HksixxgitdcI0x3mNZlhfNzu/v7+/8dUb29vbq5OTkQr+9vfMbbHK2MPfZ+fT59PR0tQ0deW9vb+e7jy+eTk9Pi8S5xLPzsLe3twoy7pAci3yyDed2fk5OTs76jTHOeHb+xxhn+ldbzpEc1XWqcdWWvHMs6oD653icI/GleU5PT3d0QBkpp/5yDPej5Fve/vT0tPb393fm2d/fP/vsflRVF+TiOerj5OTkbOy9vb06Pj6+oMvkz5RFvLis7pMuo8j76Rhj1u2a4ob6cJCY2cHPeQy6r1M2nyvFp8s+ixnnVTY5Pj6uZVnOvqcxE58zPaW+LvP+/n6dnJzs+Dvbuq+n3DiztwP3LOc5bylveR6tqjo+Pn7pBWbuY7oMH6saIxsjGyOpA9dHY2Rj5L2CkcfHxxf4OBsnMXk7tLe3t8ghnTkyUnXROSS49zk4OLgAPjMw4Rg+H3g8S04MvOTEyYF8XhnXEw3l59xMLB5A3scTHw19FT5d17NA5bjJLuyrwFMA+JgEMrenbCkbpISV9OZA4u2UoKkzAsxa8CSd8+/+/v5ZcpF8Pp77sAcwSYl0WZa6ceNGHR0dRaD0xYH0R1tStlnyoJ9qHPcfjuk6pq7dpvQVjX9yclIHBwd1fHy8k1BddsYOvytB7e3t7Sw8leg9yeuc653y08fEwyxBc8Gk9vpO27iOPQ/ouPyHx2hrzuE+MLOj2jD+Up5wHXEc8k2ZqJME9Jx7+/cnl2V512q6EjVGNkY2RjZGNkbW2fF7GSO3PMaAupYLroODg51J3ZlmAtNoqb2CWYKkMaqysXjcq0RrMqfqEeed8UpnpVFmAe9zMslqHFZ2ZomW4/IvHcZlmAGzJ18mVAFCAhLnz5O5JzUnT1LOL/U9xrgQrF7NSaDmcztIK0i9AqM2rOTN+HZ9uy6VLJU8UyKnvr0/E9hlfsw+GodgwsrirDqUxlLfqotAwcUH7eQANItf1yNtQ13oL4/7uA6OySdnCX8tbjkux6DtU2ylOJFuaHMHRY9N54HtuQCfVXRJKe41drKZjvUF1+1RY+T5+cbIc3kaI8/baKzGyMbIRzpGbuMkXnDd9TNcZKpq90qZiVfn2N4V7QLRkDMg8NunDHDOsZbIdZ7OlozgRmSweJLjX+fBAWvmNDP9iRKPKXHrc3LoqnOg8MSnc+5gnMvt5pUu8k2g80THv05sR3t7YNKWngwSoHhC8WqgyBcKvtDgOFy4OIAk/6c8XtlyHaUqoldh6DM+BkmVNk+I9M0EXJ5oUpynqpaIAJBiWhVeXzA4UXfJf9aSr+ZJY3q/2bhegZ7Jc5U8MMtNass84zkx8c1jvpiazZPkcznW+Gxap8bIxsjGyMZI10dj5L2HkWt0LRdcSRnJ0AwcKtkTur67U3tSSnP7lT558AQgx/VxkqNRybM+dCZ99wSh9ry1nBKv6zAFh8bnLdRU5WSScHlSALgD+pzkU98FSDrHROoyuGMq+Tq5LGOMnUStPkx6HC9VMCk//60lL+oiPWeR9MKx3T7kl8CUAJZjz6qWngBmiwxf6HiFxvmm3RKI0qfdH9y/Ul/Guujg4OBsO0kCS6c1wKTvcsHktvCYnQE5x1yTl3xRT/IzbTtYizuCuqq9qiDrvNpyzzj58/hdozXf85zadPvUGNkY2RjZGOl863tj5P2Bkdd2wZWS0ywZUlkpOLUnlZVAju9KmlWlyI8CcQYYHkwcKzkMkyfl0nypEsN5JVPa2+9yOhDxPPf2+nxMMjPZ3JmdF9f7Wv+0QPCHX5kQGRzuwGvBmWzt/KXFjSccBmZV7Ty463JoDvkrj9OXfMGiY2lRdBml4E4y61xK9L5oolxcGKkNYzeBTlp80IazpOR24dj0UybGGcgS8AhWszzgfbnQY9tUsSKIehy5/9K2KZnLf5K/+0I5ycLqXapgM7d5jCbdez/PN66PNVBvWqfGyHO5GiMbIzmnjjVGNkbeCxi5RtdywTW7belBQ6E88blR0xzqQydYczhXUkpM5E2UDCGSTLzVPEv2HHctwfucpFR98lu1/jnp1G3jlIDP+9DOTDqcY81R1Z8BKqf1MTwZpoWeB57LnGRIbaqynikndbCmR78F7kHPyh3jhDpgkvfk7eOQPAZm/u6+SPm9n/up62c2ZkrULpPGTQAw45M0S3YEmJQL/E1xLjvtlvjV3GvnRQlkeVzkb/tK+ZTjU2/pPH3K8+5l5LLpmMZJFwBN69QY2RjJeUmNkY2RM5k0bmPkIwsjp/0uHfmK5AZISZIK57+qDDwJVDi2j8ME5ELzvPPtbTiHeHNnmDk62/pYNL7z44Gfqlr6p0pFSv6zAEs6ZxsfJyVh1xflmNnzMn273n1+Vk4ceBwAZ4Gc+OfCRJQSp9tztu0k9aONkj7dR1w+3/4gnTpoUGeeYHyBwc/UXYoX13/aUuFAqzaSn4la592HZmDievGElhKr68ftIXl50eB9vZI2i9kEjtLD/v7+hfyVbJMAw7+7H7nNmS8SmHsspRhOPHARd9WKc9OcGiMvtm2MbIxsjGyMnMl6r2HkXV9wUfn6rluCblCdXwMaGs0F8GSsY+6kKaGv8a+/7hTpuxzFHcINQX4YYN42AeBlCcJ1xnEkewJn/04n9OOeoKlzB+5EDjRecU06Sf5APXDuFDQaj/q4ik+kxOg8uO1cB66HtSpfsnXiMSVt5z+18WTreqT+PaE5yNLmHCPxPauy+nYN5yXpOyVMB5IZAPrcslGyv8c/27g9xhgXYl9/WdX3yp/LnT579Tj5jeeKmSyXxVKKaW/r+khtm65GjZGNkYkaIxsjeb4xci4j+Zz1I49vrBh5bXe4nBFndmZICc4ASz+ulgAozZNuI6uNK5LnaCC2938OdDNnSYGVEqTasNqT5vd+LrPG8LlS8rwsaXvCoTy0l8afjbmW9GbbARI4OV9rSZ102ZaDNJ5XLNw3ZuA7O0Z9efUrLR54Xsc8Get3R1JlzPuTqHeO71s8Eg+zMdO5GVhq/tlCR3/5zEaK9wT+1HUCmSSXj8eE7n1crpRgqd9ZxW1WdXfbrOW7JIPLTD2zjXLrbMHhlEC/6c6pMbIx0ucWNUY2RjZG3tsYeW0XXJxsFriJydTfyQOZiXeWLBkkCQwSf86TO5cnwjVZUiXGyUGA48+cxXWx5vA+T+LZ5Sf/azJS1pQ82S9VitLWGq/kkR9/kJK686BlsKZb2ldJQm43X2wkANFCxu3nt7KTvb2PA13inTqjHlMyFX/pOB9yTnJ6+8Qrq2O0rW89oA5cvpn+UwXO25CPGd9+bKYrPz5r4xW3NQBIwOM2cd9O/ZM+Z7E506cWKPSzNXv756Y7o8bIxsjGyMbIxsj7FyOv5aUZoqtWzkSz4PFbj8nRZkDE9inZ+nFXZlLumsM5iLqR19qmZERDe
zXOgXnGF/v45xRsyRGpS+eX8vEckyn1riTlP1rn89D2Tp600vYF5zMBhstOn/Xb0AQ+l5UyuN+neXjc28+SB/slflKssb/rmck2AQb1n+TVea9i+SJgFp++v5zzaFx9dh0kfXrcphhLslMe92OO61VN9zmXOS2aUpJO8UufcN0lvXEsl8d5IaV8ugacMz9rujNqjGyMbIxsjGyMvD8x8q4vuNwAniBnbeU8SfCUyD0xJPKA83k9WFLic34ZuDKCQM6rMF5dIF8MQL/qJ+8ug+/jJh/u6LPb3pSFeqfunQhslwWp/nqCYgCnW9Ep6TvPM0c/PT09e+jSk3/ih3pxH5Ueks+wsnUVH/SkM8Y4e4WzA2aqQCY/GmOc/TbFWtLRmD5G8jvala9u5Vhug7Tooj2cJ/bn+CTy4fqfASt1SOJDvvTtlLwpj8dA8p3EV6qG+mevzjnfkl/HZWf6j/NGO6Rc4218scKcwfNp3FlubLo6NUY2RupvY2RjZGPk7uf7CSOv9S2FZFhMXuVWfXI0T0z86/OINBd/hCzdlnfn9vmTESkLA9Tl4w/4uXOdnp5e+AHDpAc6uTuCV5huh9RvxiMdOtlDCTEl4xnY8Dznrbr4hiHy41tdPIkmJ2cSmN3Kph5IvsggjwmYk4+kYGRwU/7LwJRg7H48m3eWsDWvg5fzshaDPifHmNmDAJF05nrm5zV/m+mCn9cSvcuY/CHJnEDRF28+psf0TE9M8LQ7z1+mb6ekv5k+kj+zXfLDptujxsg6m7sxsjGyMbIx8l7EyLX57voOFydLyf2yRMPPVJBvHXDHdKIyfBxVejhX4snHSD/c6HPOZHY+ZqDm8lblvduz5KVjDgCpLXlmAnFH0fxenVT/NJdXoBQQrED6g6wC1mU5/xV7zrmmb352nXNLBeWnjj1w1V5t/AcemQxnAJoSi59z8HawdB9mu1SNYkxI3z6Px4CqddK59O/6Ij/Oq+amfb1i6PwlWzoopUqa+EmJ32PJ84svxhzUEgC7vqkPtyeJOpzlNo8/b0seyL8DiCd59wMtXmaLrssWU2kB5fptuho1RjZGNkY2RtIOjZH3Lkau0bXc4ZIwqjbpH4WW44lB3qqjorxixe8USEktOWm6/effZ8oRn+4USjBejSBv7qx0oL2981flkj//y4TGc0nnnqAODg4uyMz2KfA8WNMclyUyHvfKabr1L13we9W5L1A/s2qJEqDriLea3fZeSdF2C8pIG7nsnkxYHXFQTtUoD3xf4OjvDKwc7Ge+7JXPBDQEEreh257nZhXs2aInJUDZKSV3p6vInIDa5/bjSrYEMi50Lls0kD/19Xh22ROA0C+8rcdOiusEcGmLzwwcGEPuM/Jx2neWj5rWqTGyMbIxsjGS/XzMxsh7AyPX6K4vuDy5UGkyDpXhb9Jx4jkal3O5ATkWkzEDOrXlmLPvVKKSTQIo3wrgsvu46Ze80+cZ0clk7OPj47OxPTkleajn5OQOkBorBZ1XcpLsPKbKqAOaE9t4JcirTRo/VT+Z6F03x8fHF5KWeEzJRjx4gvCFhvsDk2+qgPKfg14CG+rCFy6Um8f9PMdMCyC3ifsAK1auG68kroGG+tO3KLMn8EQzP3cdn5yc7Cx6eJ4JWCR7pd8XIZ9abLo+PZbU3p9ZSPy7HVJ8avGiv6ma7XqmTn2sGZA23Rk1RjZGNkY2Rrr8jZH3H0aOuwXUMcYiRScDudNTCH3e29s7uz3tQcQx9JkCJ6BxB/cg0XF996Qso7Cd2vq8zpP30bieXDj3ZeDq1SPXseva+aUe0jgEAc1HUJ45sldUJCtlkg0dgCiDB5+DhvPtfKjP/v7+GaBSJ/KxNK4HMQOPyV8yOD8ur7fTHLNkSF5mvuBJSHMQZPy8Jyyvlnk/Aqb6Men4go3yOaUYcd2xHbcl0U5rlOSd5R1Pui6Xx4jnKJ+X8iR/mYGCANYr4e77sznXcgxznuexMc4fSnceZnxybl80HB8f/+SyLO9aTVeixsjGyMbIxkhSY+S9i5Hb4kRMVtfyWnh3BAeQ5ED+3bc4pKSSBPZAk3OqXbrV7UGbDCSeXKbU3vlzQ/EcHZr6E6CmOdwB2ZaOIyD1oBHNkgB14ufTYmAWxOLLt5CwD9v7w9Gca1Ztdf1wzOPj46ifmewMNvKo/gxyghQrj8kHCMTkZ+YXDjhMCgICT3z0oaRvkQO4+KOe5X/UD5MRdeTA6LaTrMmH2Jbt6bOM37SNQDxSN6JkR18MOi/U98xP3Sb8m3zU45UxQB+gLS5L7mkBxdzHWOJnju/5bS3eddz1dhWgb7pIjZGNkeKlMbIxsjHy3sXINbrW18K7kLOEROUzyGbG9KtS/8w5HDy8DR3dHcL5VELzyqTz6cCkIGVSIbFKoD5eXfOkRBmSYV1eD7KU/Hx+t40HInUufnXMnZtJ3BOWj5fmT9WcGfBoPlYYfC4PCH99LJOg+D44OLiwh5zJNY05xth5ToA2pI3d1owXJUn3dQ/0tACbLdxUgSGflMv5ob0oi2SY7a1PiZlzpKSlMT2xejzys/TqvsZ+njxn+UnjJsBUW4JD+q0cAobHp/sJ84r8O1X1XGafJ7VZW4CxrfvUbIFInquuXl1tOqfGyMbIxsjzMRsjGyPvZYxco2t7aQaZSNUAMkrG01WwnFUJQuMl5/K+rhw6j9Ps1a+eKI6OjnYSgicFr254ok2GoEweTOKHAUu503heNT04ONjhKQUJ5xI/Xg0k8OqYPzTrenOeZUPKIB0QPHg7d20xwERCAJM9fR+xVwJ98SD98fzBwaYWofkoR5LRQUTtfF8ztwCJD3+w3RcqXs2b2d354ThMWr4NyRdC+px85ODgIFbFGPNqR5/zOPF5k8/N9Cy5ucUqJVOPewcfjkueuG888etv5vKxku00lr7zYXIHMre329ft4rSmwzXQS+08r1wGVE2ZGiMbI6k357kxsjGyMfLex8hr2VI4xvkrQhNTzgiP7e3tnd3mZtJSOxpjrcrngtJ4lwVLChx3Lg+MqotvnnHHpvMwYDgfE4Ta0+FnxuN4VedvXxLxIddU5anKe/T1V5Uwr5JqbPVfc9hZYnLdeCVhFqDiy4+Rj1mQO7FymKr21NHh4eGFfe+SyRdDfMZCbdRX86S33nB+Aqu++8LHdeoJlHJ5ghXfyR/4mQlvpj99djsopqvOfYlVXCY0r9B6ouPiTJ/l76xSp0ovZWRseUU+LaIoJ3ni3n3y5zHtMqqvx4b4ZF/Xs3Kst6HNSL7wEz/K09SF29d9mPw13Rk1RjZGkhojGyMbI+8/jLzrO1xikJUor0JVXawQUOGqcLgjzBxZ43ogeHB5hSF9JqVkNJt/Np504CBTddEJOD6v6lU5cj7ozCmJJ749aSS+GTjiXWAkB2ZV0sHHg01zqvLFW8EMbM3t/pLsmgJdCUUJaqYP8uyLhqoNODoPTMJHR0dnbZm8XH6+EtnncNskvekzZZ4thHxsH8fj0IGbMjl/7KN/vthIupxV5wjEHhvkTXz7+KycJ/2Rh+QDihfNzTdr0cdTbuLbmnSc/X3B
5QnabeUJmp+TnPrsFWvqzeM6gbGIvHmVMrVvuntqjGyMbIxsjGyMbIy8ljtcVReV5gwxoei7G16GPD09PQMo9vGHTRPJKGl+Kc0rZEwcs4RbtVvR8uRER/TAo3MwsTFRewVEbVMySgGpz3TAdGVOPakN20rnbh8HkQRaPOZVI205SZUP2piANksO4i0lOVWDOa5XSGkLPtybdDSTOS10/JifV0XMF01rVVQHBLcNgZuySn8JxJdl2dnaMVtguB1ncnMu+p6Px7ben3PSH5INHHBdVl88cNFKQGFljHpIydSBzpOv5xOdd3+fLQA8Z3E+5RXXo8eciHGvMfhMQgI25yHZa5Zzm65GjZGNkY2RjZGNkfcvRt71HS4agEqUAvzBwRRsFNzfnMPPKTlKoQlkXBF0PPLon51Pd87UX/w7uHgCch6dVwYGg4Z6YvtULZolh9nxqtqpPPkYCdB8XPHCKiR1w3lcP+rLBDnj2feeu+40jiekBKoaz8FG333BQL2khQLldL15LKhtsmGaiwst94c1nnme9nEAo/4SSCbd+XG3rf/uDmWj/pzv1C/pfqZXjUuZPHcQSLiAnS1yuVgkD0gY+XwAACAASURBVH7cK8m+qKI8rg/3A8aBA7/L5z6RclUCBvdv/eVibG3bTNPl1BjZGNkY2RhZ1Ripce9XjLy2O1wiN8YsiF1o9vPEu5Yw9VntVL2RQR3gPMHreBqbTujKTE7nPLr8Hjg+pvOTrug5LoPI+fGHPpMjUT/UC/XjPDOhpSoL9ZaqgA6+5CMFnZO/PUiB7A+Hup0pB/klyDFRkHe2oz1YiaO+WYGVzKxqObCmhZD7PT/PFkyuU573pOd2pU4S+KaE5vPLJx2QaBvqIOUKVpFV7eTrh9cWNuncWi6ZgabrzGUVj+QpAb23nxFBraoulZn/Unz62IrFtVzj4wtcBe6XxWXT1akxclfuxsjGyMbIxsj7ASPv+oKLDpKYJdMzhpICpBwHA533uXiOD41eNka6Gq/a/R0JzuH9PXm5k4koOx2Azu1O6gnOZVijBMJ+fvZdfKdbvTPwSQ7vCZzjK3g8wXg79w0HJp5P22lSYiTP8hevlPhrc9cCmm3SgsD9MwEDdei6cUoLnLRQcB/1MVwun3MGpGkxxzlnFVSO6bJRd6yEcWGoY2nh4X7C4/TDWbXe+fJYnx3z3DLLNyTqM8UHgSQBly863Mdn8ceK3Mxv/Pva3Zemq1FjZGNkY+R5m8bIxsj7FSOv9YePxQQFT47nx5xZKYKKTEkljZ8AgvNe9oNzLpMnCDoex0q8qI/3dUrHOU+qtKR2SoxMWJxfPCfAS/rwgEg8ps8JXMQTg3uWrNN4SiACOE9wCQyTLJRb/pV0xURcVZFX6dITnGyQFgL87tVZkip7DrYzH54lv2QP6jTp7rLYYJuUxAlKnuySzyXgTYnLx521uwwgvc8MoNiHx9PdBNlk5veaJwHWLCekN3WR95TfOD7fpMRzrC47MCUdpnhquj1qjGyMbIxsjBQ1Ru7Odb9g5F0/wzWbJCmKgnj/FLQcZ83QNJIblEFNnviP1QE/lnjWPPq9BfLvt4op90xmgsZaUva+rhO/0k5/Z1WOFAjJVjPnS444S/AJGKWjpLs1x14LRp+P55J/eiC6H/h8ae4ZaDFxE0g5PnlOIJ+Ax0EkgYbLlxJ9sn/iRbzPbLzGo3xqtv3JdU0e0/gzWTkf/T4lYn6mLi4DqpSPUnvyc5Wx6SeM85QbRNKbg7PrMuk+ycbPa/M2XZ0aIxsjGyN39dAYeZHHxsh7GyOv5Q6XJx3/7LeH/Zj38yB1R3ClrVVw0pYHdxpevbO//qXbqj6my3qZM/Kz792dOWXSlYOEV/pSAkk0S+JJjhnIeeAz8bgeyQsfJHXZLgONNd2sJR7nmYsG6jTxNgNzT2CetCmTg9wsYBMIMnk4z6Tka1y00D4eQ15hvey2vebwCpwqtgkwXZ9uE58n5YzL4sQT8ox3Hy+Bub57/lqrQvOY3+XQuQSY1PfMz9JYJG5v8bZr/XTOZV7TW9M6NUY2RvrxxsjGyMbI3WP3Akau0bVtKZSDJqcjg8lRluXibX4KxH8+b5orOYuDABXtDsS52cedYbbP2I9xHk8gqVrp5P14fPbZE7I7hPMxSw6zMavyfmXK4cDtY8t+rlcPtDXg1jH/4cAUcLOgkt/xt11SgvXFQgL0WWKkrKlvskfSp+j0dPfHIVkhTImQi6ZkK/Zz/r3q44sMjw/3aSbHFCf+3f3UfSTpx+Wmf5Bnn4v6uGwhl/SUwCnFm/Oo+ZI/6ZxvwdAx17t/15ieU33rCPW7Jq/GXNNN0zo1RjZGNkY2RjZG7urnXsTINbq218IroH1f9ywRqo8nHZGDSDKyOw95mQUmjZcqez5/Ihlojf80ZuJxWc5/DNABivp1npKuJBPlmx2/U2JVRzLosydKDxRPEuLd+fFFhbf3RQb7zcA4Je2q84eKZ4BGGf17WnAw8CkzkxvjhIGv766jpAdP4Pw8A/YUJ0lXHEv8ccGdYiYlWOp3bcGWZPQxHFyod9ljbduA+72DQIpTjjfTlcbxhO3ju/8keZ0PB+e13LA2HnlIMejHXK/U92WA0nSRGiMbIxsjGyMbIxsjr+Uthf62ossm5Tkpileqnpz9sxuef/1q3IPAjaw+6Va+A5L30Q/j+bzsPzOEB2EKEp5P+vTx0u3RtI2CAZACM1UlZ3yn4y7TLBkkW1OPs0Sj9i4b9SL5OPcMtCh3SjIEAhF1RP9hW/mfg6rL7/ZJYJK2RZAn9id/Dmak5C8uj9sjxV7ile2d79mCaPZXvLMC6gmcxL6+uHWdOCCmNj72DOzTQiJVSynXDGhTPkn5Y01ns4VBymue/5wP96umq1NjZGOkH2+MbIxk+8bIewcj12isJf2r0N7e3qJqRDJwSuQXmAjJk8L4rVL1ITF4PWGqvTsO501J1AORtzeraudtQJ4ckqxXBVm2cz5nxDZ6Tagncr8S55yc96rVglmwsS1v63tyJ98z3XPMZdl9Kw3HnIHBTFeiVG2egYnbn+epP87BX2tPPMwWE76okWz6vQ72TXKRPLk6iOpc8uPL9Ody65a+H08yp2Oz+EgAcJW2a1sFkj1FHndpQTDjjbko9Umf12RJixmfNx1fI8/LLjcXQl6JPjo6+sllWd51dYKmM2qMbIycyaAxGiMbI5PM6Vhj5EVZ3pgw8ujoqE5PT6NzXMtbCqtqZ2LelvMrRlJyLr8SnyUrfaYT+5hjjPgL2xzTk4EHmMbXG3n0b39/v46Pj3eSL8dwY3m1R/18Lo4/0xn5pS607cL3Wcs5XL6ZTqjTyxJUkpfnSWkrhwcJz4lkw3RLnL/w7ec4HrdEsGpZtftA8v7+/s5v1PAvx6BOk/6SXPxMvsQTdcTPApAkG8fS3Dzu/9wffJykZ/bztpJz9iOis/mdUnvnbU2GGflChvajz1wlPpLcCSC8jx/T31nV0e1Ked2XZ/Kv6SX5n/ObLgr4t+n2qTFyd7zGyMbIxsjGyPsJI6/lpRkkVgicGQZOStq
z4FMbHverZSZsfaaTOAiJ0i1E8ea/80AeCA4CFY3nyVBtmRT0XcTKGnWVkquOJwNT79SpEpxXqyg79Zn0w0TgydP153ymOZKNfX7x4E7u5AnPdTMLBj4bIGLlU4sk+kNKdr4tgklA8/giy5OJiL+i7jpzffG8PrtPuw0lGyttzqPGp045hsaUbujjtAF14NUstx/lYgymCmuqjCXbcxxvO8sd8gHnx7+TL1ZVE7BwbOYKt4nPl+I+2Sb5ygxQBITJb1xOH3MNTJquRo2RjZGNkY2RjZH3Jkau0bU8w+UOJ0pJ3Nu50q7S57LxaOiqi7dKU9CncfnjfHJgVgNnVTvKpeOU8+DgYOeXsmd9HJhn5OCdEtXMOb39siw7FQ8fn07qyVbH3CE1psZL8mqs1K9qnqQUBGznPJPvVOV1HRwdHZ0lWwYe++mz29B1rjFUUfWqiPsNeaJNJRdfj5zkIF/OJ/Xi86UElmTlQnCMze/snJ6enm3R8WqPj+8g69tdSA54h4eHdXx8fGGbEvmbAe7h4eGZD83I5U5boWYLgLRIS75GfSYQuyzWZ3Knv5SVMZoA2AGfYzNu1/TXlKkxcnfexsjGSJ5vjGyMvJcwco2u5Q6XJ+W1Sf02MonOqiBMAOXj0CHHOL9Sd8emMmX0VA0ieFAuObEnTAcZ6iElG/KuMd053JF5O/oyo7oczpd0Q12q+kAA9dfIcgz10zyuVwaHbOLEapEDl+ssAZfPqe9OvngQX/Q1T7ZMPAnw3XccFOQT5DktotjXY0NtyIvLKt0SrF339GcHYcq3lmh9geBJVX7DsbhA8IWXxiGfPr/rjVVy8pESvl8kcBzJrkVdks9l98UO2/rixuXiefpv8qk1XjQOY4bnfcF22ULU7e7+wzZXzTlNmRojGyNdr42RjZGNkfcXRl7bBRcnTEqZJXb/7A7px/V5bS4GoR9zw3uFJilShmPlw9sdHx/vyJr4oTOqKpEclHwISGZJOgFkAlwec2fUA93UV6rIeXVN51z/XvHyAKLuOZbP4XOlPdDs58mFuq6qs4qpV8Coc/qWwM6TE3+7wRcBnkw0v+uEfkW+2c8/p+8c3+3gSdDjidsy0twcYyanP/xLAODzG+5/HF/H+T1VGXWcfuIgQt3y/PHx8QXdcEuK7MxFAqu6XCSk8RN4EmioW8lAnlIc8zv5TjbiAigBmgOpPvtCyeX3OdYWHE1zaoxsjGyMbIxsjLz3MXKNru0thS6cBBFTFJTt0lW2K0JCzSptpFTxYyUwGYrzegJkW47DIPOtDwogzS0DpeTJtp7APXmnxU4ai0FN/ZBHb5fmYRKs2k3mKZhmgO8Jmbe/aVcHPi4KVE3UdyXDlBRcnuTjs4Ujk1+yp/tFGssXLLS/J6bEF+fjOJp7tkhaW6BpvlnCTYnFq3scl3HAePWELMBSX/qj2oivg4ODnUUZea+6CDBJRh73ve2esJPufEGWdJsq+NQXfSXpnDrzOPRFYQJV+kOa3wGE+ddtR5l9sZwWRlu99lsKb4MaIxsjGyMbI6m7xsh7FyO3sRaD564vuMYYy+Hh4YWrXE/UfnXOednHA9jHmyV/H6vqIogkB0mOqe++n9UDhQZZ40/y+5xJ5gRUDsKp2qi/3PtOWbktYy2RUpYUvA7uOs/goGyuj1nS8+ByfvSdQZaAhJ9nAepJnrzyoc4ZyM18wduor7YzpHEcUDxevILk+vJ48SSjOS6rnvPVvGsx4bbiYiQ9GzDzd/KW8oTPmwCR4yVAE60tclLCTCCrua+ib5ePY6R4ZjvO68DLuX0eyirZ1kDXY9qByRdQor29vX4t/G1SY2RjZGNkY2RVY6TriX3uFYzcXkQ+vBdcTJTJ6WeM87i3rdoohbcVZ8lpTXFUDuee3X6v2nXaWeLQVbG34WtYE3/qr4coXZ5ZciT5rXzXn8ZVNSQlcLVzkEl2cD9xe5BmycvnZdskt6of1GeaJwWVJ2S1T74iEOE+bwILE8Ks2pmSEuX280nfKXlxfk9eOpYWKklOB9eZDZks3Qa+aEyJem3xwLHd3uTXk/Bawna9pTndLg441Bl5dJ6T/bw6dhVyPXryp2/zYfBUuXVdOVG3fJjaZfcFjI+hc8fHx33BdRvUGNkY2RjZGOlyNUau0yMVI7e+Fie662e43GmSI9FZZg5NB+JYcmRPUO4wrpzZrb+1RKDv+/v7Z7duyVd66NKTvDupJ1A3LNvSsVwP+i49+FuSnKjrNf5cHvJNfrzKpeBKCcyrK86X/l6WiKRz8enJU/0S0PC8v8rVk44n5zQm2yUwUl/tyfagTX08CVK3nFdvb2Ilj9W2FFOpspTiZeYjBFSPO+rYeZ8labVn4p3J659dNgKdVxBJs3H5vIfOeaJ2+ZI+mWCd76Qbt4+PT79OMZQWaVpQ6pjP4znEKdnAgTPps+nq1BjZGNkY2RjZGNkYeW0vzSBjfswZ8UScmKRACQxc6Q5ETBBrvPAzDTgDr1S5cF74nQAgHmaJccYjeRBvbEti9UlATF7XKAWB+ioJ8HdSWGFgok2A7fKxeuEBOnvI1EErjZvs4wHjScp/xJG8pPY+lmzpvyMiIODzC25/D2T6kCeA2cLB+6gt+3t1iecTGItP+lDa9uE6Ej/uo7RhAhy2TWBMXdPfEsAn/xBfjCH3D++fZKHfz5Ir+U68uL1PT0/PfF6vEKZs6pP80ytuCdTYnjkugU/yhbVzTVejxsjGyMbIxkjy0xh5b2LkzAerqvIL82+TZiCiyf0H6lxJsyShSggF9sCkQt2xLuN5Nr8cbe2fg4B4S8f11x2MDjsLbE82JB5X4B8dHZ2NyR+QVPKaAUqal2NrTK+KJjD0JOjyiWTLNd2wD48rMFgFoQxuQ+qAFQ/X+9HR0Vl7VXEJGDN7UBaSb1eh7zOhkVKyZnvplDL5Z/kDwZhvjtI8M39j4jk+Pj5LdkmnapNiaSa7yO0tvSd9uG6SX8ySHWMzLV5SPy5sNHaKK18AesVTfTmPeE45g+RAxtyT8ogvKFxvWhyoDRdSvthI/ZvujBojGyM5TmPkOTVGNkaq772Okddyh0uOR8E9Uc4SZLri1N9U3dN5D2yvvM0ClFfdbMOkRsDzJOyVF/LlQZEqGNTXwcFBPfTQQzu/z8CxfO6UeHic7VKFYVY18qTCSoDG9C0HHtSuNxETHXXFz3RsD5QEQmpLn0t2pIyz19yyv/ucL26cdyZcb5uAxxOt+usvdU19emWG47idaEMCC2VyWZikUszxmLdx23Fs+q/bdpYjfKHmFVG3jdvNda3P3HI1i3/3saSfpPcxdn/XyPn0udbIX4PrFW5f/FBvXOAl/dDn9GxMysvyHS4irwImTXNqjGyMbIxsjGyMvL8x8lrucDkz+szjrgAPgJQwSFKIO57O+XfywL9ugORAPo635/g+h8sxk0v7YTkfr5jpID63O5AnMe1nTuPMdOvnmTCY9LyP8+R9nFefd/bZbU15xJ+Szuw3P9Q/VVI4p9tO/bQ9hO1Y/VPgul6qNknBbZn8gv7BSjf/Uc6kr7WK8SxGkh0o+y
yW04IiHfc48wowE/SyLDvVQdfPwcHBBbBw3hU7s4qo5ubYPgZlTv7v4My5U1zSn9ifff2zyAHzsvghz2qfPnMBlfIN23G8tChsun1qjGyM9D6NkY2RjZH3Dkau0V1fcNEhHDBcEBmFxz0Rsa0ndc7nBmQCUB83CIN/FtguR0qEKTns7e3t3HpMVRC29d/JcB1IHncY6tvH5oOOyfiSf3Zr2HWt7+l1rdQTg5j9UiJLTu5j+puxEph7JYXjUPczOzC5aXzZhAGmtpKPv4FxOwuRGYBxTh3j7faUoNmPbdnOkzj1m3zZ+fctCzo/k5HVK/LHeZO+vPqZkpq2FMwWZrPj1DM/e5VxllCdDwKDxmE+4eKFdqNtE+iKfPHkPkNd6vwMYBKAJHl8AcWxOecsbzRdTo2RG2qMbIxsjGyMvJ8x8tILrjHGu48xnnJJm81gKxWXyxJYUgydkQl05pj+eeZgPO4Gc2NQ2c7vzLHJfwKTlLAIiJf1pxNUXbyy9irWmnycKwHsZWO6g/rYfJ5AFTf6gn933XBxIFk5v1dEnIfZQiAlOspDnn2clBw8IbPv/v7+heTsSSf5mR9nQmXS9qTsScLlXvNbT24kBx7nlbfpPTm5bRg/TNJpsUkeCYhpbPfbFEMz3jS/L1YJdP77PdKB79/nZ/Z3G0lejeMLwWQz9z2X2T9LHvqq+5/GIx8zoGo6p6vg47ZdVTVG+tiNkY2RVY2RyTaNkfceRl76DNeyLD92WRu03fmcHNdBJQWiPtMICUg4Hx+8pLLJj7/q8jJek1wcS/PRWDSGy8mxXFZv5xUG58OdggGwluD53ZOUOzT1PrtV7ElY5GCb2s+SGudw21BGBmqq0iVwcOBy/hLN/I56mumCIOjA6HPws8Z2XYh4bs3XZvbhOLOYSN/dN8iLHxNo0HccgHyuZDMd95hIdve/szhLcqnNmn9yPvJwenr+FiUS+Ve/xFvihcept5k8CazofzNwozxpvLU573e6HXzctt/53Bi5KyfHaoxsjPQ5+LkxsjGSxx8JGPmwPMPFREWBnDzBsF0CELZJSTIZmv2593e2bYJj6rPfFuUc7JvAz+XxOVIFhOeS4SWrzns1LCWXpN/kzEzadN4UBGuB4Trl9gRPFl4l8qrWTN+s/Lls/Ot6c3+aAWgKagcWyqXvtI/rlvL7NqOZb5P35AueWP3cLH4cuL0vbbHGm9vEq7Ts4/rSeY93EcGIuqQ9+M/9NvkX+UhjsY94S+PqMxeRSd/pzkPKBzO/9Sqb93GbzPLBTE8c0+dNVeGmO6fGyMZIp8bIXd02RjZGPpIxco2u5S2FYlQM8NacztE5maBFnhR97JmwyQApsTFhpLkSv57I9Z0ONqMEjJxLfZlQ/Ao7yU++3KFnzql+DO6ZjtbkSAknJWz/uwbay3L+dpyktxmvHvCup1QlSmOlBY3zLkq38pOe9ZeJkH18zqTbJL/s7K/XdWBLx5jY5H/+VirKmxYb5DUtClzXLntK6smv0/wpCft4nMMTbPIR12+KrWQTArf7h1dcva/rxs9Rf+5Liadko1nbWT9W8D1PpJzQdGfUGLlLjZGNkY2RjZHe13Xj56i/RxpGXssdrhQgUjoTuBJwAgD/7NWP1IbjMLmvBfjMWRLAsS8/J74TuKT25M+NRSf1vmmeNX3OkuOMN4LpbCGgfkm3ntxSckh2STrwdq4/VoAY1Jw/BTSrKORLY+j1qs6bgn9Nn2v6mJHr2f85764L8bXms0k3HCf5iSdc10caexYTDg6XjeF8ypZ8DbDnF+pitmCgnhwQZsnfdaHvXqn1ahnbXZbUHRx9Lo3pNp75i+dbl8s/S5bUh1tAEnA3XZ0aIxsjGyPn+phRY+TFMRojH7kYeW0omhKO/vHHzDyx+2fRWnJec6hEM0f29hxT51KFjoanYzIRpVvX6sO2KaHQWT3wkp5cJzye2s3Go8wKFAcvPhDJ40lm119KPN4m8Z4SXkoAngxTQkzBN5vbwT0l+TQ37eigxPHTbe1ZTNDfdC69GSslJRH3T3u1V2OmpKLE4jJ4Wx2jvvxzkt115/7AOGCVyftyDPfHpPc121N29yfqyuV2mTg2eeUWrKSHtfHSIsWribM8cJnv8hj9d23upqtRY2RjZGNkY2Rj5K5MHPtewMg1uusLLk2ajM3AWktm7JNusXpQsa8L60pPylAy9wRBJ2RC4K9Oc440vhsk/fNfzua5lIw4J+cjOHmSS5Wtte0SDhocP+kv8V11EQB1zl/Nmvonn6FNFIDiK/0qudvHbx17wnR/ok45rm9RcBs58Ih86xB1Sr/yBYZIeqP9HZDdZ10PGlsPsftihX5M0HDg9rE1b6qa0R+og+TrrkPJItut6XiW2BPYyIcSuKVFQsotLqfriG19ET2rqjm/tAPP8zv16XHF6rbPzT4O4q4/+sEslzZdTo2RjZGNkY2RjZH3B0au0bXc4RIjclY6gQtBBp05d2SC1MxodF4PYneYdG7mHBqTtwx1Tj/25/0JEskR3GiJL5eFBve/Xhlz+Rw8PMnybwJB34rhOnbedCzZzZOW9/U+s+BiIpa/8W1YPn7yM87t56QT6sdB1f1H/eQT/qavo6OjC/Zz4Heg0vnj4+Od7+zvQOuLHo1Nnt3mnJNbEhxQPJZPT0/r1q1bceGgPvv7+3V4eLjDD5O524E+4GCuY5yffRMoc5HK8/wrvn0RS53RPvTFmzdvXsgdrj+Xze3v8eGvR55t5XA/SDFMoPZ+Dvbej6+q9opp0+1TY2RjJI81RjZGSg+NkfcPRt71SzNmQvBzEsaTUkr+vG3vCYhOoYD3/ing6CA06Fqid8ejAycHcUfSfMkxvK0fT3pLwKIENnM6yeHfKUOaz+Uiv5IrVVSTgzq4iQc6tCdZ/161G2CSiTzwvNvKdUl5We3gAmlmZ6/yyg4eePQ/90fN7UDPpMJqjAMr+fLxaEP3Rbe9gITklfhUWffku7e3d7bP3yuXx8fHO5Vmjp1s5lViP05wlL5OT093Xn/t8rCvJ3TJ5LkoxdXBwUEdHR3t2Jjt/XhaKLA9Fx6UW7qk7Er0XuX0eeg/iafE4yyXp5hpuho1RjZGNkY2Rqp9Y+S9jZFrdNcXXAwcVu3EwNrVpwOPB4AHWNVuQFXt/lo3nWMtqc6Up+9ySoKOjOpjM7BSNc0rac6L+Jk59wy4qnarROkK25NUSggJpDwROs2cl32SD5BntfMKCX2JCc+Tjcaqqp1KquwwA33Xz8nJSR0cHFxYCHgCdfJEx2PHx8dnx09OTnaqi5rT/T8lOepFOmACOTg4qP39/Z0fFHSefBzqheBD4lwpBhOY8zhlOT4+3rE5ZXBZnVd9Z1slU88rAhCXg/z4YsETPOdS+1kSPT4+3skJItepZE9VYI+3FEt8O5nrOOU06tYXCe7baSzqyP3NF4lNV6PGyMbIpN/GyMbIxsh7DyNn+qi6ptfCJydOzuLOx75Vu4qjMdxx2IeO4r9wrb7u5ExYa7zqOx+kJLCwXZItJWId55wpmJ1SkvH2BD8mjDF2Hx71fuQ7J
VEHO8pGORwMfXzqSue9oid+Dw4O6vT0/M07HrRJnyL19aRK2dKCQG0EMAxMyeH6JoB5O/HiCXG2eHDAVCJ2/WjBw8UP3x6VfMtt7wtngrAqkO5j7iP6q0rW4eHhhfjl4s+TuNt8zaZ+LC0YXUfig3r1pJgWCzNf5jGPh9SGtqaeZ/K4HzIWuABJCwWvlnOx4Hz6oj/p0n088dh0dWqMbIxsjGyMbIy89zFyja5lS6Enh+QEIt7q1TkKQ6dm8PMqVIpg1cb7eMIWf8uyXEgW7uS6alUgcK6k5BRoTpSLx6jHGc0CQImPekxjU8d0pAR81MdMljSP28d50lhe+UsLhBS0M9CVf3jCZhLjcS0w3GfpC7IVKxdecXKe5CMaX3rxCkkCcPJDPREUyZP+qWJFPc4WTkwulEE8u27Fn1e+UuIXH9Q1+6ZE53Zn/Ign6legSh36oscXJjMgJS/8y3FmCyoCtvSWKqPkdea/VbsLVc5DfaSqKMdnW5HzlRbA9NMEZmtg2XR1aoxsjKxqjGyMbIy8HzByja5lS6E7WEoiqU/VbjLyvkw+7hypGuDJVpU7BZ+f41zJET34RQSGWUIUJbCSUfkAJpMNKzMEWZe9Klc8Ka9kY7J3QE8JguTJl0lXOlPw387Y7vypD4PYq1tMiBwvBZsDmMbyh3c5fgpiBy7Oy6qXjjFZJxDzWKH/sVInvVTVWTVnTUaOTR/yRZP057pT7Lifu8+77XzRxIVYSs4pEaak5/ZL7ZMdZxVIr3Sm+E7ypVznldDZOOwvu/oihXN5zFG3vmifLdCkf9fN7M6Fj8G2nKfp6tQY2RjZGNkY2Rh5ke5FjFyja7nDxb/87AaqOld+qi6wvx+XIql0oIjNyQAAIABJREFUJzqIB0DV7p5nv73sCc8TtMvgTkqnIGB5AHM+Jh2Ny+BJVQFP2EkPVRUrO+KJY/O4kiH33aZEPEtKLoe3pZ5mAaNjbju9iYigxblImtffXkR7Or9e3XCbM7H6P/EuHVLHlE/VuOQLDHzqXvPrYdoxxs7+d38I1vWfFmPuNx6LWujwTUBu57RI8KTMBZPrfsaL+3NKnnt7ezv78rmVgP08mafFCG3Eth7PfBYi5YvkY7PPCQi5uEm5h7pMgMr2tIM+M49KDp33cXysNFfT1akxsjGyMbIxUv0aI+9tjFy7+LqWZ7gSE7xqrzoP8JQw1I+K1XcfZ+aEnpQ8wTPgFYTsx8+uMB+Hyk1X354YnT/ONwswr2B5X+rSKxF0Rtcvv3v1yMfjeY7JhKfjqdKV5KRtHKSSPVnt4NiemD34Z0CusQlSDtwkJjOXg7zrn8YTEVyoQ98OoLl8QVJ1vhXFq5QOVJo/+Z+3d/LqsAOJ+if/T5Uu54Gyuq8k29PmvqhyX+JxLmLIDyn1d13pe3r9rPPsOYfnKavzSf91HTl/qthyIU79p7sPyb/Iiy80yD/Hc5003R01RjZGrsnZGNkY2Rj5yMXINbq2Cy46lQOFM1y1+2YSEpMDBXZFcB6Cj88tYh9Pojyuvgx2Vv0uU3LiLR13WfTdjeoAzCqb9OUO5kmDvLgdvH3i0/lL8nrF0vXsCYhBmhxfY3HuND/lTIuH1I9A5YuB5D+Shwsb9wWd51z6K+Bj9c154RjJPj62V7W8v4N6shvfNKY2DkIpjrnQSePO/Puqi6Pkg55k+fpcB1/q2nUxS7Scl/MwZrSIcn/SeJoz5b00n9q434r4Gmsuksgrc9/MDiTGlMb3rVQzXdwOsDRdpMbIi/M0RjZGVjVGJlm9b2PkIx8jr+UZLp/MkwiPiXjrT+ROSkdhwDh54NBAnMOdk4HhCWVt/OTEHvwOAD4er7h9m4LrwStLDigOJOzPwEoyqM3MlnRwBy3qjPL7w8G89S7n9yRIchBhhVB8c/uAv6WL4yZA8gSabKOxqCf/HY6U/FMwqqKXqiUkxoz7pffVvARlr3KLB/4mheuae8ddb57cU4WIOk/6pM+4Xznw0lf5cLVs7briZ9enf14DPbedz8WY0TaNGRCleE88UA/p910Sn75gczk9j+l8AjfPGYk3fp6BVtPl1BjZGNkY2RjZGHl/YOQaXTt6KmATEx7Y/MzEyKtWvnklEUGH4zuw+LweKGqXzs9ALJHasnowM3ACWyUIjsEELP14UvKH/jxRryVs8k49OWCxOpEcTu1cLvHjgEPe00OLSQ6nlDTII+3hfLjPUR6CFuehL7u/uc95HCSZ5ONpQcM+WkD4AosAQv/14HefdN1xm4XrXPOxikn5PcbcPrPYpV/Tn8k7fV1zCWzYn/YcY5xtH+Hiwuembjz+yTtl8C1gtJ8DrI+TFnVuGwellCN8DOczxb4fS4usGc1ySNOdUWNkY2RjZGNk0otTY+RF2zySMfJan+FKimRgJIUTOLxi4InRK1Gcl87ggc3+bJ8SF/+mc5QryTtrOzOIO6+3dxk5hssj4sOSnhBYoZjJr8+0mT8wS517NSxVTmkD3+7hcyfbiLzS4cl1pjPnI/EonTooJF7cl1M1RHP7w6SeVFJVzKvdrBrRHuQ7VTEdKD3x+rYVJh3/zH7+mQs69w/qKIEL/VJyut9yPuqQn+nzrLCRH7VX28PDwzMgTQnVfWgNMK7y2ft4FT0Buff3aqjzSDmUX/VbNxyLb4CjfSTbzNZNd06NkfO2jZGNkY2RjZH3MkZeywUXHdO3njBY2Dbd/vWEr79MWjrmSvcgdGMwOOmcbkApODkWg1XjXAYoHpQ8R/nUjv88QaX+rld3Vh5328wogYbLlWRkHw/4xA8TywxU3bmZMBwgZwCVAsEXHImHBGqpbQIkHWcCJfDOtqisAXzV7kJBWwrcnqzg+VYVxlgi8UtQcF7c1rN9224/f3tRSorehp+5mPR5HHxTjFD/6uMLIfeVxIfnt/R3Rq5X2cIXFOlOxFUWgEkOX3zyWAIjX0CwfdOdUWNkY2RjZGNkY+T9jZHXfocrKTcJl5KflOiGIqXkNBM0JSXOn273emXFDe88MZklXtWGPPt3BprGSwmfAUP+vQLg1Q7nidWLBBaeZC9LrP55BkDpdrF4cb1IFl9oOND5a0AZ6L5AEZ8p8B30mfCZwJIuHPxnicb1s5Z400Ip+bD49PNsJ7+hr1I28UT9zpKWx5T7alqAeKxfFdxpf771awZyrt80h+sj+d0auDggeZ5byx+ktPj0vrM4Sjy6b6Y7FYkXX6Snha3aNd09NUZe5FVtGiMbI10/jZG74zdGPrIx8touuNwRGNhkTLd1KbD6e+VN48yAJzk9gYIB7v2dV+dbPK05JZNHAlLniTrguP49JZFZ4iE/5JdB47KnYF9z+FkSE+/SgetIx91hRUzSs+cQvMLjweL8XyUpu4wEqFniW0v8Sac8R534gonycYxUqXFgnAW4y+8Vaso7Gyv5iycWyTFbLHmF3nVJnhwY0/wOYB4fM549Qc+qYAng+TktONyfEy9rwCo98hjnSAtLzuFx7jlP416WJ2ffU15oujNqjGyMdB3peGNk7bRpjGyMvBcx8touuOgUvCWYFJkcwJOjt+d3
AMb+Ocg+/BJvA7H/QoluW8DILXufN9z4b4t9xEImEadc8qsuRIIIOZwVB84Oc6gB3sYq1WM60xBoAB9no9SU+tgb3khKCMc/A52BmWtwmYjCdsHsC22+3OCgVhSZFrnE5Pu85vNhsDgWw2q16vdzaPOEaxWDSGw++gzlzgPKlU1KlGko0/oM3P0E/jNMfjUbPZzDTsjPN+vzdWEskHzuGDDKBMoOV6u93OOgt5LXwQBGeMIUwN5y4UCtZKV4qcczweW1KFLXuWmSV8xgJ7Aui9Q0vnm5sGQWDsM8kM54KpSqfTarVaBur+c3RVgg3dbDZaLpcqlUrGnPrNHUkSYOXoUsRYetYcOQMSotlsZjbAfZEIAlZe/8wGlVwDG8QmYf4ovF0sFjYmjCO2iA9weN+SZPMFQ4o0BHvHFr0sYLvdmq/AXu92O9s8k3MCmh/bLayen2vsg+cNgqc23cRRL0/xjBtxwCfU/I0/eoDgD4w1Y+YTfv8iBWiQXHMPnNePb3z8eUeMkTFGxhgZY2SMkV8HRn6KlPxim6owqDgqE+wfKAyf+trzEPzs42I3HprP8PbNeQhE0rmUArYLA2cQptOpMV8UG7LEj7aZgMR3uFcKitFbAwywAb6ugUll0pFIwKAcj0fTPUvn7SUlWQDc7/dmvNwnUgH2lvAAylI47ITXv3qmAJmAl4rAeDEHBFS/x0ipVNLj46PG47GxJjgBrCXj4cERB4XdxZEIRP7zPhGguw/PTptUAjmFwbAe3W5X5XJZyWTUzYbn9xp9NvKr1Wo2XjwL7CUJA3tapNNpsynmCVaOACDJwNPLZggcXuJBwSuHl4ggE6EGAABfr9cmP6EQHt21l1lIsj1SkKewcSHs436/V6vVMtkBAIDfjsdjY1yDIDC2DfaN5MYz7DCvyWRU+H04RAXeAC5Mq7c3/JvkhaSG59nv9xb8kKVwTYqQSYy4N/yAtsu0V/YsP3YH24XEAvD29wgIkmh4iRUJGdIcbAfGktoNn1D6ecY2qKcBHHxXL8+WcQ3iH/HJM7ych6SJ7yOjARSw5Y9jtvQkXfmxlRfGJj7+siPGSNm9xBgZY2SMkTFG/hwx0seqj4+f/MIVhqEFTQaNm+aBPCgAAARTfs7PeKsmCPEQDJ5nTBhMbyj+bRjWwu/ZgVFjlBgXhYVe8kHhKw4OG+AHnkkhEMIqYfCS7No8H+wKzuALY+ng4jXv3iBhKXyXHhxzuVzq8vLS9lTAaXECKQqoBHOW95PJpOl1MVien7FPJBLq9XpmlBgwDCoJgGfIYEEI7q1WS6PRSOv12tio/X5/dh3GCnb1cIh069PpVLlczphKxpFx8nPJdz4GRHTq3Bc6Z5wQ1m+329k9E5T528tdPMMqPXXAYVy87eIj/E0xJ7bGGHMNnodkhY0HPeNNEORZCM71el3T6dSYb6/fR0LB/PhgyrN5f95utyZpgSWFueMz/lnm87kFeLpseRaZcYG9IjgRuACmQqFgdQ0AEgwbSaqfR+4lmUxae2NAkvOS6PlElGflufg89wwAADbj8dieget5OQTjSmzimswFdoMUyLN2xFLm2GvNsS1sknHjPolHxCTPsFIQ7Fcn8C/P2vnf8efHXgri4/OOGCNjjIwxMsZIjhgjf94Y+anji9RwYZAMHjf0Mcj4G/TLw/5z9MeHUcG42MzOD9Zu99S+0t8D7EyhUDAGgr8pZIQFIQAhr4BZ8/fEvVLMyQQkEgnr8ON/xj3550Nbyhs6Y4Rml6JdQBDHw1D5PhsZeuOhZSvBNZfLWatRDAY20H8OZiadTqvb7SoMQ2srOp1Oz5bsuR4MBU7tl4cxyiAIzrT8y+VSlUrFgofXNjPmBCsA1rMzUtRBii4/6/VatVpNo9HIAis6Z0lWDAvLRULjGRbmDBvyy960XoWFSSQSpm9HykEhLSCNvfDZRCKh+/t7NZtNhWFoRb/eLiQZe8XYLRYL6wbkgZx7WSwWKpVKkqIAc3l5eTZXtE7NZDK2wzsBl8CFL1WrVdVqNQMAScYeMSaAbz6ft7bJ2exT1y4va5Kk0Wik6+tr1ev1MzsiWAHgJCEUV3uAgNF6fHzUbhdtJurBAlkEMiX8gzhDYun3lIGVxo8Ads4Jc8g9wLDB0MEG+3jA+bbbqLsWSSTMI4kNBc4kwx4IGWfiHIkEz8C1GCPP0uEzHpgYG87rgc4zoNiWBzxiEjbn2UP/ufj4vCPGyBgjY4yMMTLGyK8DI7UxMccAACAASURBVD91/OQXLm4qmXwqUGPgPHPHjfg3Q/9GilFkMtHO5HTn8WDhAxcDwJsykw9DwYSynNntdlUqlWxygiAwOQL/RorgmS3unZa5TDZBA/YNGQXnY1mc9rU8lxQFLJgFwMGzEIwdIMC4+rGcz+c6Ho969uyZut2ujseoyxSb+mEI3tEymYwVqzJPBAKKZz0jkkwmbd+KXC6n3/3ud9Y6F8cDCDkfgYhg0+/3dTgc9Ktf/cpAkgJkxgaGiO9LT62NYZ6azaYFWwJeNps1vf7xeLT9SwBZAshutzP2BIfEFj3LRrEuzKhv5cy4kIwwptgTbDX6Za7NPIxGIwMs78zYP85MMoOdE/hgZzkvdoRfYYOwlfV63ca2Wq2q3++b/foWprDOi8VChULBfMrr+0lYkHYASvgcYzabzUxHzpiyySAAwnwyTjBZ+NXhcFCtVlMikVCpVFKn07Fgjmbd6/59suTZU3yXMfJzTkzZbqN9WHwi6GMSDDCSnu12a/IV5huQX61WllDx7LD5nslPp5/aEXOvJETMOT+DKSe+ekAB1GH5sFPA+uPA72MkcZTxJl74331sox8nyPHx5x8xRsYYGWNkjJExRn4dGPmp44vUcBHEWdbEwDAav8Tr3xZ506VIkiBP0PDgRLDHyaWnNqsYKM7Id/xyZBAEevfunVqtlv2OzwNIiUTC7vd0OhnrBAMEG4WD4SRoxQG8bDarZrN5FozpZOOXRGEQUqmUhsOhsQUYPACSTCbNgD3TgQHBqjE+vV7PWEE2KcSR1+u1SqWSttutBXMpYrxwfFhJNLbFYlGz2UyDwcACKvcJK0QgBbwB2u12q1qtZuwkwC5JlUrFAiP3kM1mTSayXC5tyRzWzRdDA/jcI0FmPp+rVCqZU/qd3NnDxbOL6Hd3u51JZA6Hg4bDocrlssbjsTFoFPwCNCQ8zD3a8CAIdHl5qcPhYEXLXM9LG0iMGAfsY7vd2uaGUqRjL5fL1h0sl8spnU7rw4cPZ52RJpOJJpOJEomoSLvT6ZhvSE8MDuz4YrEwX8D3CDzeDrkv5p3uSJLMZ9D0k5SRLOVyOfNvfLtarZ6x54lEwjT3o9FImUxGt7e3ZwCCz1QqFWOpAFvGkmJ0Aie1IbCQ3C+2AUgz9gCK12UDoqvVymoEdrudFdnzTAT8YrFo88kqAYDEtRlHfJiNWUmofTtlEm/uD7vDdzkHwCXJYpKXbjC3fp79ixTxmTn3gOLtID4+/4gxMsbIGCNjjIwx8uePkZ86vkiXwtPpZI7olxT9Uh9vir7gjwlkwP2bPAMPW8FDw
2T4t08GPJlMajgcmhQAp8HIt9utJpOJyRc8i8HAs1xLsLm4uFCpVDIwoBCUiSsUCrZ8TwEj18ZImUi0z0z44XCwYOkdkU5NjGGhUFClUrEN+uhMw1J7JpNRJpNRv99XMplUs9lUuVy2MeLajAHjzc/Zn4RCRACCbk6FQkHdbtc6FdGq9ObmxjpJYXiMC8xou922AtRUKmV7bhyPRwsaNzc39n0AiIDFPEhREKjX67a87XXpzBXMG86CNne73dqzcy0AYLfbWbE0CU02m9VgMFC/37eEAeAmIGAvMI4wVwRXClKx8f1+r9FoZM/GPeHMsEWMMYGLg4SK+wAUpadOV8ViUel0Wnd3dzbmQRCo0WgoDEPb+ySdTqtUKpndIa/g4P4A2cViYXbL5/f7vUmT1uu1KpWKJS4UdEv6k2SNcccffGEudgmj7JMw/AP2PJVKnbFh/Iz9Tg6HqLPUcDjUbDZTr9ezZwB0jseouxf7phC/PLj7hAFfD4LAJDC5XE5XV1eq1+vWXS2ZTFotBkwc88z4+RoTnoN4QdIISMBInk4nS0BJXBgTEi0SLMaLOQDAfNzm3yTD+LBnuaWnNsteQhEff94RY2SMkTFGxhgZY+TXgZGfOr7IChcBENbOSyi4WZxU0pkj8kAsjTOYDCLOy9u6X3LmYfkZn314eFC9XjdnQ0LQbDZt8AmudGXirRZWBOPM5/N6fHzUarWyzRBxxtvbW6XTaU2nUx0O0eZ17FHBsjdFjLzBAzAsVWMIaG1Pp6jjE113ADoC0Hw+N6CAzatUKtaKlsAFC5FKRXtl0GVJkkktJFlXIzaqhKkrFouaz+cGkKPRyDTHlUpF8/lclUpFlUpF9/f3xiix1A7DwvWRlSCFAZA/fPigVqtlUhY/zwTOyWSifD5vAZGkgyTlxYsXGo1Gmkwm1iKVYAbrBvvHsjaMEGwFQQ5GJgwjrT6BDSYIO4Z1orZBepKKHA4HK2olmLBZoNc5Ax74C8mWpDN9MwmU1z6Px+MzxhhGJgwj3flgMNBqtVIqldJ8Ple73dZoNDKbQCIBMKOxhjXd7/eaTCa2x8dyudR4PFY6He2vQ+F6t9s1W4AxJLkLw6f9OXzA9yw0NRLMJ5s4ksggGUEihN+zjwnsJjGGZAE/YL5SqZTu7u5MTgDTBgBJOmu1C3NGvGJul8ulyXWoTVmv17q5uTFAHQ6HxkSSIHmmDf/3MUySyZKIIT4+Ajp+ZQLGj+5ax+PRElliwW4XFbez2SqyHOzGH35uvJ2zQhKvbv3lR4yRMUbGGBljZIyRXzdGfpEXLiZOeurA4o2AwMnNsEzHz1jS5CF8AbCks7d1v/QL48ZSIqzLfh9tAOidHKCivS1Mo3fWZDJp+msCHm/0BFve4oMgMKctFAqazWa2zM9E4TwEtEqlYsGsUChY1yMcYb/fn20qR3CmaxLARWArFotn7B1BGwaOQuh0Oq3VamXFwDAASEIAFwzw8vJSqVTUonc6nSoMQ11fX6tYLOr169fK5/NqtVoWDNBGUw9AS1oAYjqdWsACvDDOq6srJZNJG8cgCCzwM0cwgdgFxanYHswtNoWTsnM91yIRIEgw7pwfpnQ0GhlLSgI0n8/VbDa1Xq9tfgABOjKFYWjBWpIFExIItNIAlyTT5SNRkJ52Qoc1AtCRnqxWKyuG5pqw3tgzbG6r1bI5YAw9g0zx8X6/V7FY1OFwMMnCfr83qQ9MOMA4mUz0/PlzK0D3hd/ISkjYAGHsjsDsOyeRdDBmJBIUkp9OJ/NpAinnI1kiJgBMxAcpYlZrtZrJYQA62DeYW2ITzOBsNjPdPQwxjCctfe/v7/XhwwdVKhUDegDYs8LMFXaDPIr4x1iVSiX1+31JEcDN53OrCQG8JJlNIIsCIPEHCrEBfnwEJtn7j2eR+ZmP18R37jM+Pu+IMTLGyBgjY4yMMfLnj5H4w48dX6RLoSRb4ibYwsDAKjCIOAYsxmazsWDHm6xn7wAZut/w4DBUXJM3ZJg4mK/RaGQdiYrForbbrbFofmnWG/DFxYVp0tnB+3Q66eXLl7YUud1u1el0NJlMdH19rYuLC+tQI8lai3qtby6XU7lc1v39ver1uoIg0jCPx2N7u6d7DmMDoyFFoAjYoFemRSkGCKDClHFez2IQDHHw0Wikfr+vFy9eGIOAfALwu7y8NIDCgRuNhna73VkbVdqyMn+VSsXGj4JK5h4G5+rqSkEQaDKZqFQqqVAoqNfr2fjgaBQOs0/IaDRSu922APjNN9+o1+vp8vLSWFpYz1KpZI7IUjKF49zrdrvV1dWVBfD9fm8FwOPx2Ngsvo8NYe++OJquUj7In04nA3j2EMEn0OJj+9JToMdH8I31eq1qtapyuazD4WBafOwFJgcfLBQKFuCn06nJOpCBoLWGucpkMhZcCK6wqrCSjBEAhQSJmgZA1ssECKyegQVAmUPkDqfTSe/fv9fhcFC73TZpAIzVaDTS5eWlfvGLX2g6nWo+n6vT6VjiiT8DCLvdzuQcnvEcDocGeswRMWsymZgchzoWEh/sHtnEZDLRZrOxZLVQKNgmmvg/SRLJm0+yAVX+JgZQiI1EhkQAUKawnphKPJBkY0YsAGCIm8QCAIax8zGbFRHpiXGMj887YoyMMTLGyBgjY4z8OjCSldofO75Il0KMnODJhLAEDCvE2yWslF+qxnh5aIoEfXBmGRoDxCm9NANWAhbLvylTZLlarSwoJZNJK9jM5/PmAMlkVPB7fX1tRaDrdbQ3Bl2MaCnqi+8wUpxpsVioUqnY2/x+v7dC1vl8bnpl6al4sNVqqdfrWeDG4RlHzrHb7VSr1czQmYNEImGMiiR7PmQeOBnj32q1NJ1O1e12Va1WTZIxGo0MzI7HqMORZxPYtR0WAcaGgJFOpzWZTKwzEu1kmWOWyXHCRCLawI/rYhepVMra2MIEZjIZlctlc9xXr14ZiLCsjqaeYIojbTYbffjwwZhBXxyL89/c3Gg2m+nVq1cql8v2LIDMdDq1cb64uNBsNjtLREiOYGolmRQgm83avjUwz6dT1K3Ia46x7Xw+r2azqcPhoLu7O9MmX15emg8iU2Cjy1QqpUqlYqANg8k4UOMgSZ1O54xZXCwWZ9IP7BZ75NmOx6MeHx/14sULpdNRcXI+nzetN/NNcEdmUK/XLaACUNwbMg2KxWGxkD4g+clkMmq325rP55pOp3bufD5vvsVzl0ol259mMpmcsf8wz1zf10Ngy4VCQcvl8sy3AbRms2mShdlspiAIjOkDvBgrYtPHLy1+VQIA2e/3VogMKwiowWqHYWjSEuIrjOLpdLIkFXDGv2DjPLvppSk8K8/r45ov4I+PP++IMTLGyBgjY4yMMfLrwEjG7seOxHffffc52PEnxz//8z9/R9Eeg8xE+eVB2AKMO51On71dUuzI2zUTjSHx8JLMMT7WwfuAfnV1ZTIJHIXB99/hPhKJhCqVijnhZrPR3d2dgQdBjMEfDAa6vb21JGSxWFiBK07hHYag9urVK4VhqEqlYufEqWezmdrttrLZrN6/f29Lx7zNo4lG
HEiRPxne98B7W1tYMUNpLghg0bcNFFF+Gxxx7DP/zDP6C0tBTvv/8+3n77bSxYsADf+973MHbsWOzevRuJRAKLFy/G5MmTMXHiRFRUVKCxsRHxeFwtdBS+8Mjn81JksrS0FD6fD5MmTUJ9fT3S6TRGjx6Nd955B8888wySySQWL16MdDqN/v7+QT/JZFKKmFZVVaGoqAhVVVVoaGhAV1eX1OrYsGEDysvL8ZWvfEXqceivk0qlUFJSgrvvvhtLly6VehwOhwPhcBjl5eUoKChAdXU1ysrKsG3bNpjNZqxcuRJz586VehpUE/lZr9eLmpoaHDlyBKFQSJ79RNi/fz8CgQCqqqpgtVoxZ84cHDp0CFarFatXr8bChQulbo3P5xMVf8mSJfjyl78sfvKHDh1CPB7Hli1b8Oyzz2LdunUIhUK48MILcd9996GyshI7d+6E2+3GqFGjAAAlJSVYvHgxJk6cKCcMkUgE//Ef/4GioiI4HA6pn8TaIqFQCG+++SYmTZqEyspKAGrD9XlAcaTiSIVzA4ojh4fiyKMY0RMuYIAw+vr6sGPHDmzduhV+vx8lJSVSMK2lpQUWiwU2mw3jx4/H3r178cknn8Dj8eCpp57CFVdcAaPRKGTECtChUAjl5eVydJrJZKSIXFFREWpra/Hiiy+ip6cH1157Lfx+vxR3mz9/PsxmM5588klEIhHk8wMFAufMmYPp06dLZzudTqlw3t/fj6amJhQXF+PAgQPwer0YP368PCcnTDgcRmNjoxT4Y+Vt7vwNBoMUUzQYBqpcUyFUUPiiY6iCZjKZUFJSgnA4LIZx586d6OnpQSwWw+zZs/H8889j165dsNlsoujb7Xa0tbVJsUaHwwG/34/u7m4kEgkYjUY0NjZi165dWLp0KSwWC3784x8jHo8PytjmdDpxww034Otf/zr27t2LX/3qVwgGg5g3bx6efvppyTrk9Xrh9/vR1NSEXC4Hm80mxUOHPhf9tQOBAA4ePIienh6UlJScsF8ADCqoaDAYMGrUKEQiETidTvz1X/81tm/fjmeffRZVVVWYM2cOTCYTfD4frrrqKvkeMFCA8pJLLsGiRYvQ19eHW265BbW1tbjlllsQCATwu9/9DuvXr0dRUREmTZqEfD6P0tJSlJWVyXWy2Sz++7//GwcPHsSDDz6Im2++eVA6cQDYu3cvWltbcfPNN4vNUrbozwvFkYojFc4dKI48fr8AiiP1GPGkGalUCu+//z6am5sRjUaxcuVKGAwGJBIJvPTSS9i9ezeAgeJgV199Nd566y3cf//9sNvtcLlc+Nu//VtYLBbceOONWLduHb75zW8iGo3C6/Vi6tSpqK+vx6uvvoqdO3eiqakJ11xzDUaPHo3LLrsMzz33HJLJJK644goYDAbMnz8fL7zwAt555x3Y7XYkEgnU1tYin89LlWg9SkpKUFVVhffeew/3338/ZsyYgauuugrf/va3UV1djSeeeAJOpxNLlizBiy++iNtuuw1/+tOfUFVVhYULF+Lw4cPYtGkTDhw4gEgkgosuugjbtm1DKpVCX1+fqBishq2gcDZAr2JlMhm0tLSgtLQUH374Ierr6/Gd73wHY8aMQX19PZ566in8/d//PUpKSqSSPDBgLMPhMICjKngoFILf75fg+K1bt6KnpwcLFiyA2WxGMBiE0+kcNE/tdjuMRiP27duH73//+wiFQviXf/kXTJw4UYgEGEi/HQqFUFlZCZPJ9KnnYGICkkpfXx86OzsRCATg8XiknScCr0GiOnLkCAKBAAoKCrBjxw5JbnDbbbdh/Pjx0DRNskcZjUZYrVbkcjmMGTMGd911F2bMmIHW1lZYrVa0traiq6sLPT09Eouzfv167NixA5MmTUImk0EqlRL3rra2Njz11FOw2Wyor69Hf38/tm/fjjfeeAN1dXVIp9PYvHkzHA4H5syZczpDQuEzQnGk4kiFcw+KI48PxZFHMaIbroKCAixevBg2mw1lZWX4i7/4CyxevBg7d+7EvHnzxEWAO82ZM2fi4Ycfxvr16xGNRvHAAw9g4cKFMBqNuOWWW+D1evHhhx9i8uTJWLp0KaZPnw6/34/CwkIcOXIEV111FZYuXYpAIIBZs2bhuuuuQyqVwsUXXwxN03D55Zfjsccew8aNG5FOp3Hvvfdi0aJFiEaj+PrXv47JkycP2sUHg0HcfPPNCAQCaG9vR01NDdxuNyZPnoyKigrk83lYLBasXr0aFRUV2LNnDy6//HKsWLEC06ZNQyAQwEsvvYQjR45gxYoVmDFjBqqqqjB27FiUlJQgk8kgHo+jq6vruJlYFBS+SNA0DWVlZTCZTGhra0M8HsfHH3+MUaNGIZFIIBKJYMyYMZgxYwasVit27dqFefPmYe7cuYOuk8vl8MYbb2DTpk04dOgQRo8ejSNHjqCiogJ+vx/JZBIbN25EcXExJk2aBJvNhhtuuOGYqV1jsRieeOIJ9PT04Fvf+hZmzpyJXC6HiooKvP/++wiFQgiFQmhtbcWyZctgNptFSaf6bzQaMW3aNGzfvh2HDh1CNpvFxx9/jBkzZiAQCJx0/9TW1mLjxo1oaGgQ15HKykpkMhk88cQTMBqNuPvuuzF16lQhtebmZvznf/4npk+fjuXLl8NsNuPHP/6x2ERmcbPb7Vi3bh3279+Pxx9/HPPmzcOLL76IhoYGaJqGjz76CE8//TTq6uqwcOFC5HI51NTUoK+vD9u2bUM+n0dTUxMOHTqEhQsXIhwOY8uWLaipqVHuhJ8TFEcqjlQ4t6A4cngojjyKEdlwsUETJkzAD37wAxiNRlHj7HY7Zs+ejYkTJyKbzUpaRofDgWAwiGXLlmHGjBlIp9MoLi4WI1teXo5//ud/lrSyfr8fRqMRU6ZMQVVVFZLJpOTz1zQNfr8fq1evBgAEAgFomga3243rrrsOdXV1yGaz8Pv9cDqdcDgc+OY3v/mpegIGgwEzZ87EuHHjkEqlEAgEYLFYcM8990iAsKZpqK6uxq233opoNAqbzYZgMAij0YipU6di7Nix6O3thdfrhd1uR2VlJSwWi/iJ5nI5ZDIZGbBqwaPwRYQ+feqFF16IyZMnY926dXjllVfw4Ycf4r777sP48ePx7LPP4p577sHkyZPx4Ycf4oYbboDNZhO/cSKfz2PKlCmYPHkyfvrTn+KFF15Ac3MzrrzySgQCAezbtw9//OMfMXfuXBQWFoqN0JMJ586mTZuwfv16TJw4Ea+//jp+97vf4aKLLkJdXR3eeecdrF27FrFYDHa7XRaW+XweyWQSiUQCwICbw/Lly/Gb3/wG3/jGN1BaWoq+vj4sWrRI4mWGcyPgvF2xYgV+/etf46abbsKECRPQ2NiIW2+9FVu2bMFrr72GKVOm4Le//S1efvll1NXVYe7cubBYLKiqqkJhYaEQ3JQpU3DffffB7XZj//798Hq9WLBgAT766CM8/vjj8Pl8AIB4PC7+6U6nE2PGjIHf74fBYEBJSQkeeOABpNNpOS25+OKLsWzZMuTzeWzduhVHjhzB3/3d38Futyt3wj8jF
EcqjlQ4t6A4UnHkqcK4du3a07rAunXr1q5atQqapsFkMsHr9cLj8cDpdMqu2WKxwOv1wufzyY/b7RY/bY/Hg4KCAvk8f6xWq1yLD81iZG63+1OV7vXF5giTyQSn0wmv1ytpd00mE2w2G8xm86cqf9NP3ePxSPE3h8Mh2ZkIq9UKt9sNp9MpPqA8tmTbeBxKf3yr1Qq73S6F2RSRKHzRoWkanE4nqqqqoGkDdXSWLl2KZcuWYdy4cfjSl76E/v5+9PX14corr8Stt94qxVCH/rhcLlRXV4sv9/Lly3HFFVegoKAA4XAY8XgcK1aswJgxYwbNS/38zOVyqK+vh9FoRElJCVKpFFKpFMrKynDRRRehsrIS8XgcPp8PK1euxKxZs2AymWA0GhGNRuHz+bB48WKxCTU1NYhGo/B4PPirv/orXH755YPs0Ing8/kwZcoUdHZ2wmg04oYbbsBVV12FpqYmmM1mFBYWSjBzVVUVqqur4Xa7UVNTI0HEAFBTU4OysjK0tbWhpKQE99xzD2bPno3KykqYzWbs2bNHYm+uueYaOBwOeDwe1NTUYNSoUbBYLDCZTHC5XLIIfv/993HppZeirq4OACQ+4MYbb0QgEPjMtUUeeOCB1rVr16475S+ep1AcqThS4dyF4sjhcb5x5M9+9jOsWrXqgWOOldPNL19bW5vftm0bgGNnLNGrACeL431nuN8Pvf/JXmNoZ56orafathNdS0Hhiwx9IcVkMolUKgWbzSYV7rPZLBKJhKSbdTgcgxZ4Q6/DIP5MJjPoOplMBolEAg6HY1C1+WPN7WQyib6+vkG/47V4/VwuB5fLJWlgAaC3txd9fX3w+XyD6nH09PSIWqhfoJ5s3+RyOcRiMUm/zar1/f39g9pot9tht9vlO3rCYjpsJgzgYhsAEomEqI4ulwtOp1Ouqb+O3gblcjl0dnbC6XTC6XRC0zSJj3G73aJQfhYbZDAY/pjP52tP+YvnKRRHDv/7E11LQeGLDMWRJ+6b84kjp0+fjg8++OCYXxrRDZeCgsK5B72B0mcW49/0gbj6tNbHuo7+Z+hnhxrGk7kGwWuxPfrf6ZU/+qbra6gMve+pGFi2Y2gfHK+N/NuxwPbp267vF97veC4OJ7MwZpHM04HacJ0aFEcqKJzbUBw5fN+cTxw53IZrxNPCKygonFugkR5qxGi8TtY4HUvVG+76w13nWAb/RO3Rf4/31DRt0Oc/i6I+9BrDtXG4exyrf4f2y/H68GShYrYUFBQURhaKI0/cHsWRasOloKBwEjhVw3imrnOizw3392OR2OliJF2ehiPIkbqegoKCgsLIQ3HkZ2vP6V7rbOJIJXcqKCgoKCgoKCgoKCicIZx2DJemaZ0ADo9McxQUFBQUvuCozOfzhZ93I84WKI5UUFBQOG9wXH487Q2XgoKCgoKCgoKCgoKCwrGhXAoVFBQUFBQUFBQUFBTOENSGS0FBQUFBQUFBQUFB4QxBbbgUFBQUFBQUFBQUFBTOENSGS0FBQUFBQUFBQUFB4QxBbbgUFBQUFBQUFBQUFBTOENSGS0FBQUFBQUFBQUFB4QzhtDdcmqbNGomGfF7QNG2mpmnFn3c7Pgs0TZt1Frf9Ek3TCj7vdnxWaJo2+yzu+0vP8r6fe7b2PXD297/CqeFs5kjFj58fzmaOPJv5ETj7bfTZzJFne98PB1WHS0FBQUFBQUFBQUFB4QxBuRQqKCgoKCgoKCgoKCicIagNl4KCgoKCgoKCgoKCwhmC2nApKCgoKCgoKCgoKCicIagNl4KCgoKCgoKCgoKCwhmC2nApKCgoKCgoKCgoKCicIfw/kWFA/Wu0KJ0AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [], + "needs_background": "light" + } + }, + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1wAAAFDCAYAAAAu+g+jAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nOx9ebxkV1X1OvXqdaczJyQkBAIhJjIqIjP4McmMCCICn8woyCwKCEKYEUGRWWSIgIg4MAUhIHyCgAhBGQVMMAlJCEkgA+mETne/96rqfn/c2vVWrbfOrXrpF+hh7186r+rec/bZe5+99zp333NvlaZpkJSUlJSUlJSUlJSUlLTx1PtZC5CUlJSUlJSUlJSUlLS3Ul5wJSUlJSUlJSUlJSUlXUOUF1xJSUlJSUlJSUlJSUnXEOUFV1JSUlJSUlJSUlJS0jVEecGVlJSUlJSUlJSUlJR0DVFecCUlJSUlJSUlJSUlJV1DlBdcSUlJSUlJSUlJSUlJ1xDlBVfSbk2llHNLKTtKKdvo3zFXk9djSylf2GD5HltKGY7lurKU8s1Syq/N6HNwKeX1pZTvj/udPf5+xPj8uaWUi0spB1Cf3y2lfJa+N6WUb5VSenTsFaWUd2+kfklJSUlJuweVUn67lPKVMW5cVEr5RCnlVzaA77tLKa/YIBln8hrj11VjPS4opby2lLIwo09V91LKS8Y8H0rt++Njx5FcTSnlttTmhFJK/hht0k+F8oIraU+gBzRNcyD9u/BnIUQppV859aWmaQ4EcCiAtwD4h1LKoRUemwB8GsDNANwHwMEA7gDgMgC3paYLAH5/hkjHAHj43AokJSUlJe2RVEr5QwCvB/BKAEcBuD5avHngz1KuXaBbjHHzLgAeBuDxtYZz6v5jAC+dceH2YwAbcmGZlLReyguupD2OSimHlVI+Vkq5pJRy+fjz9ej8Y0sp3yul/KSUck4p5RGllJsAeCuAO4wrZFvHbTeXUl4zvtv0o1LKW0spW8bn7lpK+UEp5bmllB8CeFeXXE3TjAD8LYADAJxYafZotGDxG03T/E/TNKOmaS5umublTdN8nNr9OYBn1y7cxvRnaAGmdiGYlJSUlLSHUynlEAAvA/DUpmk+1DTNVU3TrDRN89GmaZ4zbrN5vFPiwvG/15dSNo/PBZY9a7x74qJSyuPG554I4BEA/miMjR8dHz+mlPLBMc6eU0p5xvj44WNeDxh/P7CUclYp5dE1Xl3UNM1ZAP4DwC9dXd3H9C8AlgE8smO4vwHwi6WUu8ySKylpoykvuJL2ROqhvfi5AdqLlx0A3gwA4214bwRw36ZpDgJwRwDfaJrmdABPwvhuVNM0cSHzKgA/jzbZnwDgugBeRGMdDeDw8VhP7BJqXFl7HIAVAOdVmt0DwL80TbNtho5fAfBZAM/uaPMhAFcCeOwMXklJSUlJey7dAcB+AD7c0eYFAG6PFstugXbHxEl0/mgAh6DFuN8B8JellMOapnk7gL8D8GdjbHzAeKv6RwF8c9z+VwE8s5Ry76Zpfoz2btQ7SinXBvA6tBj7HsdrlmKllBsD+D8AztoF3QGgAfBCAC8upSxW2mxHe5fsT2bJlZS00ZQXXEl7Ap1SStk6/ndK0zSXNU3zwaZptjdN8xO0yZMrViMANy+lbGma5qKmab7jmJZSCtqLqD9omubHY16vxPQ2vRGAFzdNs9Q0zY6KfLcf3zHbCeA1AB7ZNM3FlbbXAnDRnHq/CMDTSylHVs4HwLxwvFUxKSkpKWnvo2sBuLRpmkFHm0cAeNl4x8QlAF4K4FF0fmV8fmW8m2IbgBtVeN0GwJFN07ysaZrlpmm+B+AdGGNj0zSfAvB+tNvj7wfg966GTl8rpVwF4HS0xcW3VNrNozvGcv0zgEsA/G5Hs7cBuH4p5b7rEzcpadcoL7iS9gR6UNM0h47/PaiUsn8p5W2llPNKKVcC+DyAQ0spC03TXIV2P/iTAFxUSjl1XEFzdCSA/QF8NS7o0G5L4AucS5qm2TlDvtPGd8wOA/DPaKt1KKVcv9DLPsZtLwNwnXmUbprm2wA+BuB5HW0+DuAHuHqAl5SUlJS0+9NlAI6YsX38GEzvrDhvfGzCQy5atgM4sMLrBgCOoULnVgDPR/v8VNDbAdwcwLubprlsTj2Yfnk8/sMA3A7tVnyMX4YRuPkIzKc700lo7/bt5042TbME4OXjf0lJPzXKC66kPZGehbYyd7umaQ4GcOfx8QIATdN8smmae6K9sDkDbWUOaO8IMV2KdjvizeiC7pDxg7yo9KnSeJvgkwE8qpRyy6Zpvs8v+xg3+1cA9y70BsIZ9GIAT0C7raNGL0ALhvvPK2tSUlJS0h5DXwKwBOBBHW0uRHuhFHT98bF5SHHufADnEC4e2jTNQU3T3A+YbJ9/O4D3AHhKKeWEDl71QVv6J7T6vWh87L6Em3+H+XRnnv8P7fbEp3Q0exfal1w9eF5Zk5J2lfKCK2lPpIPQXihtLaUcjvaiBABQSjmqlPLA8QXNEtptE6Px6R8BuF5svxu/5OIdAF433ouOUsp1Syn3vrqCjfe3n4zp58CY/hYtmH2wlHLjUkqvlHKtUsrzSyn3M/zOAvCPAJ7RMeZnAXwbwGOurtxJSUlJSbsnNU1zBVpM+ctSSuzyWCyl3LeU8mfjZn8P4KRSypGl/YmRFwF475xD/AjA8fT9PwH8ZPzCqC2llIVSys1LKbcZn38+2gurx6N9wdN7yurbAZXXPPQqAE8opRytJ+bUXekFAP6oNtj4Tt+LATx3nXImJV1tyguupD2RXg9gC9o7VKeh3QYY1APwh2grez9G+2zXk8fnPgPgOwB+WEq5dHzsuWirYaeNtyf+K+r72tcj3/1KKb+oJ8bbGe6B9s7b/0P70ov/BHAEgC9X+L0M4+0WHXQS2pd7JCUlJSXtZdQ0zV+gxbaT0D6ndD6ApwE4ZdzkFWhftvTfAL4F4GuY/xXofw3gpvSc9BDAr6F9Acc5aLH2ZACHlFJuNZbj0eN2r0Z78fU8x2tO3b6F9tGA51TOz9Jd2/8HWlztor/H/M9TJyXtMpWmyd98S0pKSkpKSkpKSkpKuiYo73AlJSUlJSUlJSUlJSVdQ5QXXElJSUlJSUlJSUlJSdcQ5QVXUlJSUlJSUlJSUlLSNUR5wZWUlJSUlJSUlJSUlHQN0bw/JFelUkr1rRulFACAezGHO1dKmXzv6uvaz3su+CrpuHGM5ah9dny79JglQ1ebq9N2PeT4sr57G/E87c16bjTtqq1m9dfztThbz3hBmhscz664nhXrtXFr1JUPrk4erOUm5TEP70q7S5umObLaIWmKEiMTI/dkSoy8epQYObutjlujPQ0jm6axAs91wVVKuX3TNKfVzi8uLs4lmJ
7r9XoopWA0GmE0GqHX660xymg0QtM06Pf7aJqmEzy6xnLgsh5nHY1GUzJzv9FohIWFhcn34NXr9SbHSikYDoeTvgsLCxiNRiilTOmpCVzl5jbcX2UPniGHs1uv17PjOvvo2ErcL2wUtmHeLMvCwsLEXrUgDT5hSwAYDodr9IrPs3SelTiYX/DSftFOZXbyq86qWxc/5cu2WlhYmJpjnUPl78YN2WKO4pzakvvyPCjP8G/2+5rONVJ/Zx/n/sPhcCrm2IbA6vzX8oLz8+jjYpj71xYgvV5v4pu1BK9y8hjqe7W4V3tx365FtOa4mm3Zf9SHmqbBysrKeWsG2IdpFj4CiZGJkYmRNXslRnqda5QYuXtj5MrKyhreEz2qZ4hmgYkKzQKxQ6oR43wIHm3V2cNpOJHXAledoZZoOeE5Q8bYLpkxhfxN02A4HHaOy0klnE7t5mwVckQCiX8KbMFH7TocDtc4JwMYj6mk52fNay2hqY7BQ2VQfm4BEgsLDVAAExvp8fje9U9lULuzPNqvazEVCyXnvwqINbn6/dXaSMzpYDBYM9duUcS2dP6oix5NJiwXz5ee41h11GVrXVTU4lhld+cATGJR54Ptz0lVj3GSZV+Nc7PmOWR0+uoxziG8OIi2YVft53ywBvYhW8xhAEm0Z/91ccrfk6ZpPfgIJEYmRiZGKiVGrrW/2joxcs/HyF3eUhgDaEJmAYPYAbocwR1XZ4vql1bAoi8rzQbjyoSbZB6PEz47eQRzyBH8uqoJLCdXCRzVErUmXq2yuMQbejl7qs7OoVhWll2Ph7NHgmNdw1Yqu46twMVyMwjz/EffWhVSEywvFDRwdI5i/hVAVDaWS+dG/UDJJcyoTLFeYVeWXeeMv9f0Yt/nWNQqa80/HHiyH3D1qmtuGUCdTdiOKqvqqfLWfIfJ5R2OLxdLbpyFhYUJcHEeUoDmJN7v9y0wx9yGL/E8MR/1J45Ll7Nc/LPOtdhzn7vAJKlOiZGJkYmRiZGJkXs/RnbRhr00Ixxy1i24IKecKqAGV+fXJKu3d52MLIde4atRtV98jslW+aJKxrJyfz6nPPkcJ9rRaGSrb6w3kwYnJwl1XieL9uXKgrNHjMdtef65ounswrKpHpoQWZauZKfnawnX6R1BORgMpvjV2vPY0UZ9IKqNGrj6OfrwooT1Z9Bm3XgOWAa1dZx3lSBN2rqwUr61OeA+tXmPPjzH7rzKqDKFf2lcsU1YX7XFrLnkz+qfmuP0bgDbW9vF2BrnPB7bXHMBgKlqMI/pcqyzcRdIufju8v2k+SgxMjEyMTIxkvskRu5bGLkRL82wxg8ha8Gqwa4VKxcMrKAGZExOnI9zWtXRCg3zdzrwd6evO68gqm3YFi6A2elcO7avVtFYDq4iAKv7wVVflW9hYQGDwWCqAuV0rlVA9ZhWonQe2WYqH8+V2tUlHNUnbFCzM48d9uHKXbSr+TPziaqbAgf7Nh9n2XUu2Z+jQlRbbOn8aWKIPmy/Ll2c/6quzJPb1yr07IcK+I6vjslxrH7jqvaOt8sZQZpUnU+xfi7pB+/wATf/bB+2oeYn9cHgXevHOrDNXH5QH+DF8Sw7Jq2fEiMTI/V4YmRipM5hYuR0fx53b8HIDdlSCKy9ynMTz+c1MPSzU8ABUfxlA/DteWcAF0R8XCtpTm6WSZ3dgY+7+mfAYP3Z4dz+0TgeFS2tAqg95tU/xmE5+PZ3jbcDOj4XcnF/tqnycPbTc7WFAM+Znp8VDNG2K/k4wGbAY2Bh3+G/umXHJbdoXwML1V8Tg6u6uMTCD6Zz7Gqfeas4qlv4qcaIm5t5PmvC08WIJuuYC7Uh2y2IF28cl87nedERlTQH8pqzuD3PP+vDMT0L3FV/p5fbulPLjbwgd7mpa+6TuikxMjHS8U+MTIxMjNx7MLKLdvmCS52r63t8ZuW4MqHOMa8SNbm6EpSrPPF5lte10ytjN8nclvlq4KusfDXNAOcARe1bk1tl5CRfk5vbuvYuwNyYLCcnpdr2lminzs/9aiDOutfmVxOVVjlrFV9HzIP9iuXjeWf9dFuF8yMFJPVNta+eY50i3tin+LzqFLxURydPkFac1Bc1DpmXjqf+UkvUKrPy5b+8OJuVhB2xrzB4zVrARV+NIwdcNdlnyca2YR05/9QWgsFft/Q4WZLWR4mRiZHcNzEyMVJtnRi52ndvxsgNu8PFA+sVdJAaQwNAnaPWr8aLjztQm8VHeTjDA2tfFVuThcfViQ4+OoYLWMeXeTtn5sBRu6iutaCeJ9mwHEpsL5ZJk7Mmw2jPW2Y0+GclldptYhecLMto1D7Y7XxYx6/x5MB0c8B6cz8FL8e3ZuOYr1r8uW0M8bm2OHELHT7ngNKNx3Ne8xX1YfVLtaPKqeeULwOoVrNqyVN5a07h51t0y4bK5GJJ31alCxiVoZbMa34aMjoZtD2PX7PtPICWNJsSIxMjdczESKwZR+2SGJkYuSdgZBdt+AVXzfE0YQbFg5fsMOqkzlhB6hQ8bpd8akB1LL6lqeOWMl0F4XPqcF124Ilz42tf/quOobfeNdm6MeO7S8BcoegKrnm2JnBFq7aVpStZ8nGXmBRI1a4hV42/9tVkx4mTt6nEd+WhtmA5nI+7W/xdcjpd47MmSubvfKo2B5rMnd/WEk0tcdXiRcdmG3Ff9d2aLPw5tid0AaP2qX3W7+pjqg9/d3PMMauVXNaZqQbGaiMnc5fvzNI7aeMoMTIxUtslRiZGujYqS2Lkno2RG/KWwlowOaG0Db8NpuaA7NgcvNpOk6STR4NFJ73LaYP0Sp3Hr5FLbPr2GJVJvztZZjkyJxD+p7pH5UcBoivIagsH57DBX7eO1OzDidrp5249Az4RKbFdGRh0Lt3cqkxuywProDbXZNFl2xhLgV0rQdxe9VT5+bj+rVWOdT70n/Mpp4+OpQs51pHP6RuUHAiqTaO/+82TWcChcrlFATD9FiQXp7Vxw5YBMOq/2k5lmlePLjnU5jGGazcv36Q6JUYmRiZGJkYmRu7bGLlhr4XnAVVIve2tgKDKaNDprV43uTrxXUGh7eZdSHAwqwM7kFI9XSINni7YHH/WwwEP89C5cCAVCVXt6cZUqunt2qm+et7poQlcwYT5xW1hPe4WGRpMKocmDTeOtlO/47FqfsA21ASqSYZ5O7mVBye6GuAzqYxdoBeyMRDrD19qn3l8Q+2nv9nDnxlgnX1UFpc/FDAciKg9nE85G+o/tbuLJ13EuVzq7NflezymVpo1j7r8oTRvrkyqU2JkYqRrp/rq+cTIxEi1X2LknoWRG3aHq2uvak1JYPXNPEHOuWrB5wKZ+ToH1iCpJUkeP4LHAZVLKrUJBqZfO8uTzKQVoi5yVSoOcE7ICtZhn9iP7eaJdeE2NR1qCwLn0GwvpwP7hgbXwsLC1CtqtcrByVSrLEp8jPuwP7vKGMvD2xSUpyYstSVTbeGgcVIDQwcCXXHi7KDJmiuiLg6Ct3ueQIGf5XBJnHlEv9qYzMvpyxVC9cuwP+8VVx8O4n3/TlcGfvVtbqc2YL1rIO7mzIFArfLKv
JyNXa5i+3eBfdL8lBiZGJkYmRiZGLn3Y2QXbcjvcPFnl+R1b6YzkirkHNE5DpMm99o+1xoQOd56XMFqlp6qE8vIe505uWjSYN2jLe+dr4GEk5Ftr3qWUtY8L6A614i3HKjMbI/aVgznH3yedW2aZlItUjld4nDJuWbf4M082NYst9pUk7HbSsEy6aKE22pCDNL98c4/nQ1mJSJto77MY6sN4rW5aluNcX31rNNB50LldcfdvKgd+Xc/9FfvGdScz3LC51gPHRSEdGFdyyHOL1j+mAO2scocsrh55HFj7mqLHc1NStF/PeCS1FJiZGIk2yMxMjFS5zQxcu/ByC7akDtcPBEqJDBdTZq3MuWSvH7XiVVDOQdnObitGlC3EHRVW7QPO5D+ZedUnVQf1Zv/1fryeZcM4jwHgAMVR5pg3dzwP7ax2rPLX5QXyx/ECcGBdpfsMTZXh3is0Wg09epWBSP2q1m+rADjKkRaXXSJKvrUwJHnpJZw+v3u+orzLf4Xeqvszh+0Hds42vf7/TXzrG+/0vO1ZOfmoUsfbeeAJM7xnHEOU8DQ+FC5XO5iO0YbfmZHfxDV+ZOLfZdDeCydW8ejK88krY8SI6f7JEYmRqpOiZGJkXs7Rm7I73CNRqOpvaTq7EHhUBHEfHXKV67a1ymsMnA7dXxOOno++rmr4VoQ8ZWwggTrUQuucAT+ZfRoo1sSlA/r62zNc8GO1uUEIXdta4JWF2ty8dW9Vky5rXvrlgv6GjiXUtY8TN3v99fYWCt+tQDjAGQ7OF1rc6G81Xb8Y4sqJ9tLkwXz4oWNjhP6uaQYtg6AZH04Jvh4zReYn7YFVn8k0v2oJceZyhjHNCaiX1QsA4CUQh61I1eVNb44rtmm0cdV/weDAfr9/lQOY9006fOCgMdTHfjNZJxbeM7cIoMrcqx7rdJWy826yAs+KkfS+ikxMjEyKDFy7fHEyMTIfQUjN+S18OyYoUTcntRkppMQx2rG0oTDE83Br06hARDHSlndEqAJQBOaVk9Y3lIK+v1+tcrDMnDCcYHEfV0yUyDVRNQ0q7dTWT6X/LsSjquwhdyqnyYjZyNuF+Ny2whs9RM3d6yXa9u1IFFwinnl8dUH2W6xeKhV3oDp33LQYFbf1rnhvzFOLAp47tQuvDhTG9Rsy34SxHPikjkf4wTDY2qCVsBRu6mNo52+ojZ4Apj8ACxTDbCUn8rQ9eplnRsex4GF9ldeLhaCP+cz/s7gr7K7+HA+r21rC9eohEcbjlWeHzd/SfNRYiSmdGQeiZGJkYmRq3ZQSozcezByQ57hYuO7yQXW7tnUhMbVv/jMgc5OrEkSmK68OJBQXnyO5eXP3F4dOQKeDR3Br2NEe+att1xZTk2aSgwI2j8qC2oztb/qzUlMk5f242SkfGLutJqmc63A5IJDAY159Xq9CZiz3O72sb41yS1a3Fgxp+zDOp9qBzdX3EYr1zVySYjBvtfrTW7dDwaDNYsvtWmAiKvq6EIj/L0rYXUl0Zo99JjmCK6s1XiyT9ZAgI/xcwxMvF/dAZ4D7ziuvtm1iNE+ysv5dSwkuuLMAUTNB2ugznMd3zUvB2lFM2l+SoxMjGQ+iZFr54rbJEau5ZcYuedgZG0MYAOf4QKmE6YKFdWSEEyTSfzVRB4Kj0bTe4b5dnmQcxx2GPer1RFg2kerL7zgCHkiGfFChCeZE4dLwg5wHUA42bgdX33rQ5dsA50bFwy1fqwzn9d/w+Fw8s8lNQVgrTxpEo3g4sobAyDbiBcTusBx4B633mNct3+bdXJ2DTl0zCB+FoIXCax3VJK4v/MBNzbfTg+9YixdWNWSAcsTNgmfCt113tmODObaxvmIIxdj3F79iGNGgc3FiOvv5OkCqS79hsPhmudvOL+ErKqz2rIGuCqPk5PHcL7keLs86qhmr6T5KDEyMTIxMjEyMXLvx8jONrsKoL1er4kg5CThJoAnUJXlpKGVDT7Px4K0rY7NY0SgaDWL29Xkdp9Z31pSVp69Xm9NcqrZLZIr/61tX2iatbfDGUzdHMV8cMUqbM2JnbeGuIQUY3BSCQB3wMX24sqSCxSWKyjab9q0aWou2YYqu9sjrlsSXDy4IJyn0q8Jj5OkW0yonvE5Enu/359s9WFbRhuuwgS5tyNx4mZ/cdsKQn71IY2BGIMXbDW7OqBwbbid2sqRJnudI553lY951/KU8nU+zX85sbvYUF+Lyrsbw+nJfGtjukWIs7+TyeVCABgMBl9tmubWawRLspQYmRjJtkiMXNsvMTIxcm/ByMFggKZp7CRs+AWXnJsS2vSd9Auj6C175tNFajQ+5pwjqGZAvQrm4OP2vP9VnUbH1jG0D7Ca+NRRFRS4j3Ocmr2co7LtXYIN0ipJF8Az8GmQR2IM+7EO0SaO88PNwZc/R5tIkAxQ3IZJgWc0Gk21Z73UP9h2Lgm7hFxbZOlc1AA0gELHU904sbgEzcRzqbZSQHaJLebVAbnqFd/jnCZM1lkrjqzbLBBw9uE3GNV0Uv90OYL5a7xp5dLFlwKjyq4go/HA8mlu4fmona+N6RYJLm9xXC0vL+cF1zooMTIx0umVGJkYqXokRu75GDkuHtgLrg15aQYDgAaTU4aEso7a7/exsrIyxUcrVy5J6LhsTJfU4zgnPHbISGr8A4KR6CM4OBA5IdecgW2h8gHTr3JVudnRmIdWbjThcxBzGxcADFIuEWmi4TnXYNOxou1oNFpTCdREoltMOMFrold5a0nU2ZsTgtpB/U4BQBMqP8PAPhLt2E5sF124sP3iXCxauPKrvqf685aV2tyw7d1ccV/2j5p/sk6ukqcyBOn4muDYPhqjPDYvziI2uK/TgWVUQHc5hOXQZ2lqWx64H+ebOM5VVvUz5w+sk+Y1bqsy65yx/bmNu7ugeiXNT4mRiZFsu8TIxMjEyGm+ewtGduXdDbngUidxiqkDR5BpIHFwuepfzZG4kgOsvmHFjRt/lXd81mqMm6zRaGRfs6pAwsdnkauEOB7s5L1eb41Dq8MzuUUTO7cDwBjHgaQLtvjMwbOysjIls7O7k6vWxs2H6qJtHBCrHAFobMuoNCqYxXgMZgEoMS+8vYHH4e0nXC3lsVmfSDThI/xdebh41LeF8RgagzW715Jh2EyrbhGDaoeQnwEyeGkyD93UxurnLJ8eVzvWFnla0XOxEvaq5RXHl8+rDdmOCsIKKgo0XeOoPVxc1BYCbAv2DQfASfNRYmRipMqYGJkYmRi5SnsLRnblsQ254IrBNFEHuYnhN9twnxBajc1tXYLXANCEXgM4NbhrrzqpLgyMOqY6kfLnKiHLxAnSJRdu45xG3xjVBSLBg+dAx1InrgWyG4vfEDQr4NWHdK5CBk7oEYC130HgLS5ObpZJg4sTmfLR/iEL+xH3UTu7uVS/ZrtwsnGLovBDByj6FioGTR6Hq2hq99Cb+2nb6M8Jkbd71AApbB2kCdC9/tdVmV0OYvtpjGui1rlVu/CcOb9S/+L5UPnd4kvn1uUdBV+d85oPsB/yyxPUJ9iW6t81gE2aTYmRiZG1sRIjEyMTI/d+
jNzwO1wuCbOwzhG6ErUe4+DRCdQkE+eiSuAmNIJMqzAqP8vqAkz1VvsoqKhNuBLobBE2Y6Dl42p/l/CdgzHpnKj8tYdntWrT5QMxTvCJhyDZxqxLHOd51d/BUBuoP+lWArVPJDp+1sCBLN9+Vptp4gnfcnbkz6q3JlGWtZYMuaqofdXvdJ44yTLo87g8H+rrrH/01wpk2IIXFG4xUfMBHVN92+Udbhtg5sbk8Ryv6O/G1jl0OjpZ3RjMU+3rfJbjIr6HL2iVmecm+nM8KZCyvyt1gUlSnRIjEyNVX6bEyGk78ufEyFVKjNz9MbKLNuyHj93VLVcCakap3TrvCiqXCDVp8BYCHZ95ue8u8bATuO+aWGsJ2R1XcrpzMo/vzh6afLvAg9tr23AmlsWBnOqmAKH6qR/o72MEf03SLikOBoOpBz41iTFPdyz0qr3ViI8pbw5ilTn6KwBqBZP7qT+q78WY4QNRrawBgANhXXixvsxbgUDnhsdRgGQg1facD2p+75Kzzl2tOsU8Qm+eB12EsV4qly6QnM/X4sGBnPq9ju/O1/JRkMtpPK5uu6iR062mVxefpDolRiZGJkYmRiZGTvxj6LAAACAASURBVLfdGzGyi2a/t3OdFA7DinZVSmuJuzZZ8xwH2kTDV7I6wS6o1DldAmJyAeXO621ptYH77ioGXQ7M4KFVJ5eknE4KhPEgtNMrbFcDdeYfMqgf9Pv9qS0bysfNr9qc9Y3j+kpjZy/mPxqN1uwjd1tNeLHCVao4F/943tmWOh9RMXSA6+aZfZIfPGb7uUWLs79LlvNUr50/abwrdQGIbpEJ/TV36P575wvOpmxr9cdaXCiYzmobPN02EjfnDKzBQ+OA50JBlv/GPLLfcRyo/y0srP4OiuZg1SfpmqHEyLXnEyMTIxMjEyNZjr0RIzfkDpdWCPRWHDC9R5SPRx8OgPjX6/WmHEOdVZO4O89tmO8sHuEcoV981uqk68vHtPrAcjkZ1AHVkZSibdhXq2Yqdw28ua8GvksEtcTA209YBtUj2ur2BFfhqgG246dbY9Sm3FcrcM62QbW5Zp4xLu9pZ9v2er014OwSqPJlHTXJBaixnLXFDRM/MO9AxAGAbjnSCiXLx/HDdlCfcOOwTPpZEz3bggHc2VBBdp5550SvdgmeuhDRxM92cqAQxM9Y6Ku1gbVvHlMddVGlflEDdCaXH5xdktZHiZHTfRMjEyMTIxMjuf++gJG7fIerS+HJIHJr1jk7T9I8ijtj1hRnw3YlcubBwFNLphoscdxVCphnfI5EwODkdGMeKgeP25VwOQmono4v66l81ZYKTI6X2tzNG88//+NzNTswiDn9mZ/aTNurXdSvtGKi/VW3kI0rLHzeJQk3r/y35hvaz8WjI5eoVRblFQAZ/9i2KiePMWtR6BZSNf90gFUDHY0BHpvPOV/l465SrraMdlpJrM2DyurI6cX2rsnBY+hiw+VMF+uz5E+qU2Lk9HiJkYmRiZGJkdFub8PILtqQLYWqkP6r9elKhG5ieayu75Gg+Za248+JxvFiZ1GH7poM5VHjF2M7OzAvJ5cmCx1L27EjaVAE6XYJ5VULbAeYOnYtObvEUaMuW2swsgxapYu2ushxwaR61wDbBd08CU2/6zgujvR7jSeP6yqVTs6afYNmJSPnt7rY0M9KfLwr/vhYzTdqvlyT0bXjnKI6avWyFh+qX8Q+j8G27dJnHmI7M/jNcwHlckrIpDkjaT5KjPSy6PnEyMTIxMjEyD0ZI7vG3+UthZoQOXHzRKvzuUlQUkfpmviaczkg4c9adXB9ox1PjBq3Fki1IFVQcY4667Y/O3lt0udNwKqbysA8nEO5JF7K6luIeBtEtNVXnCq/WQsH1SHkVZBUO4WurtKngKG2AKZ/54b7OVtpIor+vNjRqq3bFqGyBF+udKvMmrjd367E6vxN5XCxr21mUU0/Phef2TYxh9qGZec5ctVWHUMXDiyLq9a7mOL+XfLplhHmXVvE6JzWtpKEfZh0+wSPxcS8wsbrmc+kaUqMTIzkPomRiZHaZhYlRu4dGLlhv8MVpIYJ4dTZ1ejuWNf+1hiLz/HVrwKBk4cNxXw1+HmsCGC+VVwDPealfFgWPq5A7ECK7aMyuiBwlRtg9Xde3PwosOh8sF2dvTUAagHjvncBvyMHFNpebaCBym+rYXlcHw1St0ByyYaTks6v2lATvX7W/cucuHjOWDeNP63kaF+XrFQX10d9XvdNO52YrwIU83b+VNODY9sBdfiKLmq4vyOdJwdAuiddiceobUNhctu9NF7Ud2fJMI9+zjeTdo0SIxMjEyMTIxMj9z2M3LBnuGpO4hIyfw8ha/s4a8lSHVMrIAwYnGQ0GXQ5N1/tu7+c0N1kufYuaJ1dajqHbtxfQVTPO4BXPdlm7hyPVdPT/avNv/MbDZgYL/b5OhtxEEd7TSLMxwWcJne1q9NfbawyOb21v8oSMcBz6SonXElmv3Z2rgFzkNv+UJu/2uKKAWTWljPn72wz1p37xHf3Kt2uGFJbaFsH7DVZ1X9qMRhtmQ9XbLWvyqW+yzxUd9WXAdbluZpvqI0URGblkKQ6JUYmRjo5EyMTIx0lRu69GLnLF1zs9KGEUpeDaX9N4LVAVcPHZ3YOThRaUajxUoOpfI63S0RBbpJdwmfw5OqTk4fHd7dEFVRUdpatlvR17K5bxZGonRwu2FUe1b+mh84TyxT2YB6sj9OZ9XF9NJi0Xw3oeUzWy43BSahmY5cQ+QFo5c1VWZY/XgWs1TE3htveUrObJiDexsCxx+BYS0y1GKzZTXOE2pGJ3zrlxorvzn46voKpfnbtVe6aXfkHXpmHyllbAAD+YfUasNbyVq/Xm3pFrlsEJM2mxMjEyMTIxMjEyMTIDdlSGJPHvxquyUMTFjueJq2a0Jy81KAxJn9n3gpKLomynBwgOn58DufQsVRmBaDBYDCVsLlNHFcZ2Unir0s+MQfqjM4WbOua/KqLk8VRyK1bF1ziUsfWY9ovZHWv1+XzvKWF7cny8XdOgrowUXBg2d0taZ07nm+2P7etycnnXAVHZdSqjc6305H1c9uInI/qOZeYlIf6h1twxNw6UIj2uiBhmWvE5zXHuHnqqrS6LQ1s71joOUBwPsNycwyzfFoh5HO1BUFXtZ3zaRBvbeF8HvabBShJnhIjEyOVEiMTI7VfYuSej5Fd9t3lCy4X8LMcyPHoOuYCX53QJX/mozyYtNLEfTUZ1sbuCjy1lQZPjO9+SLBmH+anwdwFDNyeQdm1q/XrIg4U1i1ItxtokNRARJMAJ34nsy484jwnZbfwUNDS5Fsbp0sGHqPX601VkWr6qd8y9fv96g9Xqm/pudqxWsw6vRRonJ34t17cNiXmq7Kp7sGLfaXfX5u6mD/7tfqAzpHaTLfmqEw6RzxWV/7TxFyrqvMiyFVb1XbM3wFgV97VsRUg1TdnxX/SWkqMTIxkSoxMjEyM3DcxckNeC6/BH381ObBQ7ha7JpowqBo+vuukqcG0nQsmloeTcYwzaxJYT5dYdFLDMTQ4SilTvxTOOrB
NtSKgCZVt5/46p2R5+W/IpLaq8WLwncUXmN5mwXPMtnN24Xb8MGfo2TTNGmDWsbVSq4GtQK/+FroOBoMpvmp/F3z9fn9qTLcYUaCpJTu2JduglsR0HmoJgpOY8nSAH3w1iev4zhejHSd1nmMFpfAHXQw5n2adQyeWPbYDuMUTy8T683YKAJNq/Gg0mpxTWzjZIp5rWygUpDUXan5V0GDZuU8NxFweYd5dQJLUTYmRiZGs3zx8gcTIxMjEyL0JI8uugmiv12t4QlQgnhQXJEx6i1QDTJMoOyEHb+1WZrTl8zqBMRaT3ooNJ2A5lVdtbHVcreToJNb4chJmEGTH07lwFas4p/Zlp6tVBPStRaor81bgjPlj2+lccOJnngoqPJ+coNgGzg9VXpc4NOGpn7t2tYSqyTj4RRvXNigS1Wg0mqpmueSrNuLEy+TAYZYPs61qc8v6sd20bcitsvEx9hWuCKoOLl7Yb1hmtyhxCyK3GGAd2D9ZNpezamDCNnK+7/Tj43zejc/5JeSM9iqX09f5w2Aw+GrTNLdeo1CSpcTIxMjEyMTIxMh9AyPHF5fWQTbkGS51Sk3Ork2c1+B2SUWJnYO/axvHU40zCwTVKR0PdV4GOQ4GBjIHIkG1ff4hJ1fUnN4acCEXH3cJvQbebg5qcx3fo8LCQRZy8K1+B2JunJot1H6cKKKyxoDLn5mPju1sy21qdufvYQd+uxgnIq0Yd40RbbidVj8VFJx+akPmpbbl2NBFyKy5i3MK6NyXk3GMr6/yjfYKBKoDV8UBTNlcZWMd2TY1fXSBUMsDHPfK08Vj2EcXxmy/Lh1YDwVDtrECO/PSvjWAUwBNWh8lRiZGJkYmRiZG7v0Y2UUbtqUwEkR8BzCV9EIprlS4q1blq/3js3sFKgenJggdwwUV31aM7zx+yM0UCUtvw84DdA5I3YS5ylTYlm/PMn8edzgcTtkrZGXduLqi8nA1i2XSOWUwVX8IH+laaNQqruEr6jPBk9uw3jqfqpcmA7ZH8FEfHQwGU7ZkW0cC04SviwXV3y1kwn5sU42r0DH2amtMaBxofIas4cNOJ7ZJ/NMtAerj3J8B3MWeA0gFgpWVlTW+Ez6t8xh9nS/VFg1hU07EbE+NDbYD+yDPSZwLH1TQ0TsIKh/7qOY1Pc56MKCqri7mamN2xUXS+ikxMjGS5UyMTIxMjNw7MbKLNmRLYbyikRO9A2g3FhtBnVOBgHk4gBrLM2lXc6h55FAHjYDV5OCCtEtW1bU22Swr93UVuHDoSCwrKysT/R2w1pKAS+psB60UxbGuLSDKWxMJb7lgO3EC0gqjftcx2MbzzoWzAScTlpn1dwuAqNjp2BEn4U8MsOxHkYgGg8EUIAafCPhox/3DZtGvy9c5cbgFk0u+/F0Bh/vEZ9VNdVG+ulhwMnP86DgKYrWKk8adflcfjD4s18LCwmRver/ft3lA+WjucAuWWt7RuXKJX32bbRrH+O5BzAlvo2Df4n7BK7cUro8SIxMjEyMTIxMj9w2MXF5eRlPZUrhhF1zqoCwEtbUJLL6H4dm46iA1x6k5M4/Nx5zzOCfVtvydJ4snRNtxwld7KHXNhwKLS7DqQF1g5WTlzzxXuuVBkzG3K6WseVDWta3J6wDbzQvz5cDWLRFsO5Wd/3JFhfsxv5jDOKYAwIkggJJ/iJBv56tt3JwFxd505/M8V+oHNeKEqePzcQYBBwgxXti/y4/cYkb1ZRu4qh/LpmOonHye53g0Gq1J/uyHXTHj+PG4zrcc6MZ46gdKmus0pzFffr6nKy/q9ozwLae3y9l5wbU+SoxMjEyMTIxMjNw3MPIaf4YLWHvFHwIGqWOxs9aSIvd1V+LskG4c/qyVIja2c2x+6JJlY5k1ofK4nGzYJvr6zqiy8C1iPh/f3a1ulp0/c1J01RB1MpeU2BbszLU3G3H/0JWTTeigQacJSm/FM0+2SfSJc2w/XaxoP7Y9B7T6R9dtZ9ZBqzPsJyyrsw8fq1V5wu5xPF55OxgMsLi4OBkrdNC5DZnU53lrgas8udhg3eN8yM0xH7K438sIcvbWWGJyeYGJwV3lV146NzUZdU6Cr+aiWDyEbTSGnaya+FlnloftrbHOuuqbymqLLwXR+M4LJTfXSbtGiZGJkSxfYmRiZGLkvoWRG7qlUI3ohHAJjc9pII3HmDKmGqyWdBxIcJXNJdhagDMp4OkYfKzGX6+81WF7vdWtDwy4ys/J7ICFv6t9FdBrC4KYU7cn3o0f5PTTPqGz9nHtGAziXBdosG7xl/vyQ8uaiDXomU/YRMn5MNtPK4TRjoHV+a5bGHA8MEDxYqKWEONYVLLifGzT0HFVXkc6Lywj+yzbqpZM1Z48voKEJkaeY40f1ksXdrXkr8d1fJ1TtVnNnm6Ona5BvACo5RuXm5iiv4sv53fBM/qsrKzkHa51UGJkYqTqnBiZGMn2SozcezBynI+uuTtcIQAbRIOQP7uErcJHcACYuvXOk6YA4+Tidjyma+NAyiUpDs64Tcw6uAnV2818jNuzbfTNPV168diaCDkAOOF0Oa0bp1bNUlI78jF24tDfOb8LXh3DARrvDS+lTPaBK/F8urf+6G+rxGf1NZccVW+eb5cIg9SHnI5uPLWpLhxUfpaTbcFzxPy7kr/yDru5Khrz08Tv7MsA73xF24d/xzjcXtu4CrIubIKH2rrLN3mx6nSPan2Mr7kxKrNMbsHF4MkLia654gp62JRjRXVnYh5J66fEyMRIpsTIxMjESK/7no6RXbQh6KkO3hUwPEGclOOcJlZ9uJSdgPvEuPy9No5z7IlBer2p6lQNXMIpOICUVC5NyAEWmiyir3strsrsKis8BuuutuNgcM6i2wiYRqPpNyIxP5VDZQw5IthHo9Gat0RpBYjlDN4azMFHx3bgyd91UaOk/hltNek6cFf9NQEwwHACY90YZKJ/tNEfGIzxXMWXZXG2Y99yfSM2eG4CeF1FSivUNfszhV03bdo01Z//su9p8hyNRpPnFBYWFqa2j8TfeHMTLzTcYqVmK80LHAexRz/04D6j0WhNNV4fiOdFrc5pjOdiwlUd2beYR4xXe4ZE7R1/FeSS5qfEyMRI5qdyJEYmRiZG7v0YucsXXGz0+O4SMBuck5P+MCAblMHAJXIO6DinwcWTwxR8XPVMZdFXcKr+2l8BgPVzSZ2rFDqJnGTYwZqmmQS3JuuYdPf7Fvy3FkhKTp+QG1ibsDkBcgLiKpbOm0tUCozORjpXHFQ1fZUf68OVO5dMuDoWxJWY4OPsBUw/HKx+pQsFnWsd1+mg5/SzyhZysA31M+upNuFnLpjivFbmawlZ5y4AoUZd+nASDz4KOmw7l0xZdqV+v2+B3+mviV/9W9vEcQUZB4q1xSHHifbROdbxlZeCYl50rZ8SIxMjEyMTIxMjp2lvxciuOfmp7A9hx+Gk6AyqgRnntKLmkq47zp81AbjbpMpTdQiZo39tHHWc+OtuYfNixk202oSrF24BFLdBXUVF+TFwO0eJc1xxY7ldIAXAcXVSQZYrd84/anKr7EqarLSCpLpGm7
A/j8tj1vgzEMbcqC7Mi+VwOjr9uW8cc77J86K8nS2dDVmGLgDT3/xgvsrLxQH/dYs/F188B+4CQMd21b0asKhd1H9jXB6LE676svoBLzZ1PKdXV0y48yGTgkvN9zkHsy66uGaqgWvSrlNiZGJkYmRiZGLk3o2Ru4ygbEQWQB1Ib90F8ZtlmCf3Z+dS0Ij28dc5TIxfc4wYJ/hzVUq/s0ysF/NUh+KJYpn5uFapHD8nt/bhNsGXZdSxtT/ziWoWv8qUE4zax4GpJlR1bpWXnZxlrQWJJjytNLhkpHrGmGEvlh+YrjZzRZn7awWVk6j6SE0m1i9soX4b/GJuWH71fzf3wZ+J2+hWCD0f/dkH2C7OPx0oOGBj3krcX4HGxZ8jto9WFfm4VpbZLjxu+IsusBwQuOTvdFRg4AWYzm/MMX92YKZVV/VHt+Cq/UtaPyVGJkYmRiZGJkYmRs58aUYp5XYAzm2a5kc1AziHn5c0AcVnBSedHA1KB0hGlzXfnRO7MdhpuqoCTrZoo5Pm9OdEwu3UqRhYmRcfc0Gm42o7Pu4WAMxDx2Kn0yTPCZtlYj3YTnGOk3a0Y978al2WV22gNg5+7gcotZ0DCrWf80e1DctdSwwKEGwrJjf/KqObW/Uh1y/8tPbjnG5M1ZVto7GgizPWMfSaBbou/pS/ysnJVfVh/3S6sWw8PvdRXjpGTQf18zjH/uB4ufhz86NzqHp18WZeo9FoamGVNBsfgcRI1z4xMjEyMTIxUseo6bAnYaTjEzTzgqtpmi93na8FoUumKmhsgej6YTOXgHQS1JmcITlIWR6XSFRu5tk0zeRBQpaLqy0qg3NClpmTjCYk7aN/HTio/OyQXJ1SoHTOEzwdaDl78ZywDDyuHnf21kBingq8zt5ObpWX7aCVLh1Dk7Kzd5evuTmsJS3m64BH54n9r+Z3zDNijvVRPmxzt0BSfXTOdVyWryv58jhcHebq/bzx4EDN+W0t5lmPmsyRx3hrRchcA8Oab7l2KovK0BWLLP+shY72cf7lKsH7Os3CRyAxkmVMjEyMTIycPu/kS4zcczGy6znnDXktfJeTqZAqsBrE8XYJk53KOTpPhBtnnoVDzRk4YbCMTvda0NWcM34crmabOOacVZ0P8G+hUt1VlwgQriqEzG5bR80GClyalKI9Jzbn3GxLPa9BoUCic8DzEH+1j86fAl700zE0SDlZ1HRytuz6HjbVW/o8vsrmFssukTu/coDn+Dh9HJDpefYZ1VvPKZ9aLDswdO1YDp1H5u18zslak1ntoxVprSaq3HrnwIF7bQ74rpQudrSf87kuOyTNT4mRiZGJkYmRs9q584mRexZGdtGGvhZeE1YXQKjAWjnRtpqoag6vCYGTaZAGlkvCLJOe5wd3udqhcjr5XdJl/v1+v8pLic+rHF220eMhhwKcJigGFrVR8NM3YLFvOH1VlhjHAQHLWktAPGaXv7Ac7la1JuiQrZaQ4h/PgyZq5u100jZur7TrU9ONbeqSMsvHScoleSd7LU5dRZV1Ut76Wfdcu8qyyjWL3CLO+Skf033cNb/lWFEbuXG6bMZzyjkV8A+b61iOd+gSPOaxGcds0q5TYmRiZGJkYmS0SYzcNzFyQ+5wsQBA/TYtU1xV8i24mBB2IJ4MvQ2tiS/IJUxOGnyeHYGPseyzthi4LRtM7BDuNwX0djwHDycTNwa3c+PrK3F5jpRHzeldgCh1JSvVT3lxoNf04+/xt7Z4cXZTGZ0P6XcFTT2m9qgBdc2HWD7dTqLJ1MldG4PPc+zUANnN+axxuxYumrhcO543TfLaTvlzLqjJUtuGU5O5K48wxdYIBx6qi/5YqI6p/hXE8VKb79BRQaXmX+6c5kPtozmoFvtJ81NiZGIkf0+MxJr+iZGJkXsyRnbRhpQwXZDVqgH8j5WK/afBQ9sw7zC+LgTUWID/DROWmSsFzkGUGERUJrVFHFf5NCHqlgTV38nFgVhKmar6OZk5WTvn4d8wCJu4X/N24Mv2dWCgx1UPrSjUqhQx7zWg4/3MjpRPLGZqYBW2YNn0mB4P+VxFUD87ANS55x/WZPBlPi6h8Hguqfd6PSwuLq7pUwML9V8HojyWW6wov7AX/3Pjcfvgp/Gh/9TfHFixbDW/Zv0Y+OMf5y0l9WH+x/x0IcA6qc0cf/5ey1uzQKdGtfhIWh8lRiZGJkYmRvJYiZH7HkZu2B2upvFvbFFHmlWNcnxrt891Enlyglyb6M/naxOlRo/2/EN26gRxjHV0jqfOxADMtmK+NaBzFUC1k5NNdQ9b86/Rx3e1jfuuIN0VaCFbtOW+TGofDQrWWZOeBo8Ce/TreiYg7Ks+oYDOc6B6K5CzP7AvBh+ugPX7/alkWvt9DwVWtrVW1GIcBmBecDBPngf2G10Y6ZwoIOs8qTy8mHLgGj5YG8v5s7uTwPblvlwNY7BgUrnZrrxQ4Vd5O7mYHDi6NvzXASafB1YX0mo39a8aCKtdk3aNEiPX8k+MTIzU/omRiZFKewtG7vIFFzuMJlCgnqSdQ8RxnihNVO6zTrAmBE0gtQnhxM7JjtvqXwU7blsbVymSmcrBOrGzcRJmfbUvf9ekofqzTpysXDAyqZ0B/7srnCSUlyaS+Nd1u53twAlXk7XTk/9G0Lvk6xYazrYsK3+u8dAkq+DL+gWP+OsCvstGKpPal/+6WFD/dWDMurAdGaCcr+lYah/1SfYrB/iqv+PDNlBf0W1brtqtyVp9J2xQqzTWQIGBaDAYWJ1cDuEYCjvHFimOJ13E6mdX3eQ+XD1OWh8lRiZGJkYmRiZGrtK+ipEbcocrhFfBNckCawFBr6LDuAxOwGoFx/Fjg2i/kIP5ljL91h/+y3x1LJZVq3FafXPy8n5V/ht8OfkosKmucVwriOxUmiC0cqqAGBRbL+ZJYhoktfOsC4O1zpOrWEZbfitU2Dq2ecRx7s/2ZjkcqLE86s9qU50/bsf/VG9NmKFH6KCJJ+Yg/pbSbotZWVmZ0iU+1xKGA/AaqHMflVkTMs+jAk2Au1b02dZMqr+LPZfUdV6ZuCKrfbR/jFGbq+BX+90V9kmtjKne6ke1uVIQDBmCt7YJOdgebp7Zv5mH6qQLhaSrT4mRiZGJkYmRiZH7Nkbu8gVXzQFdRSvadB1jXsqTJzKCJ9r1er2J8+jVq/JyE8HB7RIwy9XlwBqY6iwsB9Ambv59ghiTv7NjuPFdYDmZOZEDmNoSoUDIfDRp8NhsY5bNLQTiuC48XICxPYP/YDCYJHtgOqC4UqQVSraDk43tzsDE/PnBapcc1c+YGBC4Ssu31JWfblcJGUJOrZIp6XxosnNgqMCliV8TnUvuPA8LCwtTv7/BvDnmNIFz/EZllm/56xw40GMZdfGiSbZp2t8N4goejxH21W0qPAdc6dPxXRyEvXWe1X9ZRu7LeUP1Zv6O3B0J1rcrH9d4JtUpMTIxMjEyMTIxct/ASHcuaMN+h0uF0OqYkiYgrTyo0JFE9BamS+aRdDRo+K+ThRNbTU91UK4YaeLiK2S+lRm3Q10S4CDR/uFoLC+AqQTFgRP6c
DVOQcEFM9tWFwddCwB3jpOggjiPEeTAif8BwGAwmNw+5+BnwNKFggMVVy11Cw7WT33P2YH3BWslkOXhyijPK7C6yNAkr9WZGIdl1IWW6qFJRv2d7chyM9C6RBmyaDy5RRF/1+TLyY7106Sn7VTXWjuXC+ZZiKn9HNjWgKQmw+Li4hQohE9z5VEXlwrMTi7Wy/m/Ayyt6LkFQNLVo8TIxEjWPzEyMTIxct/DyA15hkuVAlaNUhM8JjkSHTtr9InjoRBXVzjBaWJlJ9Vx3XeWXas0IUcAFDuXBhDLrzKwPgwiXBFQ59SqmTpi7TYm6x0201/55v5hP54L57zOmXhOXHC6/hzgvKWB51jnJuRh4NQFiI7BvsJzwrK7cWp8WB+XUPl7gB3z0dcPswwaQzFGVMFYLm2r/WrJmkEneLL/ManPxnenA/fhv9GO/ZRlY99RXVgPJZc0XcLksUIOrQLGeZZd50Rzk7MRV3x5L73mIc2Hmqg5rwFrt0e4hM++GFVItpXOrwN0Z/PQTRdtSeujxMjEyMTIxEhnu8TIfQsjS23C5qVSSrNp06aJMDXHZCVc0uG+XKXQYFWQ0TFEtqlJqSU8d4yrbTwGy+baq/yaNGs6OLBVu8XEs7MqkMbEayLRJKSg5ZxabeTsxM7P8mjiZh35uCZLbut0cDJxsuDFiPNHPaagwwsVToZuEaHzwzLw/OieZgYKBXaufim5KqCSiz/1R01uDviZn7Njlw20L9uSx3Fzw4sGnl+XFHV81VkTM4MXV8hqcmm86HguRpTcokZtqH3DZrV82bV403iPY6xXV97rApk4try8/NWmaW69RtkkS4mRiZGJkYmRiZH7BkYOBgOMRqO1E4EN3lLIW4RUxAAAIABJREFUgumVr7YH/N5vTd5K7ko/PjvHYmd0fTQhM+ltSpXD3cZUcOS3tLAsmgTZceIcH9dEpI7BQOO+s41cotagczaK8wqoLtCYasmR+7NMzAvAVOD3er0pWfUtOLEfWm2jgcM25nHUJjUgi89avYwHdhVUFVwZnFhH1b2WfBjgYp5VT/UjBS8FYx2DgTkAwSXFaMv9FQh4jmbZlfs7n1Q+uojhcywnb6OqvfGsBtScxFUWBR+Nmdoc8vg6FucXBxA1foPBYFKF5wWLi0nOi656XPPDpPVTYmRiJP9NjEyMTIzc9zByw16aEcbj28TsYJpkeQL0tr1WP1wimEXqIK4ff9fkyO2dPOosmgg0CMIWmsh5MkNWvYXN7djmWilaWVlZ89aZCGSthjgbcMLRMaNvjOeqXGprTvj6Gk+1T8ilQBCy8xj6Sl6WjSuYWnULfpG4nY7aVkE9/nEyju8AsHPnTvT7/ak2Or81Chup3/Z67WtQXXDrnDKoajtNLFztjGMxV5zkIyZZNp63SNIxf1oVU0BV/+dKVG3xx3OjiwDWM4iTcfDRvKTH1ZYOZFlvjSsFNyb2H+XB7RVkNV5r/HSxGrKqLmzDeXxS83XS+ikxMjEyMTIxMjEyMXKXtxT2er1mcXFxTWLmybEDGyeJ4+6KU41bmwA+7l7t6ORwSbZmvPVMACddJ6s6lQINj8nt2SHYXjwGHw8eGoAA1iRWZwc9x7LogkFl4O+Ot9NbAVkBShOKVh4iiCLJOQqeUe1jcOR5rPVT3bm6xccYTGrVOdVP7ebePMbycDJRezPpokhtzPoFGNfix43J/skAwXMSfNQGLhZYLtaXx3A2YXnY7+K4W6w5G9V8m+dN9eU5VvvrMV4k1XKFLkrYT/i7zpHK6xZ9NftxG1epXFlZyS2F66DEyLWUGJkYmRiZGLk3YuQ4VmxQbcgFFz+Ey/zUeVyy0fNBcczd9uQ2PBYbvSshcR9tx3JrOweYykcdU3lo0OqY0Y77cpCqrDXeXC1TmdgWNcDmsV0Ci3ZqKweIrJMmfU3k/LpT1l1lV+IEEkEdsnBii/OsV63i5RKR+m3IxXbhLRv6MC8nYSb2MX1jlto62oTt+DMnZtZX9VC71eKH9dJEGGOq/A4cow8DkEuALNesNs4uLsZ5HnmRwfq69s7nY5ywe1CAr8rkgJX5cVJ3emqyd3lBfZu/6/wEL+3LttO8pHl6MBjkBdc6KDEyMZIpMTIxMjFy78XI8XZZe8E13691zaAYTK+gOYHUqiKsPFeItDpkhaeKAF1ZVq9ENdG5IHEOzc6g/XjSmVziY9DlcVU2Tna9Xm9KT2dHtuF4QTTpF8fYLgqMLK8DXz2m86m68Hw4PXnM0JVfE8u6OJ+oOTsf099tUUAL+YKvBr3aQv3UzRsfC53U1908sl6RANgOLCO35+Sg7Wo+7uZe41d9Xqu6Yd/wKQdA6s8xt0zqt/qd51wr1yo786zlDLeg4STK+qr/qA3ZrqyXVjT1u8oTOuhCgMdyrzTuAk3mqWPVbKjgxed5oZJ09SgxMjEyMTIxMjFy38bIDXktvDqOJls3+Q4suF8Ylp3MOQ6Pw8fdlXQQV4ZcUnUVQ666qCNpIlGKcRhgOHnHmJx8WQb3ukkHyFox6poblTdk1LZaWeBjHOw8Z66io/OitlG7uYqlVjBYFwc0bKdSypRd1Nd0DiNh8gJAgVx9NGTjxM92Y3AJfrUgZ75cbQnieWEA0wTC+/01HvWYi1cni7N7jBeLGH5AWW2hY7M9de70eQVXTVYb8sO+LKf6Zdh1nlzDFOdCNp5v5/N8nOOFq5luQaLj1QCJ5Ve5uZLcRU5/tg2PlbQ+SoxMjNQ5S4xMjEyM3DsxspPHzFHmIDWGJiD+B6xe/bureRXaObdLBFoNqd2iVEDQ8Zi/OrCbVHdMr9hdkPBfrtSFkzuwqAUc9w0do/rA8nHyqgFfVGQiicYx/hz8oi3b1FVzNIiVT5xzIK724Gom8+AKg/6eipsjZ0eVXRcBCjoKBsozfNwl8uDRJYuzG59Tn1D5NLEoKLoql1bbne/xnNV0Zxsy71q7kM0tUlgu3qLBY3A/9lWtxOn4IZPKpn6j/bXiqrbjOWf5dN6UXw0k1d5qO9WtFoPO12p+5GzWBbJJdUqMTIxMjEyMVEqM3Pswsos25C2Fqgw7tFOWnaZWveNAZR7MSwFM5XGVFP7MVTxNFjxGLelqRYV5ODlnBWHTNGt+lNJVDNW+tWBXmZhqSVDtGfbRJKv8HQ+WtwZmmvQ0iLVKxTzDPmoPrWR0+abaJPpzZYd5crV2Hr3Zt7hCW7OfiwFeHLlX+tbmUO3l9NO+al8FU7Wr9gEw2a7jwNMtFlw8B7kKu+YRPu/sG31Y5y67O/nURzXpc7J28a/y1hZ2tZzZZUOWQ/txjuN2LkbU5upHDsySZlNi5LQtdiuM3LwFozs/ZPV7KRg1IwAFBUADoKD9X9MApYTewEqvhxI2KgXNuMOoGWGhN/4x1nGnBs2Y2Srf+P8IYxlLO2qDBqX0gKbBcDx2KT0M0dBcNhjxHEzaFYzGsqMUjEbD9vtYr16vh7EXTLQbtUqhCduN+67OR9uuHWuEXm9hYode6WHUtCOWHvlBM57DgqljMexwPL1jSdH0CkajBs3CAgbDAZpm+nkxlqMAWPjm
Z4FLL0iMRGLk7oaRXTi5Ib/DFQNpMqsN7pyc2/NtSJ005ekcS0HCkTpIl8zOmVRnBywKUG4snXQdLxbprk8tqHQ8/l5LIC7IuS9/V5BW23TJoQHiAoPnWROqs7kDdubl7OR8SS+Oavp36aXn1VfcbWkX0CGPVhRrCxzlMSuJqy1q9nPJvDbfXfEW55Vc0lUZHU8XD04m5tGlh4sL9QX2N54HVxlj33U6zSIFFaerxpnmHOWheZf51MCrZq+k9VNi5O6Hkc3+B2Pw7JMnfGdvLJqP+L7IRvHcF2hpznb7vey3sDC+4EqMTIzcUzByQy64OPGFcHzOgQCw9lWl0Y75uCTOARN83a1fHocX0M7InNR5q0Cc14sMt+h3xtardLaJ2in4BH++/VqbZOYdb4RxCeXQ/nXxywf/plncr1a3VmUJvXU+J/W+sR1Xq35tO0yOTapRExvHxVpbqWqPh31i7tryl/Jctdlo0m/V9g3YNGVcQWu4nDbhjSmZV+dr9RwQcx1ztFY/ljWqlmsT9aod1vo9H1u1actj1e6lAKdd+R7sGF6xZmGlYOCO87FVe63+8F8twTDPWoJi/lFR5PitJXhNfExdY3LFkm3hdHekSZjl0HZd9mT9ZiXoLnLgqfLMqlx2ya/nVQ/NodxG5dE7KUnrp8TI3RQjkUWEPZF647lk30yMTIxkvj8rjOyiDb3gUmFrTh3n9Wq+aVa3bCmgqDMrP26rdxHinMrqts1xEme5mR8nfr0TorJoYLpJCVvEazNduxqI8as2nb2j3+GbjsXdDn/6mrGTdm/62pUfwg5cMQUos3yLq9cRU/1+f/IAqv6gZXyOc3EMWJsUu8aL2FEf5j4hv9u3zv7qdHTjOxCs3Z1kmygPpVqCrQGc5ild3MY51TVyTW2hXNPD5VnXvjYPLJf6gi5gmE/Xoj2pTomRuytGAth6MfCelwPXOxF48DPWjI1PvAs482vAQ58FHH3c2vM/DbrkB8Dfvxo4/heBX3vCz0aGLrr0AuB9r2o/3/i2wL0e1X7+3AeAb36u/fyQPwCOOb7OY3kn8NbntJ+vdQzwiD8Gzvk28NG3tcdud1/gdvcDUM+FiZGJkWqHnzZGOlsFbdiWQp3YEMwpoM7F59UQ8bnmoHplGcaIyVYAckZn+bsuXtSQrjLndNRzTDp5wbPLLmzr+Kc/YMltZ8mQtPuTbpsB1sZIfOZErfPe7/enYkNjUfcza1x0gQv/PkxXHnB5QReBAWxdz/A5OVVGF5ddCbEGHjq+8ta7QGqv9eSDePMXg3PMv/Z1QBb/uE+XzjXZQo+uvknzU2LkboiRALDtCuDDbwZ+6a7+guu6JwD9RWDLgVUZr3G64tJWxjs9cPe74Lr8YuAvnghccDZw+/sB735xeyW7eQvw1ycBt7wb8D+nAef/L/Cck4FrH7uWx3AAvOqxwJc/Djz2Je0F8HAFOOGWwJlfB65zQ+D0/wSuuAw45vjJRRIwXURIjEyM3J0xckMuuFSYGohw0ox+cazmpBow3NYZFvDbMPgWq9v6oJUz5uHkCz31uSLWkz+7lzuo/g6c+DiPWbOfOhWfO2/7V/HpS96Emx98X9z2sIdV53N3pI/98BW4eOksPPS6r8GB/SMAtPq/+/uPR68s4DHXP3kGh5Y+d+nbcPZVXwIA/MZ1XoHDNl1vbhm+v/3r+NdL3jB17C5HPBHfu+rLOH/HN6eOP+rYt2Kxt5/l8+Pl83HKRS8EAJxwwB1x5yOeiK9vPQVfv+IUAMA9r/0HOHbLLcat1748A5ie5/CtxcXFqXFi3muvQOaEpVU358M18Io23DbaaIJzxOfdA6xKHC9cYVKZ2AZqO6ZawtQYD4BzC1mdH9abZVH7KPBrBTXmRCuy3I77O90dIDNwx3HNG24LWABU0vooMXL3xMhej2Loe98C/vjXgFvdA3jIM4FP/g3w2fevnr/xbYB3vhC4+PwpPfEnHwFe8MDpY8ccDzz9je3nr/4r8IHXt58f+GTg3O8A3/z8dPuT3gcccDDQNMDzH9Ae27w/8JJ/Ai67EHjj+ELwf05rZYwLr4/8FXDaqe25J/058OE3AT/6vsj3z8ALfr39fOAhwJNeA7zmCcBxNwN+79XA1z4NvP91q+0f9uz24nNe2rENOO3jwC/8CnCLu7a8vvsV4IBDgPO/CzzqpNZmp50KbNvqL7hGI+Df/hE49EjgTg8C3vIs4L8+1V5w/fBc4CeXA6d/GbjP44Bb3g09uQOVGDlNiZE/O4yszSGwQa+FV8HZcfW7S35dPGc5IfNyfGN8vrXJ/Vx7lplBJ4jbsKwKKDw+Tybz575qKz0e/bXCx8liNBphMBgYQAV+MrgEZ2z7DC5eOrNq82uC3nneY/CWcx6Mt5zzYPzVOQ+5WjzO3f4VnLHtM1gZ7Zw6fvq2T+OMbf82N5+bHnQPbB9uxRnbPoOl0bZ1yRD229Tbgrsd8VTc7Yin4qjNP4/zd3wTZ2z7DG5xyANwwc5v44xtn8Go8YvSK1d+hPec/0RsXbkQNzzgdvi3S/8K/37ZX+PS5XNwxKbjMGwG2Da4FB++8AW4bPm8qp/qD2bGZ1e5U+Bh/+U24VvMNxI283PjNk37Y6LxC/XRl2Xh2HMxxfJz3NcWdRqrtVyh8aQ61M5p3HHs8t8AYgYS5sd8XK6q2ZQXqmwb5qF5iPvwlg2XD/XHGl1e5O95obVrlBi5G2Jkj+Q+6vrAze8IvO9PgVP+ErjFXYDffh6wvKO9WPjJVuD+TwC++1/t94c8E/jPfwGeedd2u+FppwLf+RJwz0cBH38n8MantxdWr3tKe3fs4GsBb3oGsPmA9o7VaacCd394e3HynHsBSzuAP7hre5fnoc8C/v1D7YXSgYcB931cK+P1TmxlutWvAqee3N4JutGtW34vezjwy7/a8jvt1NV/z7zLqnxf+VdgaftY1i8C3/oC8Lont98PP7rlfdxN27Fe+yTg6b8CXHZR1f8AAEccAzz3XcDZ/w2843mrx+/3eODOv9nK+J0vdvOo0c3vBLzoH4Bb36O9oLv3Y4Ajrwc0zUw/TYxMjNzdMHJDfviYBWGH4TZxbmFhoXqVyxW0MErtitRdBeu4msBre8lZRj0f8unVLlfUog+Pzwmfz9V+BFKdJmzEt0v5Kl515MpEv99fA3irL6YAvrr1A/jfbZ/H3Y98Gi7Y8S18d9vnAACPuf478KELn4+fDC4BADzz5z6O15/d7pne3Nsfj73+O/G2cx+Oa2/+OdzliCfh/Rc8Bzc76F6491HPBgB8+pI34ZtXfBQA8H+v9wZcZ7+b4ORzH4kzr/oPPPWGH8ZibzNed/a98dqz7jWlw5Gbj8ejjn0rAOA7V34Kn7z4NQCAux/5VPzSIQ/Ehy58AS7aeToA4OTzHoWFsoinHf8RvOWcBwMAhs0KXnvWvbD/wqF40g3/CRcvnY33nv9kAMBND7on7nNUuy/8M5f8Jb5xxUdw+YpUKNdJZ131RVy8dDbudsRTcMMDboMHXeflWBo9F0dsOh6fuvgvOvuuNEu4cOd3cNz+t8ZRm0/
EVcPL8OPl72P/hUPxzSs/hpXRTpxy0Qtx/6NegIP7R7ULhZW1CxWtIrmEy3HBPqcLpMXFxaltDu5ZCfZ/jUutLrpkpokzZHEy86JznlxQi38dS3/gkL936ccxFhWzsBef5/jUBR/Ly8CkdwyYD+vn4pzni8dxVTfVpYu0TegSD5EnrY8SI3djjOQF0gGHANe/SbtF7qJz2ue1jj4OOOTI1TY//8vA4ub2883uAJQCfPuLwM3u2B5b3ATc6FbAzqvaO2a3vDtwwZnAHR/QbrG78HvAwYcDBx3Wtj/xli2/078MNKOWF0p74TcattvoNm9p2wHtRdsv/Er7+Q77t3enPvHO9g7Qjm3AsTdq+b3hc8Bz7gO8+QvA792m5efo+F8E7vUY4J0nAV/6WDveE/8UuP392ztdS9vbMbto037A3R7W2uYbnwXe9Pvt8aNuADz9DcCVlwFveBrw3//ezcfRwYe3un32/a09zvgK8IRXrokXXZwnRk7zTIz86WHk8vJytd+GvhZeAcIl2zjPf+OzAgmAyV5N5cHj8PHoG79zEHtcI8G6LTEOADkoVO4gd/uaPztHikULX/G7W7khE1+x6/7UOBeBz3zVzqzzzQ6+NxawiFMuOgn3P+oF+P6Ob+Cc7V/GYLSES5bPxtaVC/GsEz6NPz/rbvjx8vfx7BM+g9ecdXecfN4j8cDrvBQnn/dIDJsV3PHwR+NjP3oF+r3N6JfN+Oylb8G9r/0cfPvKf8HJ5z0KTz7u/bh4+Ww0GOK9P3gyCgqed+IXMGwG+POz7or9Fw7Do459K04+75F47/lPwW0Pezg+cOEf4aYH3ROLvS045aIXYlPZH/e89jNx/o6v44Kd38ZDrvtnOKR/NBbLZjz62LfjVWfeCT308djr/zUKerh8+QK8/dyH4+D+0bjd4f8XH/3hy9DvbcamsgX/dulf4l7Xfha+c+Wn8L3tX5rY4w1n3w/bh1vxRyd+Hgtldkjc9KB7YnPvAHzkhy/CYm8Lbn7wvQG0F4NXrFyEZxx/Kjb1tszkw/Qr13o8bnXoQ/DRH74UNznonrjJQffAYm8z0Ey/4YgXMf1+fzL3OuccF+ET6pPsX7qFKfaxa3WHxwjih9JjsbiysjKJP/2BUbfY5MUUj6FjcVsAU0BTi9VYqGr+cC8fUFJQ4ZzDCz4lXhjGZwZ/AJP5i2PMz+VKl3/cnDp53WJZt6QweHO7kMftj0+anxIjd1+M3KPocx9YfbnElZcBj3858O8fXn05BQDc4KZArwfc8BfQ+SLGAw4GfvMZwD1+G3j/a9vnxF71OODF/9g+ezUPXfx94Pfv0m65vMcjV4+//3XAh94IPPV1wP4Hr1tNAO0F5Zc+2t5ZvOqK9nm7bVunsI0vFBIjEyOZfhYY2UUb8sPH+lkTYm3iQyG92lSQYCVd0ARvHjM+hwHCcRgkumTlcXgSA5xiEtVB2QlisvU4B3nIxlUBriKojC4I1XYMLqsyrLbZr3cQFsoitg+3YrG3ZfpZozH7wxavh8uXf4AGDd5yzm+iQYMrBz/CIf3rYNisYMfwShzYPwLLo+3YPtyKe137WfjR0nfxyYtfg0GzhGGzgmGzgt8//uNoMMSf/u8dsdLsxJu/9yA8/+dPa22MhQm/nwwuwcpoB64a/hj7LRyETb39sX24FUvNdhzUPxILZRMA4ND+dSbPXR22eL2JXQ7f1O4Lv2z5PFw5+BG2DS7DqT/8EyyPtmPHcCuGvWUsjbbhwIVrrXm26ndv8F40GKGH6f2+Sjc68K540Y2+hsWyBZ+77K3YPtyK5dFVk/NXrPwQIwxx6OIxiFe7z0tbFg7B5y59G7677bM4+6ov4RM/ehWedNw/YTgaYjAcTPlSfI4kzT8QyQkiti70ej0MBoM1Y7LPa+IDun/8OvqurKxM+Sdve+ALfbdQ50TqkljIpnK5GArSuOO2utiL3MNtuW/YQCv17k4A8+Z9/5rE2Q662FT+XQtwBgR3IaS+EH5Sy1kOEEOW2iIiaT5KjNy9MbJK7/0T4INvAK66sv3+3PsAbzlt9fzDj2tf9vChH64eu/xi4Mm3A254c+DlH26fw3r6G4C3Pw9AAZ74KuD//Abwqfe07Z9x5/a5pr85vW37gQuA3zgK+K1j222I7/p22+64mwGvOAV46UOBr38G+NXfBvY7oL1IeueL2jf8dVF/E/B3ZwOPPAF40m3bY//zZeA3rr3aZscYyx730vaO2PPu125PfNtX/XNXQde6LvBH7wSec2/gvz4J3OW32ovA0mufV/vTRwPLS8ArPwrc4CZtn8fcpL1YfP8F7QtJ+ovA+84GHnEC8OTbtO1e+VFgv/2BG90G+Jd3Af/0GmDUALf61Smfi21fiZGJkTpO2O6niZFdOWXD7nDxFbxOWhiKJ0ABhp0wyAmvTsLn1UDqBAoUzoDMgysmPL4zrgKf+64O4fTgW9l6OzXaRGUlEgQHXySWcLJwosXF+lQ/+ti3463n/NbYAMAfn/hFLJb9xl8LnnviFyafY7uh0r9f+g58beuH8ZvH/Cm+ccVHcOZVbZ+/OvcheMpxH8CLbvQ1vPiMX8BVw8uqcsxLr/zfO2DHcCtecuNvTY4tDbfhFf97m4mNjt1yC/zODf4WANAvi/j0pW+q8jugf/hc45511Rfw7Z98Eg855tVrzr3j3N/GxUtn4g9/7lM4YGE+fkyfueTNOKB/OG520L3wCwffH5+/7B0YYdC+nnZU1vgFgMlrbJeXl6eSmPodsOpznOgiZsPPouIW7XkBx3x4CwdvM6slPCUGLycfML19gduH/vFdtzmEXpogo1LGuUjfcuUWa24bBs+D5gcFRT3PvLvAheeQyQEjz6kuLFUHzsMOcFkOzie6sE1aPyVG7n4YiV4POObngFOvAHr9duF/6hXtBQoK8GD5KZX9Dlj9/M5vA/ttAbYcBKyMtxEdegTw3jOBsgBsGbf99ScD93ls+3lxv3bbYdCffQK4/o1bHqUAh167Hb+VFtj/oPZjf7Hd5nfKGH9Dvse+eK187/5O+/eDFwELfeCjl7e8r3ND4GNbMZM2bWnHe8l4G99+M97OuLAA3OLOwD9fOv6+2G6DBIDffzPw1Ne2nzfv38oDAH/1n2i3b4y/lwIcTfKx/RY3tzaM59g2bUHvy+2jC4xHiZGJkXH8Z4mRtQszYIO3FHKyC+VYEFbS9Y9zrLwCEPcN/uyUfFwrB6PRaHL7VoEn5OWr99hiUQMQ1YlBhInlZP23bNmCnTt3rgkGtQf/DVlZNrW/VhdK6b7Vuam3BaX08Lqz74MRBtjcOwClFLzipt/F8//nBLz0jPaNeQf2j8TvHfcPlsegWcYIA3zwwudhhFaH4Pey794KBQUjDPHKm5w16bNteClec9bdcZ3NN8ETj/t79NDDQ455NT544R8DAH796BfjFge3b2x68g3fj9edfR+8+sw7Y4QBXnLjb6GHBfzJTc7EC04/ES854xex38IheMGNv4yLdp6ON33v1ydy3+laj8f9jnoetg+24h8v+IOJfK8/+3547omfx5vPeRC2Dy
7HK256BhbK9FuMmIbNAP91+T/gq5d/ACMM8YCjX4RfOuRBAIDl0Q40GGHT2HYxZ88//UQsYAGvuOl3AQCHLx6LZxx/Kt74vV/De77/BNzykAfhAUe/aDLGD3Z8C+/7wdMmMoZf8nMdTdNgcXFxKpHGbfaIBY4JnntevHGM8ZaLIAYm3grF/3gh45KVttW447EUQHSRX6vU8TldmLF94jwnXU3MHFNsbye7q5BrrOs4zmY18OW4rQESV+NqeSfaco5R2zu9NLfkxdauUWLk7omR6PWmt7z1CQM2bZ5W4ndvsfqWwocdC3ziKqAZAfcfXxjFHa6/OX2aH/N85aPaO0EA8LQ7tXeeePza9ruF/tpzKh+w+vr6uFiLPqWsb2sfX1zOot6C571pv/afUsjG1CXf4qapC9VR06CH9tmqeAFKYmRipNLuhpHFJcb1UK/XaxYXF9c4gzqCjqMKxLEwoE5WGIsNoUZxWxyUd5zT3y/QW8Msp/IAVrdf8AWNViNcNTNuXUfw6g/D6bjOls5uanvWtdfr4Qabb4PHHfO3aNCgjLcXtp8BoODN5zwQ5+/4Bv74xC/i0MXrrtqFXrYRbbmf8ptFBQXDZhnPP/1EHLhwBE660X+hoCC24DVNM+HTHmenH01GiHPT7YFSelPHam35XEiu4ynV+LJsymPUjCZyOT5rz03r+KYL740rRxdO/e4E0O2j6ncTWeRCnBcqEQ9xDph+7qFpmonfBi9evHAshAzcJvjqgomTtfbh/ffuBxSZmG+tOlXrp23dcW3P53XBCWBq+5Paura4dnnGAZkuaGdV43Qc1mXWwp75xbjD4fCrTdPcek3HJEuJkbsxRh52FHb+3Tl+4hxp9TrG0zgyi79VwUeYgqFSVvkkzUdqwz2VrrwMmx++ul0zMXLPx8jBYICmaWxAb8gdLq2WsUBd+x6jAsXt2Ai8yIxJi0qGu8rUapaOz47Atzp1suO4244Qi1Ze/GoyZz7qBMyHZQj9VFbuF/bl3xbQcxpUq3KM97vSs1wFBX//g2fgm1d8DADwrBM+veZ3qXrmWSTl4T7XaNSMcNLpN0EPfWwfbsVfnHUPPOfEf5vSu8anlN6aM659jUed9zySr182oGK/dfBZ6PXRG/XRW2gr4gsaHkeJAAAgAElEQVRjvxwOGywu9jEaNUDTYEQJpDQ9lF4PiEvASaJufWQ0atCMGvTGft9fWECvVzBsRgBWk+NCWUQzaoGtaYB+b3HMEWiaAkzGHD8TsbDQDtk0aJqCXulj1AzRlOnKX9hAP2vi5WdLgjimFIh4sajgEOPwxWHw4HY1sNHxam24LQOY5gO+cNYLXL74Zbu5cWq6cF7R8bm9q9SpXtyXgT1pfkqM3E0xcrVR+zzWRtEoYyRpBvUXAbnISYzc8zHSyTBp767e1kO9Xq+J7RE6kFbMu4y7RjCTTGvnx3KseX0lt3MVd+alV8QqL9+K1vEVBHhSuZ27vaqTVEqZupPg5InP6iwB0M5xj9vvtvid6/3dGp5JSdckXb7yA7z2vLsBmE7YHFvAajWcF3i8BcQlO/b9OMdg4eINmL4jx3cKavnKLXSDNDHzoq5WVeP2mndUHx1L82fYi8cJXVinOM/ta7y5H8sW/ZaXl/MO1zooMXL3xUgcfjSW3ncuyplfx+LTbr9GJ7fQ0gWs3o1wtuF+yi+OxSKSeehruZk357m4+NV4jZdC6AI1eEcu1O1d0Z8vYPl5Pd56yrrpRXYQj8O+EudcrnT5i2XlO6hLS0uT5/aiLb8oQ/NZLNrZFmE/jgN3R1rjINq7nKmfJz6w0MfyqduAKy7Fpoded4o3z29i5J6HkSsrKxiNRtfcHS43oa7CxUqpQ9WSWm087gNMb51wSU8rAMzHBSP3Z6dkcOGJcXI7vVhOTprsIC75Kc/g66oRi4uLWFlZmUoYQYNmCcuj7dauuxWV8Wa/qVwQ91emmvE3+l7iv9Xv003HmwnXnJmihv7XTP2dnKgpsHb84sbiI+Fj43FWv4zncA6JzRhNs2qnKTYswpROMy0yBxXsv3DoRHb1ZZcP3MKK+6wZgRaKypurZ3GeF2m8QFAZHY+a/Jq/eDE3qxqv3912T13QMWjzP2D6N1AcELm8WQMXHp9Bxr3JK2k2JUbuvhipdgv9Qv6avMPhcOo5IZZFt2MGTzcev8BA7avxGfL0+/3JojsuuFhmzRXBs5SCxcXFyfNO/X5/wot58sslOEeurKwAwOQHfFdWVjAYDLC8vDxZTMdFAPuFysdvsox2w+FwIkP01/wcP0YbPMJ2mzZtmvTheeMLRpaJ50ILESx3jMFzqfPYlVuVJrrSMdaX2yVGYs33PQEj3bwHbdgFF1dAWGFWnBOfJtda4mSFaoq4ZDyrLRM7GBs39AGmfw9AwVDH5L2pqpNuz3D2CGJZauAaW04UnI3mAIBvbfsYTrnk+VMOzzKxbKEHV1YcYDCQqtxaQePAcM8NxecAsljghazxA4ScGIPXpk2bsHnzZpTS/kbGQm9h8paiABiVm+dO3/7D4LG8vIylpaUJuMRx3tLDcsWYDGwLCwuTKpz7NxwOJ0C2vLyMlZUV7Ny5c/Jd7acVGrYJtxkMBuj3+1N+pOAe8mui4qQavsD+6RJT0zRYLFvwwuO/OcVfk5nmBfZd1kkTvRtbfVHtwzyUH78gQBd/DDh63MWP5j2l8BcFfNbFATrbl3XgMXXR62TmuYqxVO8awPBb9pLWR4mRewJGri7IOJ40xvliJOTTBZ9uHdW50/78j/NVF0bGxUm7xXx1vLj46PV62LRpUzdGLux9GMny84UerxcUI+Nij/2oaZrJ+oNfcqMx6mKC51lzKzD9qAHbNzFyz8fIWm4BNuCCqwYeYWgmrdQA03uttb2SGkwdWhd/0UblVaMqH/0BOA48DQanNxs9nFm3W7BsOvE8hgNFHieSLo+lWxDasVfH4uSswBfVnkhUS0tLUwGti4MgrWgxMLFsrJvan/tHIg9aXFycSqD8PT4zeETSjvZxXudPQVUvECPwt2zZMmUHlY9jgG+F6yJKq3y1BBd/R6MRlpaWMBwOsX37diwvL0/+BvgERaKKbSQ8L/y6V5Y5+rE9osqlccH99MJPAdXNMY/JsrGNeMGjsrGN2b8V1LhtJMaQt1YJY9m02qjtVS+OR/Z/tZlWVpUHLyr0Fb2cU2uA6mwfxFVcBUQH9HwMmN7alLQ+Sozc/TESZXostnNiZGLkhmNkvx5ziZF7L0ZuyB2uWuXTJc4QtNYuzjuniLG4nyYGngBO7OxQ8yT0OOZAiSfDOaXTNQKEZeC+PFmzEi4n3rAJO5G+0a49tnZLiDoSML2fugbuIYMDdpYlkibbPuaIq4SsE9svgi+qXwEc8TmOl7Ja7QsgCYpjzFtBneUfjVb3+OtFBPPk71wdGg6HUxVGF9g8NwrQmixDZwDYf//9J2MNh0MsLS1haWkJKysrWFpamlT6xnuIJzbnZwMCVNW3dI7YV9yibVYibQRoa37kFijsd5pX2H+Vt1tY6qJH5135cuzrIkrb8xwrKOnCQXOC5
h0G7/AjHZf5ORuqHjqGA0Q3t2qHmN+maaZiK2l+SozcPTFykhka2DyfGLlWp8TIjcFIpsTIvQcju2iX0VOTrEu+DAiqpBpaqzxqCG7nJtglLpVXx3Tn1MF5DNeP5VEncoClzs5bMWoO6GSNz8oz/q5W4kp0tE7FfOM8V3HUiYHp/bBOr5gPTTIuSHWu+/0+Nm3aNKmeRVKNY7zvnAEl+DNouECu+WGASOjG1RIXyFxt4kTdBZacMLh66mwU4wwGg4ktgvcBBxwwORdbKnbu3ImdO3dix44dky0eDgyB6UVIzHU8l6BJXRd1bmHC/oOJzf3zHQ6Ywl/Yj1ycq++7XDFrIaRVWpWlloj/P3vvsiNJsp3rLXePe0Teqqq7du9N8uiQ4pxDjvQKAgFqwAGHegUO+BYEBBAgwIHAZ9ADUCOBU+FMCBAiBXV37a7q6spL3C+uQeRn8fufFlHV3VkHWUk3IJERHu52WZf/N1++zFxt14E8l/6jdWk/cxjEtT6x4nydFOlvOvnIjd1l65ioMsj1SY+1a7h+fmk58mlzpMtL/bzlyJYjPwtH2jrpliOfD0eeKo8SrswBKcWdL1fcYd04tBwDWB+0G4U/JtVHte70PhZXIPUruGj9px5LqjGq/I7JJ0fObsSeO5+TWXrXU32IaDq4KcAdk6XrJReNVNlqPX5c26dNonO9Xq+RW64LfclF55qPkYiSfc5xOF/f/ULKQQ4UVC4alVUyOUb0+lmdPxel4lpd5JmLjtZ1nUi13+/HZDKJzWbTIBYifXVdN+yTiYza03q9ToSiOqZf2n5u4tK0wQNwnloL6JMcJRUnIm0jR9I5n4Eo/VyfuOX8h+Ik6ITiBOjE4sTEuRRPhXDs+RjGfczmVD56jk/2fJzeXlt+fmk58olyJNcUzUloy5FNXbYc+fk4UuXQcuSXz5GnyqPuUpgDqIjIGp8Kk0dxHgXQ81xppyYCDhpacoaswtPioKtOf6wPx5R2iij8dzXSY1EIigJ0rt4D2R0AJAeKWtxJOOb9VRB14jxGVj5pIPIGOXS73UQomm8OuQD2usBX/0Mc/Gm6DH1RYtF+93q9xnH0rX3NLYrMAb/agJITv/nvHtH0SCEpHDnAVR1CbshyMBjEcDiM+XyeInqr1aqxGYnK8GPkmSs52y6PyEjH7HXmQNDtSmWXq5//x6J+fq7/7rrxcsy2fbKau8bHlptU5tpm0pKrE33rZJLobo4QaEeL+3COGE/JpC0fLy1HPlGOTP/z70jL9cPbbzmy5chjdqK20Dzw8LeWI58/Rz7aDZcDjxZVMN+1o16OGZMe0wG6cNRg9frcoz8nOCe+HDE4OPrOTD5OJ5VjY/7YecfAfLezd4uY3Bt1xHFi0/P10a2OR2Wv8tV2HYw5hz7qOZCEpkHoZ6J2mhKhkxR1Av/OMZcXn91u1JmJGiq4RzwkC60bmSiwIAOiYQr6p+Tm7areSdGApPQxvV5blmWDjCGV1WoV8/k8bfHrkxZ/2arK16O12q8GeJXHiciBi3IsHYXPepyJgutCz3V/PkU0uZLrp9qR9vEU2Kp95IhC/6sN5rBAdavkqv6rvqvlY7L1/nkf9H9bfl5pOfJpcmSuHLPxliNbjqT8Wo7MBQlajnz+HPkoN1wOYjnQ9Q6p4ZwiilPFDZo2vS3tnwosd9fqY8iNw4+rcbqh+mfPDXVQ0j4fcyQHsYimI7oxNGRZR6MNb1cjXnzXOnJyoSiIHCNejbxVVRX9fj8RBlE7SAQg1Oicg7mO0fvlDq/99MiX1wFgq1y0HNOJ1qmy02t8kqM6izjsduP648WMTl51fcipVzlznP+9Xi8Gg0GsVqsYjUaxXC5jNpvFer1uRPQ8Su3jpF0H9LzPPtRPjuyPEafrSM91u8yBYU5XuQ0MTmGOT1xzuKW6PoaFei4y1vPdZ/iskwwvuTSp3KTSo3C0lyNh92mV76eQcFselpYjnyZHuqz8e8uRzfNbjnx8jnQecNm1HPllceSp8mg3XA6A6vR+A6DXeXEDpTjQupJcWKrY3HHtzzHwVVDKGam2z/+cQ+h1Tmp6nrfjgORjz6WfuLz5firtAjnoY9acc6rj62+5R/0+XsAHslDC0FQJXwSseteFt0QtFfBVrm4jXJMD+BxQ+DiV0BhfLl0B4I6IRqpGzsaOAayPQ9vTseRSYPQ6rnXg7fV6SZ7dbje9z4TIntoQxOb1RjR3njo2Fh0V/VA7OvY4ni2FtU2uU7JzQHY5q/05JrhN5HTiPu/Y5P1z28udq4SvW2nruTmZMHaVpY9RiVbbVBtSLMi14b9xzTGybcvHS8uRT5cjIyLqeMh73seWI1uOfCyOzMmF81uO/LI58lR59DVcqvQccHqHVBmqADUcV6rfQasAdXtPPy9HOO4g7pR6Hcanu+v4mPS4//dxn1JizsCdaI7JnH52u92DQRSkDTT75X30yA2pGK4L7Zv3j+uRXa/XS3nndV1Hr9dLx5RUNFrnoMNxBzkf9zEdYlPUredwXMGN1A61k5z+KDmHo29OLhS1Uy3Imd+YYCkQaF+5xo85yHrEXPP/Na99vV6nrXNVXnqtTxjclhmlTmRypJoDWtehyzbnT1pHTgdat8s7d33Od3MY4d/dbvV3x6pj23fnIoTHMMDrPjZOT6PhGNfmsEjH0pZfV1qOfJoc2YGr5Em882LLkS1Hfg6O9LG0HPn8OfLRdinUu8NTIK4ddSXUdTOdQB2c87RoG2qMgL2epwCHM+SM0vuo/+mTlqMTzowRHHsMrd+VDDA8leMx8sj1tQH45BJGUx+qt1xkwoFQ+6Ggoo6k57ErULfbTd+J0mkeuqZQ5ACc3zRipOeqHFVHOX155IdjvnhYx6fpCNi424Be5/3wF206Qao+1DacRHKRUb4rWNGW60jBTMle5dDpdB68xDSXquEyzU0K3Z70v/uoFnz4WMoQRUn0GAG4DTg+OQmdIiva9PqdHPw61YPLxD+fkqPK2sepdvepOsnVm9PLMZm05dNKy5FPmyMpehPRcmTLkf89ONI5QP+3HPm8OPLkDVdRFH8eEf9PXde/P3VeDgi9c24s0saDY25IWr9fq8LiGgAQ8HbA83o45xTROGnk6srdtbsTqCHkDF3r1Lts748/stdI1m63i36/n2SxdziurI8SgKd6AHZsg4uzKMDm9FcURYrO8R/QIlJENI86PErkRKJywT5YHOwyhWiQSQ7QtD2OscMTstGJUVEUDxZdc463rfZEhE51cQrAqE+BCTsG0PWY2izAeszfcv6j+tJ+R0R6OaXv1pTTt6d07I6072TkMlRQzo0jB8w5kFfC+hiQe6Sf4pHVHMj6pML7quPMEcCpiWhuHCoz+pg730nWdevXOK6d6l9bDqXlyIdtfWkcGXF4Eq/4qvJoObLlyMfmSP3N8aHlyHhw7Dlw5Mkbrrqu/6+P1vDwmsaA3Hh0IA6AgACO50bobThIuECoB3LhHRIR+S0+KQhe+3TszpxrXalusDkgdyLR67Ru3REn13YOlHa7XWOL1qp66BQ5IlHAjoj0yFzfbUGf
9M3a1KVpD5qH3u/3E3Gw6xKypT3VRc6wtV86cVBAyJEQn9XB9BzqA8jUJnUXJj1Px5xz0rIsY71eJxl5REzbUsJTMqzrfdRM3/viKSIqA77nfIU+MQ59oSQkRYR0u91Gp9NJaRMaMdVyrL3dbheVnOo26hMwlaOSsPuQk0bOx7yPGpV2+8pFBrXPbnvepgO9PzXQtrwd/6xY6e34NTlZUoeTIvX6C1p11y6XlxLlsYlJW/al5cgvmyMrGSNj8uh+y5EtRz4+Rz7E6JYjnz9HPkpKoXfKwV4d2ElFwUk7nnsng16j7TqYKwlomxqNUcG78DkPg/Y7/9w1OYPMKeEY2fo41AldXsdkrwaNAR0iEA8fYatcdAEmQL9arR5EViiaPgEYKJlozjmRoara77ikRK6Lf7V/rrecTLVv7sjr9TpNJOq6TgBJXdibg3G3220Atr/p3nWY65/Kliiq6g7bol7qpv7d7uFiX9r0SRB6c5moPLQuZKF1dDqdWK1WyVb43u12GxMw7SPFU3A8quwy+hRfVvLzCcuxSZXbs8oyFyGP2E9MNCKq1+aIyfXghKTR0xzeUb+TsOpVsYuJhOOUk5j2w/FAdaa61/NP4Wyu/235+aXlyKfLkblrW45sOVLreGyOrKuHONpy5KH+L5kjT5VH3RY+51QUBc6yPLzkzo3DBwbQeO60DlqVzG8azVFhOkBxzA3nmJBpU4nKgVYjOjllHCMTNQ5AVknVr9e3kisBQQaNu/kkt9NGo4CrIKsGrUBIe5pjrjspkRah7wpBRvqnUTJ1VgUWZKT24GBGVKLb7SYZqdw4j+M6RpXDsQmHk787s+qn3+837EnfscLiaK/bb1hU5gpCbre5CZ3aFHUqYSppKsmiE3ZjWi6XsdvtEvCqDx3zce+LT7hUhg6S/rvK5tiESgH52PWOHUoEOTyhqC7UXryo37uO1E7cpnUcub7nMMrb/xjW5Pqi4z1W58fqb8unlZYjnyZHuo5o0+XXcmTLkY/JkYoD+ELLkc+fIx/lhssFrM6e66jeUeciaJzvURPq00HqnbY6ot71H2s7Z8R6XBXoxOdj4PxchN/byZGIy6Cu60b0TeuHJNXBvA8cS3KwMQLgSliASFEUjZxkNSQlrLquG9E6ctGJ0rEYuCiKlKMeEY0UFsbhawrULlQHRDWoExkoqeSitNoe7eiEgJQUz432SC51uK58MqG/6XfSEZyAvF4nDvrm19NPX3CsEzfAXHeA0kmHy4/ftX/L5TLJ0SNe9Fcjhjmf1clCbpz0X2Wt9ausdUKFzareTgGv44LjAzo75ctKHHqtjjdXcuSSu0btxrHBMTV3Lb+pvdBvxY3cRDY3eeWajxFKW/Kl5cinyZG5CazqqOXIliM/F0f6uFqOPFz/JXPkqfKrb7jUqCkfIxIE5WCnQtDOq/C1Xb1OB+x3+doPFKSGUhQPd09xAaqTaR+8HXVm71+OpBpPoTLt6m/0lWM5J9KIWFNHjK8ZadWIDOC8XC4bzs1/3thO/yAQUiK63W4MBoNEJBq16/V6aXwOOiobBfhjUUQiX/QLUqRoZO6YY0MegKrK1wErF1Fz8nCZKjkyZp/4aG49RY+pXNwOkAckoaSh/VJZaOqDRkt9IqITBfqJ/rbbbXS73Viv142JjBJ3RESn2su0iAOhoetc8cmKHnOQ03MpGpU+1oaCKN9zdu2ReR+b44USmN+caL0+6UNH+t1loLr0sTgeogedNHjkVNvVlCcdQ843c8Tblk8rLUc+XY6s6WvdbKflyJYjPydHKopyfcuRz4MjT5VffcOVU1yORBCkKt076ODv9SkRcdwNC8f1yJf3SwV3ysAVVL2/ubt3JwMfm5/r5OOE6gaTM24HPyUHf2kc8tG7+LI8bHuq9er1OkEAwPWv0+nEcDiMwWDwYBEw5+ujeMBK68bwc7LGdnACJVYe/6scFGzdXhg30St19qrav2PDScgnnDnHy0WPtS/q6Or4bktaj0e21D6UlP036qYPRP5yPsa1uviXokRIm25TastEVrdxHwWNOoH1p0yYchNBl6vqQa/RP5Wjt6HlVHsuB4qPw/WsWKS651onnVxbPpFw4sxNenyi6BM07b9jqY8xV98xnGzL6dJy5NPlSDgo12bLkS1HqmwfkyMjmnJrOfL5cOSpzYMe7cXHDqA6QH73R5t6vjuFD0h/yzkyhbtvvcE41WcFkhx4Uyeg5OCk9TmB0GfqyBk0oOp51Hq9GqKDgUcjOKYGc19ro34eowLoGsWr6zqBKp89t5o/3h+iC4D1mC4GZiwOUEp67ngu37Is0/k5MKW+HOCqPlTv9G+326Ux0747q+tHz9Pf3KE1UshCTwUEd2qfJClI+USM4wpiuQiQyt0nU+qbudx9CMEjdvzG+bmIJHXmMCEnW/VL1S//fWy5cSoIEuHUcx1XXMauk9ykU7GAyQlYQV1aT25C6XW7j2u/j5GT9jEnB7dbZOiT1twYta/qV235eaXlyCfKkavVfYXNoGTLkS1HflaOlLHknrq0HNnU25fEkafKo22aoYNVZ/AohQ5AFcExNTB3CgcXbUfPcUGduvaYMWu/lQD9XAcel4v3wxVc13XaVjY3LpWNbvOukZmI5uN/ImR6vRaVq+pGSdUfuQK4LLQFmCAbyERTJCB1PmvUUEGNot9zMtZFtNTjNuLRGHUSBQfNc9d2iXTgOETyaNPJPSIeROTUB9wudFLhkxjPW1cZ8xt9oM+0zXcdq47XZaFFbUkjRegcItGUC0CT86ifsaX1ANGcODAu7Y/7Sa4vuSgm57sOnXTVFxyrlBD4zTFHdaZ+rOSqdpAjuhxIe1TOx+MY6fio1ziR6G86bu2L2qHLXOVE27nNCdryaaXlyKfJkXXRxEja00lry5EtRz42R/p7KluOfD4ceao8yhoubVg7oxMwFYwDsDuM1s3g/biDhf4G0BwTsAo2ZwyuZAXYXHRRz8sZqyoR5aiDeR06Dh2ngp8DZkrlksfiCgZ1/TA/WPvnzlnXh6iiAzg56brol2OAmJKHG3Kv12s4Hn2ALHI3ido3lbXfXKotuUwZsxIHelCi1vO0fh3PsSgP50FCnK9Eo+fngMtJnnb0sTfRVaKw3heftOQmMy5nQNMnEvRNZcBnP8fJoY7T+nRyU0AnUqu4oICam3h4wWZVX9oO9qK6csD2KF6u7+rvGvX083LYc2ri4WM6ds0pvPNx49tOJBHR0L3jwinfbMvx0nLk0+XIlPhRxwOM4LqWI1uO1L48BkdKwlGDn7y0HPnlceSpNh5lDZcKzIXiinbjxGi8Hj1+7M4/J0TOd6d1I8k5mYKbFxW6OrwrVtvKkacqi+Ln0R/fVceV6UYPkeTI8CCHw7l1fXgSwWcFDd3piEW/+r/T6cRgMEiLgDlONA9SgWToP9FKdKQLfCOaL3h0O1AdUT+RJLXBXGSYMfoER3cfUvvR6xXIdeLkzkVfGIPrXPvlkx6fPLg/5SZQqmuIxSPO7p8e6VOfVALzqI63nQMXyOjge03/cFL1iZrarfeHY8cmRn7MJ2a5lAm
d/OauO1av25X3LzfJ8DqoX21Y69aJgf7ucvc++mRB8Tg30c6RcM4ucvbelo+XliOfMkc+jITrxKrlyJYjPwtHyjF9OtRy5JfPkafKo6UUesfUWHUAGv3IAa52+BhZqHC1KNjw3fvl/VXH4L87To6EtL/u6PrdHZrfeeysMsotttN+q7N5FEn7TFQn4kAwVcUuQweZq6z10a/3eTAYpL5rWgQkMhgMGkTD+0UAe5UhY1UyyclVx8y5Hr1ygvH3jbic6I/qCV1QFFioU/ui7XMOYAiJUI8SmE88FNjdXtw+cwBPfzS6Sr0aPVIbczLW37U+bYdjbmfaRs4+nRRO+Zy2SfFJY6745CHnr+g9518uf71edavyU3t1XXlkzcfhY9Y+ut3r+a6rHFZq8Qhsrp85ObsuHdO07rb8vNJy5BPlyOJQBzc1OhFvObLlyM/CkZbK2nLk8+HIY7qIeKQbLu2YCjEH7K4oSs4hdCDHlKztHhMuAOZORf1atE49z4sKOkckbnS54xEHg8gZugI/RceROx8j0jSA/XUPZe3RDJWlXsvxqqpiNBrFcDiMotinPfT7/RgOh2nLW6J6RMWUpBTUHWRVdw6watjHJheuRwcCtzW+q9xz9uN6875Qj5OLj+MYoGrajOtBAdzTSzwy4xMU9wf3I/SZI5+iKGK9XjfkootdPS9bJyc+zmM6c1/xiLvKJ3ety1p/z33Xul0OuQmfyiw38clFJL2/bps5AtKxez+OTYbVX9Sn9BzFz2OTRIrbn2KMy/gUmbTl46XlyCfGkdRZNPXRcmTLkZ+VI4uPB01ajjzI9kviyFPlUdZwqWMeA3SKOkVOsEooagg5UOAaP4ei52vkS6/1x545ElEAPEWG7rwuI63bI3E5J1Dj9t+93x4NdUCudweD1nxfrZstYPUzdZDLHRGNHZdIl9AXOxK508XDRbGPcJG3HXF4vK/6c/kpEKtT50jfI5uu62OA5iABYKrsXZ6uV2Sm42C3Jc7zrXQ/lQAV5BU8dNw6DvctB21knyMs6s8t4lUSIqKncvX0nnspZX1M29bfFBiPTdS0qH1gpznZOsEq4TuGHNOHyt0ng96O98Fxg/N0YqATFq3vUz7nxqE6c1w8dp72S+0G/3W5tuXjpeXIgxyeGkcG19TRGJv6eMuRLUcin8fiyKJzeBKp8mg58nlz5KOs4coBvYOcHlcw1YE5eLoBqOD8Oiciv/HQnWq0Hj57/zj24MZFxprrn59z7JiC+DEidPLzenKy9X4mYEzXN+tSQNhut2kbW8YF6BCVw1khC6J2vvWtG/Ixg9aoEDahYKJOqETi8nF7UntQx8/ZpUY31CbcLtwOFZi0PcBGAZyJljrksbY+FQjVDj36pb8zdo7R7rGIupOnT0C8uIzruo6iLB6c4wCeIwc91+oiUf0AACAASURBVAlR//tYjpGRTwRzxKbH3Id0fE6MOf/O+WxufLkxeV9O4aPKQLHOJ70+Xs7L4ZjLPkd6H7tZaMvD0nLkE+ZIwx+vr+XIliNz9f1ajvRdCjmn5cjDubkxfQkceao8WkqhO76X3DE97uDBsWMC1HaP1efO4g57zGD9GidMJzzvS84pXUYUDIHFt06K2p9c/3OTICe6/XX3xhIPH/FST10f3icC+BGJ8+1sdZycp4Tj46RuLaciF7kIhpJADhS0P9SvgK7Hcs7v8lcZamTE7Un/syDX687t7uTO7cdzYKY2rQSq+szZs45ZgTAXlXTdAs5899xn6s9GoaN4AHDanuv5Y5OqB/VnfEs/u1xyUfwcbuVIzonZbdBTX9xfnei1KPG5jarefMzefz0392TCr9Nzj9kCJRcVb8unl5Yjnx5HpnPj4YS+5ciWI2n7sTlS283JreXIL5cjT910PUpKoX/2CZafp99zA8gRgtbLdV48CqMK1N+85BRLyS3m5JxT4J9z6FNFAcuv07adpHRMCkQK6A3Z1w+BRdvZ7XaxWq2S4Wg7vnsRDrDZbGKz2aRInC8G1igVYMSYtX8qQ40w8F3PcedUXalMc4RJ0e852eZsTWWuv+eigRHNFKKcfRLZc7LwcbgufDKTm+i4TN0//bMSk+qI9AjAUm+s/H+SaYW55SNDOTzQ/ru8VWcfA9Vc3epfOfLwunP1ap8UqE9hi9ppzuf0nJwfHJPZKULR63yCkeuf207OL73dtnx6aTny6XJkbow+mWs5suXIR+fI4uHNssu45ciH53wJHHmqPPqmGTkj887klKi/qSByhpRzoIjmI+FTgsgZh5/v52jUIne9f3bF54yPknN4LTkj47sbqvZfjaMokE1eJzhbXe8jUJ1OJ21Nq3nrjN8fSUNAnJ/TkS4kRR6aB65ACHiqzj1S6QB8jAxcPznbYDweqXKdql16tMZllGvbCc1lRNG0Cm6eOaYRHvp1LKriExS91mWpRHks6uxj8/6ksdyvGSzi4dhc/m7XLpNjwOi612u9f3rNqcmUjzVXThFh7inQMZ/W9r1/6Cont2NjPRWJ1ja9D/iVRs6xtVwf2/LLS8uRT4wjad9wquXIliM/N0f6+FqOfFjHc+PIR7nhcgLwzuYGcQy8c8ajBn5sgDmAzNXrER8/T+vUzyiK6JQa+zECOVZ/ThaqeO2jAmMO1PjTl7S5jPdtPhyzA4jKebvdxmg0is1m01goSiGXnT7udrtYLpep3l6v1yD5Yy/MPCYvZHBsESJ6UBnodTmAVTmqjHO7J+XINzdRog86Lo0iEtH06/jvgKH1cJ7urKRj22w2Dbvkt2NjoA6VncpC6/d+ADb8cX1uQXdVVVHU9/os9mNiW+EcUOdswrEjhzHuEy4jbYe+6vVOpl6OkaD3E5kQ4cxhgvury0Kv0ehorm0f/zH5aTv+u+tcz8vZ67GJQls+rbQc+TQ5cncfGGJ7+NxEsOXIliM/B0e6LluOfP4c+WgvPtZBaPmYMuiogqkb3Kk6P+V4rn9q+OoQx87TejSKpH3OAZDW50am0R+vJ9cfr1PPGwwGsVgsEsj7I1wXTa4uwBHj1GicRuUABqJ7mhaxWCxis9nEYrFobIGr7eljea7LASBOpQauESyu9bE64GhUgt/VYbX/GpXkGm3PJwX85gt9Oc/J0MeTi6QpsSth6BbFPumiDurPRTl1LGqvOm7aqKoqrZnQ96ZoH5xwqEfHU9fN6LzqSvvu9qx/HkU9BrQeFeeYyswJXCepObLS/9iDyohrdU2Cg/2piWpuwuJ25tjhx3KTptzYfTveY9js9voxDG/L6dJy5NPlyMPFD2XUcmTLkZ+LIx2zW458Phx5qjxqSmEOFFxBKFOjOQ6yFBV2xMMtcSk5ZalS1ID9d3/E6JEN7cPHlJxzCu279kNBxiM4DgbUmTM0fpvP5xER0ev1Gm0eHmVDWs1x6GPSoigScUAEZVmmLW0VaNm+FufhpY673S69VHK5XKbdmWhLc5w532VDf9QZADU9T4GYz+owDhqqd5UzOeTaDrbhW6jqtQp4qpMcoOZs5BiwO9D6Ggmu13acBF2eOdJWe2Os+s4VJxElYfdF/SvLMqUURt
QN+eYAWuXh/cuBnutVZZUD8Fx7KiOXZ64OPU9t1CceORnr2HLgrBiV669fp/aZm3B68etVd35tbjxtebzScuTT4kjdTVUny9rHliNbjvwcHKml5cjnw5E5OVIe9YZLy6dGVHxA+vmYUfjxnIHiHKokBOiREi3+mxuYt6PjcFI4ZpA6Bo+AeMlFdbQPfMdhy3L/aJqUjsN51HPIrdYx4uxE287OzqLf76dc9cMLlJu7MkVErFardJ6CMXrY7XapDh1rLg1Ax6nEpZMQjZq443rEQsHYiUgjeJznUT3Vres+NyGhT/SRKBjn894RBXgfv4+L491uN73bw2XkoK79cVtysFe96PtDtA70jp5Vr6vVqkG8+/HJpCaDAznfhrS8f1pc5u4fOUJxks2VYxO73DnaX5/A+Hj9mhxQnwJoLzqJ8nG6/XCutu1E4rimpK/n5fTVll9XWo58Ghzp18INLUe2HEl5bI7Um7ZjONBy5JfJkad48lF2KXQH1t9cUMfAk2tzERcVHN9z53ONtuGg4XV4X90AFYSozw3VFwrnDDNXv57rsvOoostSDUblgbN7tCW1I2TAb1VVpWO8pHEwGMRut4vxeBz9fj8Bh5KWR/50TPr4WPN2kQtjPEbuXAsQc163200vSnSZQDYagVUZ58gBGeUc0eXvoKHEEdEEUScZJUC1h1yb2h72Vtd1Sl84BRK5fmtdWtT2uF7bVPvY7fbrD5RUOS+XbpLWcMVDG8xF4fnsYJYrx0iTa9X3c9fmxk//mYTlJiXqa7mS29ZY23JgjojGZMN9mnPV545NHlyOp8Z5agz+5EB1BU6s1+vstW3Jl5Yjny5HlnY+fWo5suVIlcujc2R1mHrrjX3LkV8+Rx67LuKR1nDxX0Gcz7k7cT92DFR04GrYlBxoa39yxJEjvMZE0QyJ+mnDgVD7o2BxjFz0XH7Xt6+jMPKS3TAoustRRDwAa62/8U4I6RvRtrIso9frRa/Xi8FgEFVVxZ/92Z/FX/zFX8Tf//3fx3Q6jcvLyxSxU2cF2CEplRntaKQK0lLnR16qR9rwcdZ1nSJgGv3I2Y7KWsH2mANyrdqt6hswP+awEB3Xd7vdWK/X6bj3i7ZyEwtNJXHwYLxKml4HcnL7y9VHPaTAIF+1HfVBPrseVBbyLR1zcnDgVR0xdpWP6kPtxev2icSxiZvqXBeX52zIbYxjucid68EnCnqcCQg24td4G4qveszr9/4d64NPrDQq7ph6DM/bcrq0HPl0OTKiiS06rpYjW478XBy5qx/e6LQc+Tw40nWo5VHZUzuiwtCOE105Brp6voIN9fOnA9QBq3Ep2Lsg1JG8nxqVorhgVZEAZY6kvPi4u91urFardJzFvA4yKgf67SSMYW42m+j3+2kXpP219+O7rwO5dLvdlNNeVfsXM/b7/Xjz5k28fPky/uEf/iH+/M//PJbLZcpHV2DlepWzj4++QzYQDy+A1HQMrtUoSi6SGXEAx16v17A5jpNTr5EPnaTocW/bSUSJ0OtTu6B9/ezOrLIiurler2O9Xqcdr1i/4GktbudKUppKARCrXSJj1QHy5zi7ajFGnSA4gSro07f1ev3AXvEF1yPXqL/55IzrdcxaV44otK+5yLB+VwD1/up3+qO+7tilUUytX9NPdAzapj6RPiZrxUfFQi257zpO2qX/al8uzxy5tk+3fl1pOfJpcWS/32u0q3W0HNly5OfkSLW7liOfD0eeuuF6tG3hNQJGpxGMd4ri5KF1OZkoaOI4/MbjeD9XBelgGnF8Z5hj5+jvCjYRh8fh/FeD04IhQRr9fj+qqkoLepVY3Cj5nJOl3/kPh8NU5/7cJsDpS/uUjBjD9fV1/O3f/m381V/9VfzN3/xN/Ou//mv88z//c/z7v/97ROxJ6M2bN2mbWwVKwEkNFZDq9/ufBML0zQ342ATAHY/f1YaoT/PQXe+6u462jZyQjxeNYvlEBcLgHLVf9RUHxpye1+t1AgGP3qkcsDP1GdYt5Ap9JmXM++lyVvthXJy73W4boRyVId+PAbaStEYO3Se03x5R10g1RY/l8MhtjJKbyKhc0Yfbag53dBzUrbZHW4pdTpraV2/Dx+eR2tyYc33yuvEZTQdqy88rLUc+TY6c0ffiwA8tR7YcmSuPzpGmL86lnpYjnx9HPsoNlxuFK0YNMuJhuoQ6DwPkOn0smHOu3N2qboueMy4VEm25M/tnJwUfC/X7nb2SYKfTicFg0HDW5XIZg8EgptNpI4IV8fCxr45DwVDbKMsy5ZOfnZ3FbreLu7u7oPtFNMeii4chGXZQ6na78U//9E/xL//yL/GXf/mX8dd//dfR7XajLMv4t3/7t/jHf/zH+O6771JKA4CrIK657+pUjHW326XrdTtVlb8+Nck5LoZe13UjvUKJgTFqhDNiv2OVb+OaczCNeijIKRGw0BdS5k/BFjByPTJWftMIH+25XWrOOeNhHAosahs6TupXvTCZcRmoXtSO1+t1g1h0HBQHe9WhY8EpX6Nu/eyRdtrOYYViCef675oLniMu75u3rbJEtrl2c2BP0UXRWr8CvfbPcUaxiPNU7toHx16VWY743Cba8mml5cinyZGTySRuZMyR+dxyZMuRj82RuyM80nLk4bwvlSNPlUd7whXRjLbwWR3DO+kOkvuu/7W4MWoffIGunq+gqec4efCfHG6O5UjJDUaV2+120/axu93hxYdlWcZoNEqRx6urq5jNZhGxBzh2NdL+U7f+Ua8CG20MBoMYDof7tIQVqn54p05OclEUsVgsImIPwKvVKs7Pz+P777+Pv/u7v4tXr17F119/HUVRxNu3b+Pu7i76/X5DLkokZXlIi6DOsmymMXC+ThycKABqyEKjUAr02Jo6DEUJQ/VMuoHuKqTy5TwIQvtN2Ww2jfeo8AcJQihqh+ymRIEcjkWe6LtHdZAd8qA9HadORLT/PvGjPq9TfYVUDo6rv0JuZVlGcb82ooiiUSdFZazf1YeULOmrRit9gqIEz/V6nUbatE6XbY6I1L6O5bKfwiufzOUA24lVi05aVKdaJ3X5ccU+HWuuX9onHZe225afX1qOfJoc2ZWx6Q0OpeXIliM/B0d2Op1Y3tfHdS1HPg+OPFUe5YZLlaOg647njkpRReWErnefOlitT43OgYBzATF1No38qKFp3XxnrCrUnICLokgLa+n7bDZrREY4ryz3+dX8Xy6Xqa8aSfAxY0zD4TAWi0WDtKuqil6vF+v1OuWOTybjiIjodLoxGo1ivV4nwhqPx7Hb7WI+n8dgMIjVahXT6TSB62QyiW63Gzc3N3F9fZ0IQZ0d59VImeoBoGQculhVo1+MUSMQlN1ul14kqQCNcwF0Sp5Ksjmdqd0gd0Az56BOnLShkTn6wTVKEhCigh39YwxqNzoGjUxruoXbv/fNQVtJk2MRkaKPGnGkeO677qqlRAXZ97r3ayOKaNiy6ssn8roQmXrVX/lTQPcImU8wlQA1Gq7+59HY3CRTJyx6ntpEjqRyxYneU15chx7F1z468am95qKPLqNj9uJ9VTm15eeXl
iOb5alw5Op+TWJVVXF5eRnb7TZWq1VERMuRUqfaYMuRv54j1/XBD0glbTnyUL5kjjxVHvU9XApyKnw6Q6cduPldBwsg+AJD6sh993pVwBibLu50otLcche2kkxEMx9dx881m80mtUV9eq6C8HQ6TZ8Hg0HqF7nry+WyMTaIh8jYeDxOL1AkKkRUjXrLct92p1Olp17Us91uGy9xBNgXi0Xqy3A4TIt/WeSrulWZUwf94/OxSCty1PdvIEfXj5M79fnuSG742BVRQLc72mGjEb/Woyf+G/Xrn0dN1C/UXpWUNXUF0lDQ1jF6PQ4e2jbfNYqp1yk4RkSDtJSsvaBfFjNjsx6ZpM3chJLPbMigMtEJiMoTO9FxOwmrjymxY3MuU353/audK8b57y5vPaa45gCu/cR2Ne3H9aw7tqk/+OTCbTCHhz5WlZXqUNNx2vLLS8uRT4sj1/ebYRRxeL8W/tVyZNPuWo58RI4Uv6Zd2mw58vly5KPdcCEkBc5chEANSwfiv+vdYs6BGLQLxJWpxzTC4ICiUQIenXu9fPfrI5rvr9A+adQKQ0GxCq4omNQFSAUH1WgVb6Zn56S6rlME7sOHD3FxcZEW3kIyZQFARyLos7Oz2G63MZ/PY71ex3g8Tg7DfxyPyCOgojnnbrBKJDinRlzU6KlD/3KpDf4dW1O5067qFaCk3w48lLquo9/vJ3nrubStoKvA5TaIPUFc6gMKQlqnXoutYms6Tp1Y6DXIDLk4UDOB0Cijyoo20auCh4KvykDXgdDXNHHYMc7mO1jUBlRvqmPts+fmq6ydAGmb3709xSds0oFc61LS4HfadH8+hkU6IVCMyUXXlCyO4ZgTEwX/UvKtquqBHjXVQ+1RbVknTt43x722fHppOfLpcWRvNLrvfKSbqeVyGYvFouXIliM/K0dqaTny+XDkqfKoT7gimsCtCnIjQZmqoIhokJAqkwFFPMzj5FpVqLfn0Rc9x5Wjv2nxMXnfvB9coyUXiQF8VAZE4cqyTItw1TiJdGiksSj2Oy+xyJhFm1VVxXg4jlhEVNU+xYJ+EunTNgAJ3a6Wdl32jB2S9v/qHErSHv1w4lDZKeh6FJjv9Fkfo6vdqaMreGne+3K5TP+V5Fx/3lf6oW2qromO8lllf8yOiDIxdrVhJQ69jrY9kqqA4WByDADpq0+aXJ70R/VSFEX0OveR4+Iw+fJxqt41sqmTNtcj/aMutweP2Dlm+DoDZK161olkDiP0N43iA9yKebmJq+tO5cZ/1mC43eq5Sp4qT5/4emrNMd2r3eikQfXmBNWWn19ajnxaHEl9k8kkFotFdDqdliNbjvy8HCnjwRZajowH/dXypXCk+4OWR08p1MFqcYDmvy8S9DtyPdfbcBDSNlCw5njnDNP7miMZjyQ6yFEUPLQNrZf/aui0g/Oyq02v12tE3Xa7XTbtIWIPpvP5PHq9Xmw2m1itVjEcDhNRHPpxcAKu6/f7DVAghYIoLH2MePjo2CM1CnpMGHSM2mcHd40SkVOsQKoTEKIVXAtxqvw94qRgro7nNqS25mCkvzM2/h8jLpcR7eukwiOWyFmje2rzSmB81mih+4mPKedj/hge/0HWmqNOnzWSx4LbHABRn/ZR/ZJ6FPzUl3XMPglTvahfefvarupUI3A5AFUbcptR/Soxqm5ywK11q535DU0uQqm4k8M8Pa6RQOzfz/HJN3Vr5NOjeW35ZaXlyKfFkd0jOmo5suXIz8uRD1OFW458/hz52Z5wqQL1NwfrXDQmd+0pR9f23Pi4FmA8phxtx+tWA80ZpjqnO6p/ZnwoeblcNlIKAPXtdr94d7VaRafTSYuGO51OuonSnGxy+amDhb3azr4PzW1WAWTNT0cngAjgf0w3GrFT4Hf9KJAjK6Ja7hR6vQKiRyA0IujX6h9gAABSnxKz6hIw0DQPJ0fG5RFJ768SsdqS1qXgo9E5XU+h0bucbLSunN25fR+b9Km8FEQ9UtTtdqOq9jt1uW8d+tR8QScTlRyQ0VcnX/Sci1Q5AahctChZqYyQreOWkxI+4mNUkuWzyjwXUeN37adGWBUnVO4+keMzdulpKVpfRKT1H+5v2odj/u11teWXl5Yjnw5Hwov1rk5PbTylq+XIliO1PAZH1lXzCZfKqeXIQ33azy+FI0+VR90Wns/6506tAvDO6ragqjD9nnOAnCHkzlWjdSPGwDRSp/2kzhzxqNLps/fRDX2326V8aJeVRlE2m02K5pGzThv85xqIZrFYRFmWDXIpyyJit+9HFVVjvOySw/jUoNWxGaOSx6H+5hvRNTKiRMMxZKdpH27EHOd8rsF5lUjoh1+n8ta/iGj0XaORSrAaIXTbUdtQEuM3QADZeR0+WVKSVVtR2yDSpHJUe/P+qk6U1Kjr2OQnF/lUWeluSeikEe2pH/ookWWPOmrkmD5rJDkikp/ouepbqg/FDAdn/ELtxK/3ftMf9VPkrHXrRCRXjxYlNJ0IOX4qSSADxqz9U6JVfTsWkYblky4dv9qETpbaG65fXlqOfJocmZ4eFs2JmftSy5EtR6qO9ZxfwpFsYuY+0nLkoXypHHmqPMoNV27Bm3ZOO0Zx54qIBnjxW+5OnnZcaU4Qeo0qwZWGAnJ1qIJ8PHqd98vr4DGoRmKIrnmhDq1bUzGIZCyXyyiKIu3UNBgMUlQFw8MYy93hkW6nOuTMOvjnwE/TAgCSVK8QSlEcopIOtK5TJwhkqVG2zWbzINqGvXG9k51er4DJ54hIu1cx7qqqEqHmSBTd81uOCLRvOha1UVI7fReo3Gd1dCUQPaZgriCi/dbvTibeHv3X8xUcNXrkkUT3jcDn41BUfj5RU2Dmd41O0wedJOnkBp1rf31S6ADc6G88xCT1O9rVSJ/atvcz9/1YoT/gEhFtJ0V8S6OFToLqTzmcQ4+Kg6o7JSGX1cfG0ZbjpeXIp8mRafzR3P5a15W0HNlypP5/FI4U93OfbznyYfmSOPJUeZQbLhSqitDOaKcUQJwM/LsLQet3pTkx5c6hr240x+52VRFejzstAAuYuRMqSXGc89UAvA3OZSGvkorKHMdjo4zVatUgkJAXHmuUUZ9KYKiuK67RPwBXZcQf9eccG8dQG1FnUDLgT989wuP53W6X3uPCNrVOIB7503EoaeT668TBdwdnrVvBQ6NTWocSjF6vgMJvDlRaB3pSW9EIppKPA4n7lH52EHHQdZ9yAlDC2Z/YJAtfm6B1OPirXnxXIuyMCK72wycn1HkqlUGLf8/hlYK5ygsZnppU59pQ4vLx+KRL+6w45jr1sZFSpW06IWpxPPaJfVs+vbQc+TQ58pjvtxzZcuRn5Uh7UNRy5PPhSL0h9/LoKYURkRVe7vdcZIfrcgCbIyAtSgh616vGqVED2s1FrmgTwNf69RyNKtGm90PBi/Mhn/l8/sBR1Ei1n9TBm+7ZNYmInj5OVcPUPqoT6uJMQFg/k4+rgKMRLwfWHAlrn6iXPilxa+66EoD2v673efZ3d3fpN2TAtdQFCZFugkwUcJCF6hsZaLTE9a0TpmNEoI7skRJk6ekc
mneuclUi0ImE2reSIzatNkT9ERGr1arx7h4AHoDUa9z2fNJ4jHTYpTAnOyUk0id80lHXh7ULGjnziCj2leurytNBXe3aZa54o3XqRFQjoLkJk/5XG/DfdYKgvx0jfdW9nq+RXG1LI3U6llxxP+eY/m/LLystRx7af0oc6QlA6ustR7Yc+dk4Uj7rzVbLkc+bIx/thisnfP3OedrRXHRCwVsdVx3EDcQNWqMJGp0pyzIBpF7vEQ9/VE4/tD9lWaZImYIB56sjaJTO84sHg0FyZkAvR5hECBU8/JE/eeyMwXXCGPWPPtJvNW6/UVND0xQJBTuPLPnEQeWg5+vb0709xtDtdmM0GkW/309P8IqiaMhU69cXayrYluU+79nH6frVNiBgB2kd2ynZ6dhoB/tRgOA7fy4TndggE9Ym8FLP3HqA1WoVi8UiZrNZLJfLw2Lx+vB+GsbpkUcFTh0TgK32CuAOeoOGfJC9Rug0DceJkTb1vSXs8IS80XtukkDBt9SmHFB9supyRgZ6Hv7lMvFJaA5fkJv2lcgax9S2tQ4l5xyp6zjUBrED7bO2lZOJRiCdKNvy80rLkU+TI7dMmqJ+gAEtR7YcSd2PzZHR6T44r+XI58GRp8qj3HDlQP+YoPnMfwah7xOgDr5rpMiV4KDjTuEEQYRCDVONwxcl81vEHuwxBP3e6XQS8GCsRJUcVCA36phMJo01VxpBUYfVKNNgMGjcXdN3cqxxXvrEeRQFeT4rYfrNmeoFGfIOBE+Z0AiDgqoCrho12/OqHVGv6j9iD5zj8Tgmk0nc3t42rjv1biCAS7f2nc/nDXmo0zqIIWMIRZ0QXfpkSv84pjY7mUxis9nEfD6Pfr8fk8kk6rqOu7u7RCSDwSBub29jPp83FsQiD17syX/fQQvA5Q33fGZnL+pZrVbJxtRO0VdVVQ+iQ/wVRXOhL7bc7XQj6v35vIQbm8TeIiIt+MVm1G+dHDy9iHUW6qO6BkT9jv7qAnwnONWPykIJArnym8pNf9fF6qp7tTOdAOn12JhHOzXS6XblhHCMMP0z5zleqzxcjm35+aXlyCfKkaYn6m05suXIz8mRa5nn8863liOfB0f6MS2PcsOFwJyM9e6PDuu5Hs3AqakTR9aB59pmoFovbVIfBk+EQO/qtT1VoEZVUGTEIcdZ0w1YZEpfMKherxfz+TyBvRNqxB4U+K+LIAGv9Xrd2PFIH2+v1+vodrsJWHVsTWCvHxxDVgr+2j/VC3JDDmqACizoC1lHHCYAOBfgz/+c7QAyfNZtdzk2nU5Tn1SfSura7nq9Tn/dbjd6vV7KbY+IRmoFZInOAGaNjuqEQ8ndiVr7s9ls4urqKjabTbx79y5NAObzeex2u5hOp2k8l5eXcXd3F2dnZ1EURbx79y4Wi0UMBoMYjUYpkqlEyRhUb0RKt9ttLJfLB+8igSg4D1JQH6VutXGihuyGSdrOfm1EpBuu0WjUIEOdtNGvuq6j3+835Lvb7dKWz0qQ+JtGzBkv7yjR6Je+t0Sj9/RFfcF9QHFEcUx9VH2ZNCQlCJWx+j4+55E5/U2/6/bBuf9KUscmjE5E+l/xWtvWyF0Og9vy8dJy5NPkyMPgmhOwliNbjvycHLlcrtI5BAdajnz+HPmrb7gwkGMd+ZROAMzqlNTtA3Wj8rvV3ONejquzeJSA8333E4B6vV4nCqw1ugAAIABJREFU4FHnBdxRNMbKeDqdTnosro+FdVtbAIxH3kVRpCgHJMT2q4xVo444j47ZjbkqqsO28FXVMCwlcSIfEDnRCQUfvUbr06ibGjBOrzsq8Z3rVO/aPnLRSCHyA9wUONXR6SM2QooAEVMlU+QFWZK2gE7QsxKkP4p3nwAk1fYiImazWSKP3W4X8/k8lstliqgR1fvhhx8a8h2NRik/vyiKuLy8bERyqQMbUdmrH63X6wd+wEsZKZvNpmFz6lvYAePic1EUMRwOG/Wo76NntVNNEVLwBlOYYDEG1QF90+i2grNO4HQyqCCuEUoFYL5DREpoapuOSchU/UPloIvXdRKGfTg5uY40ku4+TFFc41w9RwnRP3u0XW2bSUFbfl5pOfJpc+RekAeuVB21HNly5OfgyNXu4Os6wW858svnyFPlV99waQf10aEep8N0RoXFIFVBqmi9G9W6MSo1bhdQxCEyFnHYQYTfnGyoT42GR5ekNLCtbFmWaQcgojp6d03kp6qq5LzqJET2ICqiOICJPoKGrBQk+fMoAo/EFWw7nU4U0cyRBuw0okb71KVtIhttR8ncQUJ/08/b7TYtaHadIleNYrhOFMhU1/oYHNn4o/OyLGM4HMbZ2Vmyt/l83rhGbQRQp45+v5+iZT6JUJnQnk5qkO9ut4vr6+sk38VikcbIJGSz2cRyuWyAPtdfXl5GxD5VZ7FYxGg0iqraLyzXhb7UhcxVjhp5pK+AuoIS41HSzdkPuqqqKm5vb2M0GkWn6EasIoooEiFTiE5qNJT+0BbgzW5butMSciWCpxFtB1WO0UfIBSIj6q4TSvqhdqhypC/ad17OulwuG+sYeKeH2qtjDO2q3Six6nh8so3dOXHkongqUyVBxV2K+ibj1CheWz69tBz5tDkyYo9TfjPccmTLkZ+NIydnqQ7lCkrLkc+TIx/lCVcORFSpfveoys3VpXWgPH5HeAqEKF7vvFUwCjjUBfhrNEAjdtQHSLHQMuIAnLrol0fUOKoCBI4DIUE01KsAXlVVyhfOkS/yIzcewjk7O4u6ruP3v/99nJ+fp/qT8fJmcwF17SPAQVSRdomcKViq4WuUSwFKAY22AEfXOTLyiUOn04nZbJbqp6/oAEf1tQYKOl6wFcY5Ho8TgK/X6xThm81mMZlM4u7uLm5vbxNBK3kyISC6io1Qh+pH7Z6/xWKR5MQ40D2yJ62jrvfpBPP5PMqyjK+++ir6/X4sl8uUv45fAPx8Rm7oVbdORocqLyYyEYcoHn6lgKKgFxEN36o7zYWnjF8XaEMIEZGi1T7hoV58hPZpq9frpbGiW/qm/RkOh402sCkHbCcRnSQhA+pnYXpENMhSJ8Hz+bwRkVMC0kmc2gW60kmf4qFGVLVvyKEoDjufuU+oPjw6R1Gs9uO03ZZPLy1HPl2OHI1G9wJobkzA+FuObDkSHT42R6JbbaflyC+fI3O4TXmUJ1y5O0xVkgK+Ak2OXPRuVuv14woqGo1A2E5mg8GgISTNf+73+7HbHV6GCJihEB0XAIFCeczsTkW/qNOjjIPBILbbbYxGo2TkOD+gwfh8vLoVLTm9t7e30el04vz8PPUXY9rtdof37NWHqJJHNwBYxgqIIRslc4+0Kcg46QB0qksilkQqB4NBLBaLlDO+WCwSQOMcOmngM5FAdKKRJp1g5KI8GolivB6VY7EtY4DUSE2Yz+eJJDUyBAlEHICClBmN1Cnwo4PhcBj9fj8Gg0HDfrGvwWAQ33//fbx+/Tp+97vfxbt375IN9nq9BC6LxaKRloFt6mQPPRIx0wmaApgCm0aDkAVjfeB70VxzgjxUn9g37RCVo2+QKmPYbDYpqkcdObD
XSS56pt8Uxaq6rlPKCmPjenRaFEU6pyiKhCuKGyobJqYaVVUcVKJgbEosnKsyVL9FnvxXn/Px60RN/VQndBzX+rkWvbTl55WWI58uR7qe9May5ciWI9WuH5Mji7IZuGo58vlwpC+p0PJoa7gU+DXy5OfpnTsg5v+56wekcECMMOLhxI72SUEAHHAyVxqOQf0KBNvtNjmvEh4LU1m0iaEAAuqoSkIAB8AE+GAkPDZXUNe26S/9ADjpkz7WJz1Eo4hVVcVmu2nUhfwVmJGR61GjHOoEWocCBPJW+SDj5XLZ2JaVx/0R+7xxyLGu67T4VevQlJx+vx/T6bQxJiUy/qNbtVN1GEgU2wL0h8NhLBaLRjRKdYjcF4tFYxvZ7XYbHz58aERVcGJIFLlBGufn542dlLB3zkWfEYfJyrfffhvD4TC++uqr+O677xKBoUv+MwZNv1DdLpfLKMsy5YIvl8uGXJCH+zvgy7bNTJo6nU7Um0NkSBfjayQe+2bRr4Itdok942/YFqCJjDQyjHw09x3bxFa73W7MZrM0qcNXiabr5Fb7Sts6cdRIOH1WfImIhCe6pbFOtrU+jbzRZ/U7+srYdDKnn5msYTcewfOJrvqEkgznt+u3fllpOfLpciSTYL1GJ5ctR7Yc+Tk4suoeghmDwSBWq1XLkc+EIxWTvDzKLoU4OB3Vu2mPIimhoAgM1Z2d4whTHzVyjgIdSqcelKIRQwVLBXyuw2lVIfyuxovSeVdFRPNxMuSiURnaJyViOBw2QJzj3W43BoNBAqfdbpfkwfm0jRPRP0iKtqgv7oNndX1YrKyO6capDohReSREH8mjc9UZKR3oij5qighbhgPKu90uRdF0Ia5Gjei/ArTqU3Wsutb8XE3PoL8acSFCRDQNub58+TKm02kiy6LY78KnEZKvv/46ttttfPfdd7Fer+Pi4iI56Gw2SzalpIWM0CVAjY6QMRMPJg5v3ryJyWQSr1+/jh9//DEBDtFG1a3ayXq9TkROexqB1DUVCmIU6mKTDLXPFCHaHM5V0FMMQPZeP36tkUeIGL9XUvNJjeJHp3N4ezzjI58cHSsZ4MsamOAz42ZiofnzekOiu2Bh505u+IDbLzrJTSjV/zQKr7KkjrQ+xSbqnKckxhhV52AShHOKSNpyurQc+TQ5Um2aGwr3l5YjW458bI7cyaYZ4GvLkc+fIx/lhktzM7XQGVc4nSLvVI/ro1l9tKzRDAULvasmAuCAqHfcREP0cSjkAGjpnfVms0kRiOVyGRcXFzGbzVJ0jDEADOy2pBEIojSAYL/fj263G/P5PC4uLmKz2cTd3V1EHF4mOBwO03hwbj53Op20+JPow2QySU49HA5ju92m3FmPTKuRquFqtAAC05x6lS19RQfoSEEFZ2bcrA9A/rSpqRg6PurnPPoJuJIfzjg5zyMPamvYgkakADN0g/PP5/OU/0wd8/k81YFc0lOduo7ZbBa9Xi/+5E/+JN68eRMXFxeNx+aTySTJhP7NZrPkR4wPn0KugCdEQtSt1+vFt99+Gy9fvmwQ+G63S1EzoqZE5QaDQWOShN6xa/50gTEy1UmDR622222SVxnNHbU08oMvq0/7pNOJhHpZJ0A92AXy8ogjOnLCqOs6JpNJ8jdSNJTIsUP6iL+hO/pCvfrEAUzSqLVOgjSlSifeREOVhClOuOoTXK9PKdxvnHCQc674BFPJqy0/v7Qc+TQ58lj6T8uRLUd+To7cCIzCHS1HPg+OPFVO3nAVRfE/RcT/Xdf1TydrkU7hqHTA7w4ZrDq8P8oEUFSg/I4jq5B1kAwa8NDIoC4IhAyUhAB9gBOj1RzpoihSNIfInToaDkb0Q9+xoBEewIr2SRPAMOfzebpWZUAkAgNnfNpPxoizrlarWMchcgBQeE63tsVY1aA80qNExLk4vJIP9esjcXRMdAO5EFG6ublpRGMYG7JF9rPZ7EE0jrr1EbJOLtQusb+ISCQc0XyBIO8zidhvK7vdbqPf7yew0XSD5XIZP/30U8qb73a7iQCQAe+cwb70MTUkq2sjdEJB1I71C+Tx//jjj2liQd/RhU+4AN71ep3sDtlrBFH1haywce23Tk6m02mMRqPoVt2Iw3szG5ND2tDfiERXVdVIO4E0NZqFzWLb6NZ3OtNJzWAwiNlslvQGYQwGgzTRY2yOQ/Sl3+8nDKqqKkajUVqgrRhG/RGRrlFsUtlpRF5lpLiC7HW9CP1QYkCPKgPVl0Ye1S+oiz5yzDH5GPH8Zy4tR37ZHKnYC4/oU56WI1uO/BwcObjfpbCOww1Xy5HPgyNP8eTJTePruv4/P4VIdECqdDqvkR2NQCm4qhCUSPjuBMJdMREcjUZoGobfhVIXhIfjRkTagUcBXh/bkt5wdXWVFhETffLICxES+tPpdPbbZd+TnEZ0IiLtmtTtdmM4HKYooMoHo9XIE7nHKJlcYcgG+ZTlIVJFRJP+Ih+IQo0UB4V8VNfUozqmDl20CahpNO7Fixdp8TLAu9vtX2qoL6jUCQc2AZl77q2mFfBZ/xgzclcCres6gT4RXMZIjjQLddEDC0PH43GKzkwmkxgOh/Hdd99FVVVJP/Rdt7LVaI+2ix0wRiY+EYcJg5KnTjQARkh2t9vFYrGI2WyWJjEaOaJ+XcCM32jb2BfjwGY6nU6a9JDK0SCLupneop91oqGP9tGR+yyAyg5Sepydq5CpTj49VYJ0HAi6KIo4Ozt7UK9O7sAJbBP9MaHTJxj6tAC/YRz0TfGC6GNRFGkROPLGJuiPRvCQlZ6jgA/Wqv7oO78r8agP5yb8H4ve/WcsLUd++RwZcXh9Be22HNly5OfkSH4rovmOvZYjnzdHnn5L1ycWBkqDniKhd4RqVKooflPC4DuC0DtyvTbi4AD6+FLJRRWld7kKhBg3j4KJfHQ6+xxlUhi2222cn5/HcDhMxkS/NdrhTjqZTBKo8zuOg6OMRqPG42PSBvQuXImJggPU9eFt8WdnZ0mO9AHQ4rPKNRfNVlkvl8vG+0GoA4fhjwgLbXHcnRr5rNfrmM1mKVJG+xqlYWzYF33SFBf6BCBEROqPpmSojTIWJ0UiRsgI3Y9Go5hMJiky3O/345tvvonBYBB3d3cpyqMvjlR75pE776fpdrsxmUzSd3xBbQeS0F2ZPMK62+2SvQKCADT+BklpGgtFXyKK/NQGPJLHeBQokROTDC9E0TQSqxNKbAW78jQa/E39DPuoqipN9pTo1C+VoJgEcD6TAGwN3erErJbofsQ+kss1kC5t9nq9mEwmifywX50E00/qRg+NiWBxiLBpJBZ5KoboZE1JWtOJmIhovYoDyF4xQuvV9tvy6aXlyCfOkWXR2Hyg5ciWI5EN5TE5MqWyFge7Qy8tRz5fjnyUNVx0BGPUwergAHFVOOew2I8OI2AlpNxAdLA4GG1ikBwjMsLveiet0UKNhpRlmR55cg79ZEyQD2NSYuI9H4Dk2dlZeu8AhSgJ//nMgkPNsdfUjbIs48WLF7FcLlMESdMVGHOv14vd6t44MlEsCnKGWI4Z0mKxaMjco3fIgHo4T5
0SwtPdd6qqivPz8wZxuv4BeUhIHzsDtJPJJG5ubpJTK1GqHRDZIscd29BUhRwZdbvdFC1ii14W/dZ13Vg70O12YzqdRlVV6fE8ExSAX2Wu4DAej+P6+jrlQfPIH31wrUZNVR9EJIuiiPF4nEgpornQnYmWRpRZI6HpHhrRcpDD3ln4mkinaL4PBB0iQ504IG/sRSNWmq+P/NQXVY4qFyVB9EwqEn6EHaid6mQEXQDmdb1/N41Gf5ExUdzxeJxwjQmP6p2ceM5B/4xbcQy7YPyKG8hOsRHf43qtz3FUsdgjdykKW7SphI9RWo58ehyZbLs+bMiADlqObDnyc3HkaltjUOlFwPhAy5HPlyMf5YZLH/uq0/pvGg3Q/ygDI+C45hirAendKecjFK4lArHd7jePIL3B3yCt0QHAh75BIPRRDYfFojgIClWyIsrU6XRiPB5HcR/NWK/XiThIzcDIaJd+sDgQEKZOPvM+Dq5ZrVYpcrTdbtNiZjVCjEnJWh1TiQzA5HwFFOTkxqbEgjxIM9AII9drvv1sNouzs7O0QFMjDuSlO+nx+ezsrBGR4jeNyGBPABrH2AVps9kkUtrtdmkbWiYCb9++TfplbLe3t420g+12m3Ks0SH643eiuEQ26Rf9jDjs4sPYIcz5fJ52EFK/YVxFUSRQWywWMZlMYjKZNGwEf8K2AFvkRjRRAYvzVd/4u9p6WZbRjU7E8rAWY7fbJVu+vb2Nfr8fZ2dnD1JH1CcgKSVlXYSveMI1HOM/YI4dsIYA/SpOQBoRkXyGl0GCG9SnOIe+lDSRFekrGl3XCRf6IFKt5KlPF5jcEpVFVoC+RrPBKScZ9KVRTyU4JRLqraqqscFBW35ZaTnyaXLknCcccZiAq523HNly5OfgyKJ7/+TwXoakgLYc+bw58tGecCEcJww67B3FuYhCaYqAGisKom5VnBuOkpQCmkbDcGgcn/ZZGKlkgmOiPJSpkUjtj4JlRDN1IqK5CFCVqGSm4AwJ+kJAbVONW/ukBsiixftOpSge4+QaNXqiN0RuElBI1FL1Bwji6JpLqzIlSqTgQcFWiH4QOWOh6+3tbapbt5lFNlVVxeXlZVxfXzciKxGRHn1TsEmcXkmVNgHkXq8X5+fnaRGuRgEhmrre7+jDo2naXK/XKfJD9AxbVEJV+8EWZ7NZ+m21WsX5+Xl0Oocc8Kurq2Sj2B3AxCRK0y2I7mIfEC92DVmjb01/0tQBXTehkyh8tNPpRLk7+OJkMom7u7tEuKT6IIvtdtvYWSoiUqSMdAQInyACEzL1N2xYvyvYElG9uLiIxWKRztf0DWxWSZa69P1FRB81cqkROsau/sw4kDnn+0RRI286JiYUeq36jkbi1Z+cPLB/x0/kpbilNwGc05ZfXlqOfHocGbFfw6U3HspfLUe2HPnoHFndv2srIsbjcYxGo5Yj43lwpAdXtDzae7i8MXVmjUrhHDgI16jQ9XyUzHF9FOokojcZRJ663W7jKQ+O5pEfVag+mtT2OAax0D/q4YV6+lgVB3TyAWA1GsJ/ImkaoVDHoF4nDQBpsVjEcDhMkb1+vx/dXTdiddARslTjpD+keETsFyqT48o5kJ8apEYLiqJoRNlYsE2+NQuvIZder5eIjOtURxH7FA0cHJ3zHcD/8OFDnJ2dxdnZWcp3p170PxgMEijRP/qPbgEMZF6W+0Xgd3d3KZqjAL7dblPUD/3T/4iIq6uruL29TakL6JBxMBb0ji8ApLShdd3e3iZ5cn4unWc8HifCq6p9DnddH7bmRW8adeZ8osIUbHI6nSYbL4p9mgAAn+z1HlABQUibPhBJI0qM7WmagD6lVYxwX9KIJ76lUX6IA7uNOLxksdPppHaISnMMQtVdlMAPCJU0L8ZK/7BdxQiKgjv4QLoG42CMKddf8EjX2eQK7TqRYWfqV8hOo/v6xznU0ZZfVlqOfJoc2bl/agPu6SS65ciWIz8fRxaNulqO/M/BkY9yw4Ww9O6PTvidKwLUhYM6mBxQOSkpgHM+jq2PE7V9QIDHswpYAAuLPomAcOdMlABF6di0rzgM56s8MH5Iif7yvhHAjN+UnBT4lIwYL2PUlAvVzWAwiN62HzGLKMoyOkWnYTBKlBg6EbPRaBTT6TQuLi7i3bt3qR0lXZ0kIBPyoDmPsUBcy+UyveNhs9kkwGEsmiJChAoCLooirq6u4s2bNzGbzVKu8GaziZubm7i4uIjJZBI//vhjbDabODs7S9E+FnRWVZWioxCP585Dgujz5cuXsVqt4ubmJpbLZQLiTqeTCBiwRm/k0fd6vSRT7Fl3ktrtdmnBL33QSQypBpPJJIEb0RzsGRKDNIn2EY1EH4BnVR22O14sFo3oJuPBT3SCh81AWiyWh2DI+deCL0VEepGn+lW3u18kSy7/1dVVDIfDuLu7S4TEug7doQtdat+xvdyEFr+BMNE5fsSYiGiqH6EH5MGYHGu4Dn1QP9ihmIXvUxdpH9omNqh4gy9Rt064HZeUKHzi6n3T8/QcjnmEsC2fVlqOfLocSX084eC6liNbjvxcHDlbHTbN6PV6Kf2x5cgvnyNPlUdLKQSE9G6VY+SGq5BHo1ESKEahjx4ZMILGaXRAGrmCqDQao8SmkT3u0PV3HIE7fxYyMiba5zpAkXa73W4CmIhoADqfqUMXNwLMmleNwRMt0XcsAADIRxe8avQBkozYO9ewHkT8dP8+h24vGSwLjqkPWRRFEefn53F+fp5yxt+9e5fAEBJUYyRCRxQOGaFH5E+EkBxo6iQKpgQLEZEaMp1O08Rgs9lv7fr69esk+/Pz87i7u0vAEXHYqpZH7Bq1wbY0YovOAbvNZhPffvtt0g3ghU56vcMLB9frdZyfn6dF00Q+iezpBMEjUES1PfIDOUyn03j79m3U9SF6O5vNGqkj6PHVq1cpnRRyByjxR0jQI7PYkQIKOlcgZ2chSPfVq1epvUF/QG+S7iByZDCbzZLOqQfi7ff7aREzbc5ms5RKQwRYdYA/46OKD9hBp9NJ7eJLSjbqg6p/CL/T6TS2o9ZJGTaBr+tnjcTp5Bv7AEP06QJY6BNSnbDtdrvG7lnqx0oGSlIUbDBHMoqzXI9ftOWXlZYjnx5HriRtkHVdPLVoObLlyM/FkTHfr5Eq4pA613Lk8+DIU+VX33Bp4xoJoxNEJxgsAry9vU0DBDggFVUs10A2FJTMORR98RoFIyDvF9KiXu3v7e1teoQPqEJSPGqsqiqBMoDIIkM1qIjD9q6QjPYLYPU7cSVZJWM1Jn2Rod7ZAw44gxJERERXIk39fj+m02na1UeJl4gZ0afhcBhfffVVSkmAePVRc7fbTZGZ8Xgc/X6/8V4mJWYFFrUb+otDaTTl4uIiAcX19XX0er24urqKXq8Xl5eX8eOPPybne/v2bVTVPl/9/Pw8EQA2Qx+2232OOS/oY73bxcVFijytVqtEDvSJR/48wsYH2N1nPB7H3d1dIkSiXEqmEKBGOsuyTOka9PXDhw8pP36xWMTXX3+dtmsF/Hk54W63i/F4nKKIFMZJXjXrD3q9XoqAA
rIAtfstUaSqqhJB93q9tNh4OBzGdDqNbrcbL168iPiPiKI47PY0Go2SX2vKCbajZHt9fZ0mMdjH1dVVwozhcBg//vhjg0DQMRMPtemqquLi4iJms1kicOSvC7Y1OqYgiq3SPum6agNgBuPwaBtjBb/op7ajOfEaUdZJN5FtJguMDx/TaBz2hx6VEJQw6GsuYqnErJPetnxaaTny6XLk2iZcbAyhsmo5suXIx+bITreZct9y5PPhyFPlV99woQCNutEwndC0CG4EAFEATt/pgAL4Ayz5DUBBuDgUfUBo+jhY79D1PO0vTr1ardJNiYIgY9DHmgCIKg0FaPoAdeidPlEJDIw6SR2IiAd9jThsCwoBMS6VA1GTAylH6j8yZ2eeu7u7iDjkgCNnFkDe3d3Fzc1N/O53v0vXEd2EbAAW6mZdAI/0NQqpxg0w8J33OKleIZRXr17FZDKJd+/eRVEU8dVXX8W7d+8S0azX67i9vU16g0A1Uswi1KraL0zFeSaTSWy327i5uUkR3MvLy1gul41oD9dpNFltQyNQRKFID9FoHgtsSc2hfYi3ruuYz+dJvjg2+fXobrfbxZs3b9LC28ViEefn541ouUYjR6NRip6/fPkyRWT7/X58+PAhyV1JV4GOfP/z8/NUF2Ol/jdv3qR8a3yBHbiInjtw4ytE5m5vbxOx6nbOZ2dnqX9KzjoRxZ/4Db2wyHs8HqcXXZZlmVJJ8DlPgWASx2J22sAelHiQt0fvNB2D48iC3/mMzukLNqQ+j43zp/1F5ooL9AvMzj0N0T5zHudqJLEtP6+0HPl0OTJFuONwDjdEES1Hthz5eTjy+od3yVbhpJYjnwdHniqP8oRL7xhpkP8oQgGqLMtk0BGRFhhqXiZ3/Z5qofUTvUKpGjlUslGnR1l+t46SqGc2myWA4jyAiMfykCLX5wgLgijLw45OEXujUEDWCBCOTyGPua7r9GgbUkAu3NHrVqvIYP95f0yjEIAAC0vn83l69KpGrpExQENfWjibzdJjZByLfqpDEXEAkNQ4z87OUptEH6kHZ/dIx9XVVVxfX8fXX38dl5eXDVv65ptvUr43ETJemPnu3bskB33cTIQNsijLfZpJURTJHgAuiMgBhtSF6XSaxnp2dhYfPnxI5MwYt9ttahsCICIEGaP78/PzBKa9Xi9evnyZdshCd7xolEkKBA8hMyZe9jkej+P9+/eJ1BiLRnLUZ/EBQBxwHgwGMZlM4rvvvku+MJ1OscIUWcM2WYyru2eyBTEYQAQU+xkMBvHu3btYrVZxdXXV2CZWX65KGg3RM2yN9Jz5fB7r9f4lokxSKEQ2dYLGegGO+85ZRVGk3aOYOBFxVGwBwxQXsHed5HANTyGUhIjqaqqWYl9Ec5tnnYQrtiiJ0BeNnHOcc/XmoL3h+vml5ciny5FJB3F40qUL7luObDnyc3Bk1TnssqcviW458svnyFPl0d7DpR2OaOaOI1gdGJGem5ubdIetd/cqGMBWCUoFoHeuXK+RM0CD9lFAxCFH0+/WWcyHwwC+8/k8AQmO7MCu0Qlkg8J18gmIeLQSB8KIAW0lr91ul5wROendNm1CyhBKWTW3zOTRLyCz2WzSjjXj8TiqqoqXL1/GYrFIfSESgkPzqD5iD+bkQi8Wi/T4lyiPRmhwVqIGjEffZh4RCYQwfOyHRbLUudls4vLyMjabTZqsMNGpqiqRy3Q6jbIsYzqdxvn5ebx8+TJms1mcn5/HxcVF7Ha7BskCokRu5/N5nJ2dpUWrABS2OhqN0qRjt9unnrDNK/am5LVer+Pu7i6220N+PtE9PmNntPMHf/AH8eHDh1iv13F1dZXSOSaTSfaRPTLFl66urmI6ncaHDx/iN7/5TYooQtj4JLImYs2EAeAm8kpuPpHvbsmOToedwfAH+oFdlOV+l6S7u7sExETuhsNheicJOtZNOYiK6gSR3wF9ZDmbzRIO1v8QAAAgAElEQVTwQ3T0Gz3QR/ADPbG2wUlGc+TV1hgj48sRCjLFtrkeO43Y5+Qr3oEN+LtGjxUHmLDTF35XjKZeL+gfndEu+NaWn19ajnyaHKk3NDrBajmy5cjPyZHj0TjZHHppOfL5c+SjPOEC6PWxHQPe7Q756QAuAgccUZ7eQXJnjUD07pE6OY8oiW9NiSNBCPQp4vBIdLfbpbtgBEeON+DGhLGu6/To9/3796l+xgw4YySAsq6nAtTm83lSGI5cVVVawKnGjCEhR2RAm4yXqA2yoz+dTifKe2DYbQ+LmJGr6hIAo47VahWz2SwRBgSiqRpK3kTdkOl2u00vyiNCAYERweLdHNiFvl9EdY8eLy4u4uLiIsqyjMvLy9hut2m9w3w+j2+++SY51GKxaKRzAKzdbjeBF7KChNFBWe7zpNX51a403xswQrYvXryIm5ubRDakEkREAjCNjqFTInZEcJAjMnnz5k36jcf+2M7V1VXKWUaHjJP+E8GaTqcxnU5TPv1kMokffvghimL/8krGDaF63jS+Bmjd3Nwkkul2u1Fs7xc6F2XyJyZdmmagk8B3797FbDaLV69eJbLHB8uyTBFM3jVDX6fTaSI6nTACzP1+Py4uLuL9+/fphpC0HCYSTJSYOOhElag7dpLWQ96f71FATaeiHr5rlI1Jp/oev/FfJ9CKtfiWPtUATxTT+E2xQic6PpHWSajqm3PpY1s+vbQc+XQ58qCkQ7qQvkuq5ciWIz8HR1adw6Ywt7e3aXfHliO/fI5s4IqVX82egG1E86WFDFaVgDB4NJrbhpOiu5wgvNwOTaQ2AOIUjGI2mzWib0SAEBp91TtWfZyI8DHUzWYT33//fdp6FOdQYmS8qkDN7aVvCnZlWSYjRtlnZ2cN4yN3nf7WdZ3el4GcGDslPSaHiOtDKgQOTZ0YGAAC+E+n02Ro6Pq3v/1t3N7exvv371OaBo4bcdh2tKqqBLT6yJxH2PqIeb1eJ7kSEdM+YvDD4TDG43FcX1/Hn/7pn6ZoHGRxc3MTk8kkRSM17xvZLpfLuLy8TAQMqEMk6LTb7cbt7W3Sma4p0FxyBS/6GxHpZYabzSZNRCDY+XwedV2nRdGLxSJFg5hkQLLYDXKo6zpevnwZL168iLdv38bl5WUDrIgE0jfs+P379/HixYv4/vvvYzwexx/90R8lsiVaHBFpO2LGx4RqsVg8eDFnv9+Pm5ubhg8Puof3bujidXL90RUTj6Io4vLysjHZGY/HKZ0F/4bYsC2Ijc+9Xi+1hcyxKfya4x8+fIhvv/02Xr58mfpOfRGHxbQsXtYnCxHRWHsBgOsTAPTkUTsmafSZCSdjxLd1UbNOHrV+jdpzHH2Dj7Th/dNcd/8NDKFeZKqTz7Z8Wmk58ulyZJrMxUEH+HjLkS1Hfi6OLMvD1Fs5ruXIL58jT5VHCVcSnaJjALYCJCClhFOW+8eOLLDvdDqNrUK5Vu9w9REkhgiI8S4DhEBuLu3xWBQnwElRNk5CrrxG+tgNZrFYpDQCInIYB3nAkJqCvhoIj3yJ0tBvdjYCFIhwQVIQBwatj+XJXe33+ymiA1n3+/3o
zA8LjNlKFCOOiPSSPxx1uVymnOnJZBLn5+cpisGjf/LINX95t9ulRcE4G9GkzWYTf/iHfxjT6TTJkagQ0TnkivNzLToDXG9vb2O9XsfZ2VmKBgLwjIGc8MvLy5STrLZGW8gOPVxdXaWJxWQySQTDjkzIkfQC7Bryoe/oApsimsvkibFQP3nxTH7wqfV6nd6XglzLsoy3b9/Gb37zm9hu93nuTr4KGAD2brffoenFixcNooC8Ly4u0uSBFAtsFn8lRYGxkLN+d3eX3mvy6vI3Ef8toiiLlEfPblbI67DO6zBZgsQmk0l6v83d3V0aCzZHSsvt7W2yP+RKlPrs7CxNjGazWZoUIGMImokQemIbYdaOINu6rhtPjyMiyUNTurBV+oO/6mQX3OF6j8pHREo50kggddEHwD6XEqYRNz7r5EInGdiNRhU1igi56Bja8uml5cinyZFMHBVfyrJMa3Bajmw58nNwZNk92B07JrYc+Tw48lR5lF0KFbARIh0gtxpHj4h0p6pRAR4ns5vRZrNfSNnv73eGAVS4idD0A4xA28bwMDp+RzGqFMBrMBjEbDZLBn1zc5Ouj4j06JbcbAogERHpbhlDHQ6HcXFxEb///e9T9A1gqes6RV0gGPKrVYGAPiC53e4XnkI2OP1ms0mECVmV5X771vPu64j/NyLkbpwoEYtM0RF53+xQwyQaRyHyQc45uup2uzGdTuP29jaREH1XsgbIIyIBZ7fbTTs7QaT0n2jedDpNUS/A8ebmJjqdTgK4s7Oz9PLK5XKZ5IBNkZNM9Oj6+jr1jbSOm5ubBIQ64Tk/P4/Ly8u4vr5OwIGtRER89dVXKfpKPjS2wHgA5bu7uxSpwj9IOeAcjvPOkuvr68aLMDn+1VdfpVQHdArBAA6z2SwR/X/8x39EVe23gKUP3W43Li4u0rgYE5MZjdYRKYUs1+vDe1VIt1ku9qRL20S6selOp5PkQT+x8eVyGbe3t2mB8/v372O328XFxUXyC2TL4madXIIXX3/9dUpbUpuAGIh+Y4c8USDPXuvUxbgaSUTe+BQRN7ARX4qIlCKjE1jIgz4hZ2xAJ5eQBrrRpw76pIOCv9I2uOkkohNwf9pCfxlne8P180vLkU+XI/v3/bt+9V/i//hf//eow9Zw7L8cJm5R3B+MPZeWZUS9vyr5zf01ZVlGvdvtf7s/tr8uDnVkikbky+J+17Sopf77i4uHL1wtoohdLWsF633Qi+vS+OiP9uV+HHosTSLr+4Op6aIhq91/bU5y+cx5/E8YI9dHLbtEFkWSt9aldTb6mrr+8P2AjL8sy1S/9x1ZNK6uI8qqjHpX30ueTVXq2Fm/vD8P6pK+1Ls66vr+ybBMzuGzliOfB0fmbJHyq2+4eBwN0KlAUIRHDxA8IDwcDtNEGwPb7XbpBX5EgDqdTtzc3KTcS72jxBgjIk38iNiRp6sLJbX/EMl2u21E7oqiSHfPvOOBu3EIMgHjPckQSeMY7yUilxjgJyLw9u3beP36dYpCMvnnHIg3IpKciUBBwkREzs7OGtdcX1/HaDSKd+/eRbn7Jl5ERFlWybGVgLmWvGUmzkymkT+LK7mpQO+84ZxIELqk3+RX49xledhCVSOUkPBms0nRSnKt7+7uEhnzDow//uM/TuTM2N+/f58im+Q0TyaT9I6Ju7u7ZD8XFxdpS18ilkyCuKHgUTMTGiYi3BiyIxCO/P333zdu8NCpplEQzSQlAVuMiHSD1+1246uvvkpA8/r164iIFIHjHRlEMH/66af0vg/GwGSo1+ulHPTRaJQmdy9evIi6ruP6+jqlTaAfIpXq0xGHd04QsSqKIu2AFbEHrsXykPaAf5FSgD2ja84D7LGLb7/9Nrrdbpyfn0dEpIgbKScQ0t3dXfIBJl6sBSBapaCP3TPR1acOyI4bT+THjmvUga3e3t4mHQP67EqFHZPugc0RIIDgdF2DkpESjGIe7TNZRs88CUB+jAu/0wi74h9jV1vUm3eOER1uy88rLUc+XY788eZmL5uyitXw7L+LPTzL0sLCLy74dMuRz4MjTz3p+tU3XDxm0w4CtHQIQTA4Bjufz+Pq6ioNkEk2RkV0nBsCiIh2Iw53yBjZZrNp5EAzyeVFjfSHnZQiIi0YJI+YyT8TVcgBotMx8hkFowjyTzE07rIHg0Hc3NzEy5cv000D0Q0enS8Wi7i4uEiPW3lHBm8RJ41E0wh0u9nNZhMvXrxIj5uJEkREWtirW37yQj+eRL169SrevHmTno4x0RoMBnF3d5cm7Dgwd/dMrnWhbFmWKSobESntgggFT6F4csJ46rpOT5VYlAyZcv2rV69SNHe1WsX79+9juVzG7373u6Q/IjHckOm7OGjz5cuXMRgM4sOHD3FzcxNff/11eiLEEzVypYmcUrhJjdinZ2hUqa7r+OGHHxq66vf7MZlM0s46TKqI6Eyn05Q/PRgM4vXr16n+xWIRNzc3cXl5mXaXms1m8e7du/S0ivfEAKzohV206Bf9YbLEE0YmX/SJCct2u427u7t0DEAEMK+vr5NMut1ufPf9D/E/xoEkNOWiqqq0CJenf3Vdp8ke/k1knptzfA0grev9DmaaesIT2ohIN/zz+Txub28T7tAnblqJ6HGt4xnH1O+5IWY3Nuqg7bqukw6I2lEPtgxugY88hdCnzJBmWZYJUzQNC0ImwIDPkTZCYMQJk0I9njIBjoF3jKctP7+0HPmEOXKxiP/5f/tfolNVMZ5MYjgYxGg0itvb25jOZlGVVYxGwxjdc2QRexyeTCbx/fdv4uLyIob3vlpHxHAwiLvpNL569Srm80XcTe9iPBpF3D9VGQ2HUdw/+drrcBVlWcRgOIzFfB51RLx6+TLe//RTdKoqRqNx3N7dxnw2i+FwFJPJJDqdfQBwvdlEdZ+BslqtYjqbxfZeL7f3m0BcnJ/HixcvYrFYxHQ6jW+/+y4m43H89ne/i6qsYlfvYrPe48ZiuYjzs/MYT/Zct1wsoijL2G420el2Y3a/Y1/V6cTre46cTM5iNBrG27fvoigiOt1u9LrdB09wzu7fZUY62vY+AybqOn4vHNmpquj3BzGejGM+m8VqtY5OpzpwZF3H9P5lyQQBvvnmtzGbzeKnn36K+WIetzc3cXF5GePRKGb3mTHdbjcuLy7j3Y/v0tOObrcbnftgatXpxG9+85tY3NtWHQeugVeKoojt/bXL5TLmi0V0qirKqorlYhHre37rVFWUZRXd3h7/o65js9lGp9uJ3ZY1mEV8++3/t3/6NBi0HPmfhCN/9Qpo7vroCHfE3GVyjLxqCKaqqvTOB11wB+BOJpME5Pyud5MIHadjog8JoEQm/CgWxyE3fDKZxGAwSDcO3JVj+C9evIivv/461UuEDMX+9re/bWwDSl447+DgKdBoNEqPf1+/fp0m/KTzETHgTpxUOYyoKPa7+jBWnlAgJ9VHXdfx9u3b+Omnn1IKYFkeSLhTdaLe1bHb1bFarqPe1VEWVey2u+h2erFerePli5exWq6jU3Wi1+1FEWVsN7vodftR1xGD/jDGo0ms15u
od3WMhqMoijKKKKMqqyiKMjpVJ4qijOndLCbjs4g6YjFfRL8/iHoXsd3soio70e/1Y9AfRr3bP7rv9wYxHAyj3xtEp9ON+Wwes+k8Ioq4uryKy4vLuLp6EednF7Feb+L9+5/i5uY2Xn/9OupdHd1OL6qyE7e3d7FcLKPe7QHvu+++j9///oeYz+bR7XRju9nFYr6IqIsoiyr6vX5cXV7Fi6sXEXURP757H71uL87OzmM8nsSLFy9j0B/EbDaPu9u7iDribHIW2+0uyqKKquzEZDyJy4urKIsyLs4v4/zsPIaD4f34iyiijKgjRsNxXJxfRrfTi35/EFXZiarqxGg0jl6vH0WUURZlrJbrGA5GcX52Eb/5zTcxGo5itVzFzc1t9Hv92Kw38f7Hn6KIMv6H//Jf4/zsIsbjSRRRRr3bP9HsVN2YTWdR7+oYj8axXCzj/Y8/xWazjSKK6HV70ev14+52Gpv1NkajcVRltZfbehvbzTZGw1H0e4NYLVcRUURVdWKz3sZ6tY5BfxC3N7fR6/Xj7Ow8NpttDAd7IK93dbphJYp+fn5+P3HopIhTWR7WTVxeXiZ/7/f7MZ1OU1Sa83nSqjsqdTr/P3tv1mPJlVxrLvczz/MQkcwki0SVAF3osQH1j5PAC5TQ/+H+HAGNBvSiF5VuFWcyM8Yzz7N7P5z8LOxEUVmVYl5AYroDQUZGnPBhbzNb5msvs522VrQkjQA87BpkAJr/0WhkSaB/WSWGeCLAS6FSqZRWq5WBCJ/h7wjIJJQ8K2wcCTTxRpIxcUEQWEIMWQL7xnNJsmSXv+dc1AHwM4DgOVD7ewTMiHun02UrXB/Tk+P9jgQj/2tjZCaQyoW8Dpu1fvj2G02GA6UDSceDdDrosN1Ix4PSihUfD8qlUzpsN2o3ajpuN0oHUi6dUiqOFB8PyqdTCqKTirmMKoW8TvuddDqonM8ppVipOFI6kMI4UiaUUoq1XS5ULRWViiPt1isVsxkF0Unxca9sGKiQzaiYyyiMT0opViGbUTmfUyGbUTYVardeabdaKowjteo1tWpVtWpV1StlnfY7TUdDLWdTXXc7CqKT8umUsqlA6/lMx91GYXxSfNjr4faNhvd32q9XyqVT0vGg/WZt91zIZtSu19Ru1BXGkaajgfKZtOqVkqqlorrNhkr5nHarpTaLuVJxpHq5pCA6KRMGyqZC1coltWpVZQKpUa2oXi6pnM+pVioqjCOlFCkVRyoX8mpUy8qlUypkM+e/DwNVigXlM2mlFCsdSKf9VqV8VvVKSS96XZULeZ12Wy1nUxUyaUX7nWajoVKK9Pmrl6qXS6oWC0orVhCdlA6kbBhou1woiE6qFAvar1eaDgeK9jul4kiFTFr5dErrxVzRYa9SPqe0Yul0VLTfnX+Wy6qQSeuwPY9nJpCi/U6n/U7FXEbr+Uz5TFq1cknRYadiLqtauWSqoAQjfx0Y+a7jg9RwSU+7O8NUMRDc5HPdo19yZCM8ChIJoLwhA1ic0wdUBt8vu/rB9VIOSQYsGEGtVrNAjnSNQM25MBokZM1mU5PJRGEYGojxBk59EAaAEUvnpePZbKZaraZ2u60oijSbzS6YPt7GvUyN1R5JxoKwqtFut00SRvGpX96Vzl2Aym+VxcXb/6Hf3P6PXzrt/6mj4b7Pv/1/RlLJ/Tx6+8Uxf/sZBGtrPakXKCctuXPkJC3efu+Nu/L2S5IOb7/4zMad5yTpVmcmIv/2e46V+z54+2//s+fHxp0nentvsaTls8/hoqm3Xzn3u+mzzzaf/fuab/7f89iU33696+j+Bz9/6b5v/4VzPD/67ns/zwpkTBKSJnz4cDhYa10CPl2zYLXiOFaj0dByeR41/AtAQF6A9EB6KqSF4QMcKCxmZQCGbDQaqVarSZLpzv0mkx5AkH2QNCKllWT+TkxCguXrnvDPMDzX8fkEm+swHv7efS0djCasm2fZAD9iGj8DmCXZuDDGz4GOpF3SRezi2ZLj/Y4EI/97YGSlUtGLFy8smWLlzSdXxCGaNxyPR02nUxWLRVNy8IJL0kuiSzJMcszPuX9WGlk52O126nQ6Vtsbx+eW+8w7q3uoJajDlZ5WHF6/fq2XL19aY4vtdqtisaiHhwd98cUXGg6H1vyA+EuMoWaYumjuGdVHGJ73utrv9xoOh/Zizj5j0nnPMezBx6PHx0cVCgUNh0OLg8Rv4h9yOFZmaAcfBE+118zBbDZTpVLRaDSyldD9fq/lcmnjvNls1Gw2VS6XrRW9j3Uk5j4uI9lF0rvZbKy2nNpwfJZYTczfbrdqNBpar9e2KfTV1ZVubm70+Pho7fr52wQjZfHrvzNG/h+VFHIhAi0SAG6QYI4RwaJJsmVUScZ8URjsO7V42YUPTH4wvJYziqKLc/B2ykDCKPKm7TWkvmsOzo3sCkemmcJ8Pr/QsyPLY9+Kw+FwESBp/ToYDHR1dWXGyj0RQKixgn0g0KZSqYvAdzgcbP+IRqOh+/t726RwMBjY3hapVErH6KBTuFMQnOtfgwAj8zNJYSmln4H7fPBWJWAVtv+BNfxM2WjsSk+Dp3OTWJz/f3kO/29/j4EoAmYPBp7hsgCXa3E5X3TMmc7fP/370lGexoE/+bNrWSnt23+9vVgcxZdF0U9V0/YDxvTpA5fjxxxdjsHTuEmugNud/+kZ355Dcv+J7VxP86CLa/H905g/FRoruLwPPw7+s/xtEEhx6qlNNS2DR6ORMejdblez2cykrcvl0uo5FovFhTxyMpnYfiUwZSQK+Xzekhy6oVEs/PDwYLGDlr0EfEkWAwAKpLHUo+Df+CixiOQjiiINh0Nj4X2tE2yYP5BpUiNJjIQRDMPQkr3NZqP9fm+JG0BIssp5SAphEr3sgXsGfDxweOaQBB+mkHOQAJLwJ8f7HwlG/tfHSGRmxKlisWit2Bkznl2SyRp5AUSyTqJ8OJw7BOL/jAMvaxw0g0I6Tvyi8QgvVbwk8DLh5V3U7sDA7/d7lctlux/k+STlcRxrOp2q2+1a8s0qIysHjUbDpOK8MGKjJPV0yOPlnJd1pF7Y++3trcUp3/AEuSvznE6n1Wg0NBwO7Zqz2czsE9wgDhKjsHMarRDfiWPUDbGH1f39vd0zOJLNZq1hlq8RLJfLmk6nqtVqVg4gPa1oMA403uJFhhUi5qVWq2k+n1s3T/wVjEsw8un474yRXo74/PggL1wEOYIzS3XPpRR+uZE2j7Q5ZQJ4a+VN3mu3pcv9Mzwr4pc1mVCuT7BCO+qL42jw4Fks2EC07mEYWtMHGjTU6/WLeh1qlFjGpQUvzwsDBUBJsmLndDptOnzGkzHzzwU7Rce7bPa8k/xsNjOHrVQq5jzr9dqaLizD7/X1//X/WHAmePi3/91up9lsZqDC5ofH41M3LLrgwFzx98+ZB79Mi34cBo5mEuPRSPV6XZPJxGwnDM/1QeVy2ZaUYU7T6fRZ4/72mabTqa5evNBisbC2sYfDQZu31280GrZPy2q1Urlctg6D1AnAhubzeXU6HbMBmC0A+XA46Pr62s5FcIRFOh6P6vV62m
w2uru7s2BFspROpy+6IDIH/jNhGNqmi5vNRtm3bMvpdDK2rlgsajAY2JwwRrDIzWZTo9HIgn4YhmYvsFeACGwQ7Yc9k7hYLCzBxhdhwBgbdqvHB9GAw8gCWpkwY/ZBrcVsNtNoNDIfgNHKZrMXdWWwlyRPxAESTe7RB0FAcbfbWXJDkgKw7nY7Y3PxVz6DDxITCKr4J9eGZUbO5OMIKwYkg7D7HF7//ly6gM8TI6iZYUzpHupZP74AfM/icX/+3p/HU4DSr7R4RhDQTiSF/7kjwcj/+hjJfFD3ywoGL7C8YBIneYGSZPGYOO+TLi/39HGEuFQuly8wElwtlUq2EkIxPnOWTqcNP5h3ksvj8WjPlM2eu8kSd0ulkknIptOpGo2GrUTwIk+Cy7Pz+Xw+r0ajYbaAhI3YzQsrLzfPMfJwOBhG0n6cBJzxwIbCMLSXPe6ZXICxpLU/ZAbY/i6MBKPIJ3hGSAPm6ng8Wq7CXLMBMjYFRm42GxsDXjJ52ab2jJc5ru9XZ+j4mWDkrwMj33X84hcu3vji+Kn7BwHTO5JnIJgcJgD5AsFwMplcGKnf64DAQ9BmEkjuoihSs9k0RgU2jDdbmD4fpLl/Jt6zLn7pWDo3fTgczm1laUzBEiuDjpHTMY4A5IM0DpBKpS6Kf2lggWFwcH8Ec5gtzyzsdjtNJhMb83q9blp5goOfM4wFQ/VLtz6QUkPAcvF0Or3Q/2OEfI7rw6Kk0+eCVF4kPLOKFnmxWFgxMvUBAKeXruz3ey0WCxWLRdPm83KWyZw3YMQeeDGqVCqqVqtWSD0cDrXb7UwaEMexdfnxLx4AFWDiX0D8c/P74/Goq6sr3d6ehYjlclmvX79WrVYzVtuzzQQjX4wJO/zceU+nk+r1uj3Pfr+3YmmCxXw+t5fHyWRiYMPLKMGZvWM6nY6CINBgMDAbYux5fjqfxXFsbBq2h9zAv2zX63Ubl1qtZqCJxAJmD1vCtqXzC1sul7NCXewU/8SemTPP6BNrGDfPTJHMwVYDdDwzCYYPnn6lgXnHFmFGpSc2Gnun0Fh6ajPrQZB7IUnAJokhJFuMC4wyCQYMPPaD5v95IswYPdfmw17jT9yjvz8Or8knifhLgJIcf34kGJlgZIKRCUYmGPlxYOS7jl/cNEOSsSi8efr/w3DwJumXKjGw58GMzzCoyBcYKCYYI8IBYPBhX9BKExB4o8ZIMBqW4GENYAa9vpXBZeKWy6Wur68vzgFgsQrCxOHsdAYEcHibhxVjAzx+z5JotVpVsVg0qQLLtH7iuUdWNTyrAKPBs2CYrN7s93tjBNBNE9zY+Z153O/3pt+OoshaqhNQOL/XnTNHzAuaXr97OZpj6RyEuV/uMZM5t/RlCZzzSTJ5DAEMO2Elzp+DwEhCQhIAw8OyvCRjOpGkMLdo3mFIJ5OJVquV/v3f/13L5dICCnUUQRBYstTtdm2sCEQwdwRxxotn9K3p9/u9Go2G6vW6Fc5z71F0LiTHBoIgULlcNlYSdi6bzarT6ejVq1dqtVq2LI+PsGpJ8oNd4QP8PAgCA+R6va5ut6swDK343W9+SrBj6Z/z4O88O89IApHL5VSv19Xv902Hz3zB6Pnxgq0iACNhQv9fKpVsTPg77yuMIzbimTsAli/m1SemXlLgVy687MDfL8kDDQS83EZ6kqLhuyQPkgxYvQSMcfPxAwAi3gBezxN+bIYDAOIeGIPkeP8jwcgEIxOMTDAywchfP0a+6/jFK1zesFh69/pMBtcPLI7Ig/IWCwPC0iVSJ/9mikaTwaDjChPNciETzkRSKDmdThUEwcVbMvdKYOGtXjoHFLSiURSZNI0OSSyXw0Z5potz8n+ChnQuYJzP51aAikN5gMCIAVsAsFarmaGwDF4sFk1P7usDGG//Fs8Gt8yZZzrQ+cLQISVIpZ46ZsGkMkeSNJvNFMexOp2OMYnoh2lviqEimyCAEdCWy6U5DS3oYa0IPLClBGdYOuYCWQIAiq4X5hC9L5I6WFLGkHkm0SGYYwveSSnSzGazevHihe7u7rTZbFStVo1FZMw4N8XAkqwAF3YZ8OP8jDN2PpvNzEb97wgk/J7gDrsK20fgQcf93Xffaf52Hxr8EP9Cq01y4Bnz1WplyQefqVQqZi/NZlOVSsVa4sOMYpckDx5QfMBDHoCUhMSiVquZNt6z7dI5cHt2ClbMB+7j8dxmu9VqGQMMAAO0bORKvMFn2ZQbn/KMOPeQTqdVqUvlvewAACAASURBVFSsDgB2kXjjE21kFSTVXg7h/YQg7gGFMQP0SE7iOL6QbpDk+3N4SYR/eeIZfJLO54lxMKnJ8X5HgpEJRkoJRiYYmWDkx4CR7zo+SA0XRkLg5o2YQOXfCLlpX//jAzHLkgR0vyzKwCO/wJgJbL7zDMyBJNvLhEEicfBvylyDJXr05ofDwWQgBD+ebTAYGKvAz6MoMiaKt990+rz5GwbFMud+v7/QZ9NtiAmUZF1cMEKYPJa4Ac3dbqd2u20BmOujyZXOgQ2AxfGpJWJvK8YbPT/gEcexut2uMSsAHYENSctkMlEmkzHAYj5LpZLm87kFJOarVqtZwGKMAQXsCsPebDaq1+vGxsHA+Pag1J7lcjkrRi6Xy8bO4kwkH2h5keysVisLnHSskWSaZ2yOgEsBahA8Lce3Wi3byR0751yADHYBIHn2BZuTnpb/vQwHRg1n5/5rtZqxaYAugdRviDwej5VKpazou16vazQamQyCAIw/SLrYmBEWm2cHjBeLhbHa+Dl2DEBzX5wTqQsyIvwDOQ6M7XQ6vYgl2CRMM77KuUkGKTznb9h3hFjE2JCw4m/YYyaT0Wq1svsjgBOksQXslaCL3RP8uZ7XyMNO+/H20isv38DusBWeMYoimzMvy+DfnpEjhhJXib983oMWPycZ9WCTHO9/JBiZYGSCkQlGJhj568fIdx0fRFLo35J5u+XGCQQ4wM8tJ3rZgHS55H86nczInv8NMgz+7Q2oUCjYwDDpSBG4DpOKY/t7TqVSpmkm4PguO2wmh7yAyTscDhfL8Eway8SVSsWWu3Ec2L/j8dx+k8nFmAHnXC5nrWwxfknmoCx78zvuAVaG+4QBYVkZACRgwPg0Gg0dj0eNx2NjVx4fH20eT6eTjT8gB0MFGMOKEPTT6bSq1aoFTw8cOCl6fZ9QrNdrzWYzez7GerFYXLCIOBOghe2wDwX3jGOWy2UD2VQqZawgLVCz2ax9jyPSvYr7i6JI0+nU2KFUKmUgQkLFPMPm8fw4u/+ecctkMgaAz/fCIeHgK5/P6+rq6oIF9iz44XDQarWysQNokVL4JIQEgnHj2n7JH1Y6kznvGTIajTSbzYypZZNkui5hPyRLPD9jSUctJA2efcIekacQS7wUATaVxEySNVshSDN/BPBs9txdKpVKWeGyT5AABeIbiSbSLECe2MD5mSfuEekM8YnYw/jg24yvZ4l93PM2yM+4N/8z/zkv4WLOfLz05+f6+Ab261+2fNxJjr/+SDAywcgEIxOMTDDy14+R73rx+iArXARNHoDJgOHBAP0NYwB0x
+EmYUSeTyx/w8+YDA9eBPbT6azp5vzIHDBaGD6/1IvsAHbC/75eryuOL4viuD/fhhJQ5F54Q4eNgKnj85499IwYIMrbv2dhjsfjhTyAscHwPTvnDQX2C6bCX2+9XqvRaBhLSSEtAAc439/f294qPFs+f94JHKeDHfFMGewiz+ABnODnwYQuMwTN0+mk+Xx+3k+sXFa9Xtd8PjdJB78HoNEkw5ZwH+v1WtvtVs1m056PQA4IAIieZaMzlWdJYHmeMyeZTMYYJeyBz8K8UDAKeyM96e0pwmUu8AdYSWQGdAokWZnP59aVERYLKcl6vbaWwIfDQVdXV1Y/4TtG+XHmnryWnODpgxL2GwRnSQes7XK5VLVaVTqdtn11mHPYrlQqdRFkkSOQ8JBUYEskVdiGZ5tIpqQnlvru7u6iiJUY4u0KnwKkAREA2Eu8fIJAxzR+79lY7Jv6Fq7t4xa2gxQLP+X3XgrCffpEmPjK8/A5QJJx8Ywj98u9MO4+VsAaejkVKyYwmMnx/keCkQlGJhiZYGSCkb9+jORzP3d8kC6FBDVYIyaEgOxvgOAnPS0F81A+oDDJ/m95KH7mgzEOTfFvsVjUdru1tpeAFpMHmHk2h7dwjtVqZUwTwdizZsg6uCfAhsAFI8ObuWcCYCYITDyD36QPI+f+uDabK/o3dYzg6urKnpvuOX6pHamGJAvymcy5cxFsFOcEdLlfltlxPGQcsG/SU4DxYLpcLpXP59Xr9S46LeFwBBEChj9XEJw3H5xOp1Y07YEKFo+/gcmElQEkCMTYqqQLZgxARuNMq2WWnKWnBAJWCDv3S+LMg9df++V1pCHMn08sAGYPdBS9TiYTky4wLiTABNhUKqVer6dWq2WbJeInzDfP7tlvmGUCvfdnOj0xFnxJT2yOD54U5cMa8nvugzoNEj+fYPp5L5fLxjZyj+jVPdOZSqUs6UmlnjaJLZVKmkwm5huSzAcI/iQ6/I7xYp44CPJhGP7ZRrPYhAcL7IDfU8T7/DyetfNsuGeXOa8HB+93+JsHC796QozyMYD788k45/DxVJLFZ35OgpQcf/2RYGSCkQlGJhiZYOTHgZHveuH6xZJC3tq5iL8hgo53RowgDEMzWhyPQYZ184bv9avPjY9zAy7NZlNBEFhXE7oV+X2nWOaFYYA1IhCxbOwHH52qX+5Fz8xnvKFxUFDrO6fgCFEUWTEv7KJfMvXdZGD0GG+ACudGAuE1tgA2zF61WjX5AsvTFDtS+MheGN5YcRrGkiDGfDKHjNfxeDSZCQxVsVhUvV63651OJ9M449QwJOl02paZYYXo2HQ4HKz9KUG1Wq1aMlitVu25sBmCF5IRH5ABeezWF5RTFHw4HGyZXZLW67U5Oywm5yUJGAwG9jO/ZwpOTDCDwYrj2Ng2WGdAjv1RptOpadfRwmMTuVzONPSZzHmvj3q9bgXByHVoqdxsNtXtdg1opSf5A4XU+B3tabFBAh7PAlM1Ho8lyTpEAZwcJGT4CcBNET3A32q1TB4hPbFf+BbnwHeLxaLtJ4LUgPnHfhlPzgf4wLz5oIw9B8G57oDaAWKN15djF/gr8hDG1QOmfw5sg8QiDEOzT2yR+yEZxeY84BH38HtsFlv2gAGweIAillJ7Ij215uVzAJ1nQ5PjrzsSjEwwMsHIBCMTjPw4MBL//rnjg9Rw+eU+BsrrIglqAAuDyGeeMwosDxN8paeuPRiAHxwG3Ac8DKLdbtvu3H4gCICACfrZzWZjy7Ee/ObzuV3DM45eZsOz4BDs5QBrwXUJ8Mfj0TaUlWTX9ku/jCHAyZI7Rkx3GxjLyWSix8dHrddrc3yMIp/PW0DGGeI4NodmPCXZPgqLxcI26xsOh9biFekEemE/Xz5I39zcaD6fq9/vW1FuuVy2TQX5W5yPwEpA5NkovGZHd9gmOmvB1GFHtAjGEQEvHJE5hHElkK9WK2N//BwyZz5wwJB4xoUlc5gw5m46nVpgee432FoURRfJAwkStkoXLQI37FcYPi27L5dLLZdLkyfEcaxGo3Fh84wHHYQAKuwYIAeUkWeQHMCGw8B6lpE9cBgX2Gh8GF/1gY+AdzgcLljvRqNh9gU76IvUJRlzCGBh0/hQLnfuysZnqIfAt57HLb7nXvEr4hUSFZ8YRFFkDDT2x7MjN/G1BPzeB2cYOR8jAUq+eHYPjEEQGIAdj0fzWz8HxA3GjfthvLBR/MKPoz+Ht9nkeL8jwcgEIxOMTDAywchfP0a+64Xrg3UphIki6Pk3TwaPN8UgCOxt8fmSHZOE0/KWTZEghgtwcDA5LIFLMoBgYN68eaNut6sgCIwdQTctyUCFCV0ulyoUChaY+R0TQ/tUjM3LJtgwbz6f22Z2nIMiSb9EPx6P7X48+8d5+TlabsbbyzwwxtFopO12q2q1agboQRqN+mazsXNQ0EsXKALr6XTuwrNYLDQejy3oMsa73e5CCgAY+BarzWbTmLBMJqPpdKrT6dzKlkASRZExcpxzvV6bphpggD0FSAAtz0otFgtrk4tTY19o31mupr4AmUQcP7UVXSwWqtVqpt9Gpx5FkfL5vLGlPgB5rX273baAyTJ/Pp+3xISkh4CADZIs1Ot1Y+rW67WxSGx0mslkdHt7a/eLvdHpqlgsajQaqV6vm83wfPl83rTp3o8kmcaaxIVkEN2zJKt1INjA5sIUSzLAg50KgsC6XJHQeJ8YDocqFAp2/81mU9vt1iQwBFE6hvkgSBE+cYVn9ckXz0kw5d4Jvj6R8geMo28FjIwmm82qUqlosVhYbQZdn2AuYT+xEX5H8AYAAApWEfyKBfdN3OOefGJDcs6zA0aMk48XfsWB//O9f6nCN6UnwEuO9z8SjEwwMsHIBCMTjPz1YySf+bnjF69wEag9m8HPPKvB4PBQXotKsANkpDNAYYQMnGc7eJvm39wLXWDoqIKkAUZxsViYVpN7YoK4X4oqYTHq9brp3llyPx6P5lC0DsVwABdfeBxFkTFFMJosr3JfOCPL2oArbBfLrxTo4jz8ezAYKJ1Oq9frqV6v29I95wQIYVxOp/NGf6VSSa1WyxyFsWfZt1gsajweG9PI0nS/31etVrNAgSECQNxLt9u1cUauIUnT6VS5XE6ffPKJsQm+iw6sBPebzWatzS22RODCjpBQYC+wO2j2sR3pSW9+OBysjgFHzGazGgwGenx81OFwsEABCw2gYWOwoz6B4prc2+Fw0Gw2s2VsQIagRktlgACpCLbii5Cz2axtWEnQKpVK1i3qp59+uggE7XZbqVRKi8XCgm+tVjPfSKfTxnCTMCCxgcUFbBmfKIrUbDYtsfL7mvgiYwDpuXwKMAHomGuYPLpEef8meWAe0PR7CQ8F18fjUZPJRKPRSPP5XIPBwOxisVgYgBMzfID2bBySjXT6XDeCHItGAfl8Xv1+X+12W61Wy/b6mUwm5kM8l/TEopLk+JhG3OQ5Saj9F3aOr3mZBuNJ3QPPge8Tn4lB3BN+wjhLly1yAZt3gUly/PyRYGSCkQlGJhiZYOTHgZHvOj6YpNAvb/PQ0lMLRh7cL8F5
Xaa/eb90iqHz914T7AHhdDrZhNzf39s5N5uNJpOJ4jhWq9Wy5WwcDT02y5iwDYVCQfV6XaVSSePx2DbgA8AymYztQo6DsLGdJGOZaLXKsitjheFst1sdj0f7TCbztFcEQQm25XQ6GYPmg1qz2dR4PLYl1kwmYzKCMDx3/CGwSrJAgdMTNOhcFEWR7VYvnQP8bDZTvV5XJpNRp9NRuVxWq9XSF198YZITGLZisahOp6NGo2HMHewErCDzfnt7q/l8bgYL+wVDEkXndrJoxgHczWZjmvFXr15ZoCbQwNDBynNvu93OWBRvT2iUAeZMJqN2u21sC3aG0+52OwNd2tjSrhV2t1qtmq0jg8HOU6mUlsvlBSB4KQf3g89IT8E0nU5rPB4b04otcF720yFxgGFm3xMYIuQ0MF7sx0LwhnmGlRwOh5rNZhqNRsYUDYdD3d/fm+9htwRFGG58luV6gIY6AoLncrm08VosFsa+B0FgbWY3m40Vh8NY+6AI2JEo1Wo19ft9RdG5Exufg8nCFkhEkMlQS8EXNkSsAHzX67V6vZ6KxaJ6vZ7JVgBLEh+embhIUsQcI/cBvPgMCYq3gzAMzZb9nCH14VmiKLLNPolT3AuMLDHLvwj42E2cYe6S4/2PBCMTjEwwMsHIBCN//Rj5rpeuD9bjlwvzxunfuGGR/Ju7l1ewPM4yZaFQ0Gw2sweEKYEl8Ut8sCKeodrtdprNZjb5sGbS01KmX17k/tPp8w7z6MRbrZYmk4mOx/NmdL4VaLlcVrvd1uPjo52f5/BvuxRSZrNZa7cLw0BAxyhgvVjihYXYbDbG5LDrOQGC5w7D8270OCUOh8HQMtXLK3huv2+HJHW7XWUy50Lq6XRq165Wq/r2228vCjbRiqNVpnj3cDio0+koDEML8rAmsDxRFKnX6ymfz2s8HqtUKlmSAfPg5zWfz1srVb9TOfYBA4sdVqtVAwCckU5F3jHL5bJdk93RJVnHI5gt2gJjp9gmBZgw1QQ0/p5uULCjJF2pVMrYNtgtQAAQw2YBEiQE2AG+Rccm5jCTyejx8VHValVxHGsymZhPetYQezwej7Z/DfOORABJQ7FYVBAEWiwWmkwm+vTTTy0BIUlrt9tmwzBCSH0kmdaaawKw2BuJTxzHBnAAH13CUqmUASm6c7T7MF9cEz05HcAI8kEQ2N+QPMC4Y0uZTEbL5dKkW1wPwCNZuLu70+3trTHmo9HI5DsUdxP7AHySBuyH36/XaxWLRWN9iYs+8fG+SswB7DgviWkcxzamPqkj9gIkvt0wYE+chYFNjl92JBiZYGSCkQlGJhj58WLkB+lSiBHzVumlBzgYb6jcFBIJBhYNOM7kB16SGQ8MDwwWAyDJWCiKJ0+nk+7u7lQsFs2hD4eDOQWMBxppvkqlki3Xs7yfzWb12Wefqd/vm777/v5ew+FQmUzG5A2wjkEQWHEk4FKpVFSv1zUcDm3SYBUI8rCJfpl4sVhoOByaPIOlZJ4nDEML4AQDNuvzbCngy9hhNOPxWG/evDHNdRAEajQaevHihbFZ19fXxgpSnPjixQsdj+c2wujb0SwzFshW/PxIT1rb0+lkLAnsDDvWR1Gkfr+vq6srC6IwiBS5Xl1dWUD47LPPtFgsTKrBs2y3WwM6mChJ5oAwbsfjUfV6XfV6Xf1+35a+qY1gc0uAaLlcGrOH3p3f+9aoOOTpdLLuRoAH4HI6nff3mM1mdl4AA1uWnhi8arWqq6srNRoN9Xo99ft9C2YEnvl8bvrpZrNpiY3fBBSAIyGCgST5wUd5ziAIVK1WTV4Di8uzMM+SjJnmgHn0RcnECAJ+GIZWpzCZTDQcDiXJgt9qtVIcx6bR/+1vf6tOp6Nms3kRcEkiYIzn87kqlYra7baNg3RmKJn75XJp9+slFUijYOBhQdGO12o1zWYzPTw86OHhwVhdEh3kEjBrHD5B9qsS0hNzN5vNLsZ+u91aAsBzsBoBoBNbYZrL5fKFzAUwR/YF6CLDIOaymgLDR2xPjvc7EoxMMDLByAQjE4z8ODDyXccH2/hYkg0IxsEyqfS0LwIaVF9c6IOPNzi/TAfbxUNxXpYYcUiuz98xMQT5drut5XKpUqlkBkGXF36GDnu5XKrX60k6OwYbQ3Y6HY1GI2OiABMCNEGFnd8bjYbq9boZa7FYtGDhJSP8fafT0WAwMJ06zwYTyr3CPPliRwCSJdPT6WmHdPYm8W/wktTpdDSbzTQYDFSpVKwgkWLc4/Gofr9vzMJ6vdZut1O/379gTCmYxCApdN5ut3Y/dPKBraAYEv3/8XjUbDYztgGWkk5Io9HI5BzUBlQqFX3zzTe2VM3+GqPRSOVy2SQrBPf1eq03b95YsoCdTCYTG5t+v6/5fK5vv/1WlUrFClaRF8DwIa1ZLBY6Ho8GehRNE2xYXgfsYZEJkizT+z1HttutbWSJJvrm5saCO6xeHMeqVqtWaAsLTFG0T+RIBNDiB0Gg29tbY3ZhcwnsyBO473Q6bTKU0+mkh4cHffbZZyoWi7q7u1Mul9NoNLKNQulmBpsHY4kN4e+A6+Fw0Hw+V7Va1Xa71ePjo0kiUqmUte/NZrPqdrsGwCSRSGJIEGezmZrNpiUTzJtfYSBppRMVKwvpdNrAFTAhoQ3DUJPJRL1eT/v93mwAm0ilUsYek2z74nFiHswocSqKImNtkZrUajUVi0VjUwFrDsbYJwBINgAqnpMmDCSWPAu2ir1wj14ehn8mx/sfCUYmGJlgZIKRCUb++jHyXccvfuHyA+KX7pkwb8iAzOFwuOgygsaXf8M4wcB4CcJzZoTrMjgMBGwaOs1ms2n3ixPx94fDuaUmrB0tcu/u7nR9fa1Xr17p22+/1Ww2MzZjMpnoxYsX2u/31maUjj04Asa1Wq3UbDZVLpf1zTffKJfLqdfrGVMDQzifz3V9fa0oijQYDIyRgPGioBW2CUNB48rSMIafTqdtXD2bdTwerVsRgfbq6sqMbrfb6euvvzZ2jbf6x8dHC6ydTkebzcaSAlgXb3iMbbPZvJDLsI8CycJisbDgCggDMAQuWDdqBAaDgQWIXC6nzz//3JZ8AVu63rA8Pp/Plclk1Gq1tNvtNJ/P7fNxHKtWq1nRKOMznU7VaDQURZEFQdgQDxRhGGo+nxtAI19AakIwoUMYLI4PpiRDvtgznX7ayd7vRfPw8KD7+3u12+2L9sTSOTkg2MOCIhEpFouaz+c6nU7627/9W/30008Kw9DsL5fLXfgRASaXy5lef7fbaTAYWGI0Ho9Vq9XU7XZ1d3cnSXY+2OjHx0d7PoIrrFg+f27FjF0GQaCHhwe1Wi0ryJeeNqcslUqWOPzzP/+zFbTj3/wfOQBjh/8QvMvlsqbTqd0PMQb2D609zwnI8P1isdDj46MajYYlv7Br2HK1WtVoNDKWLJfLGVuIf2GjBG7YZpi/QqFgtrfZbCxx4++YP2wyk8lYly/GggSF2MxnaSzgZSE+pgK4gCyJV3L89UeCkQlGJhiZYGSCkQlG/uIXLpb9+eLG+d6zotwgRs5ArFYrCwws6RGMYKM
kXSwBEkwBKN7+JRlDsdlsbLkUUAFMCExoUJFJAAoEisfHR9Otc05YRpg5lirpqkJQ63a75jCr1coKDQle3W7XNLij0ciWutH5rtdrGxOClCSTR3gwZrmZ8SsUCtpsNmq32zocDsZmUJjqiwPX67U6nY6urq603+/1xz/+UdVq1RwdMPvkk0+M6ajX62bYmUzGgj4MLnufSDLGD3uoVCpm2NPpVKPRSJ1Ox8ZoNpuZPZVKJXW7Xc1mswtdeKvVMomCJGPLCMw4myRb9vZJTqvVMpkKWmIKfUlCoihSu90+O8rbILTb7bTZbNTtdo1pwc5g3HK5nLGrdPxCc87/uZ9arWY1EH6PFQI/4LpYLKw+guCGDCCTObcRPhzOrX+r1ar2+73u7+8VBIG+/fZbffHFFyYjqtfrNvZ+KR/bk2TJCUnf6XQyhppkYLFYWJJEYKrVahYE2ZsGaQ/zwb1KMsAfjUYXHb9arZaCIFCn01EulzNfIynabDZ6/fq1ms2m6a9JSJA2Ma5xHOuHH34woEyn05pOpyarAbxyuXObXQIvBzpx7MoDFpKGfr+vm5sb8xVsg/bTyFKILdgqSR/1IyR8MLCr1cr08JVKRZPJxP4WFtbHw/1+b8k5sQ6WnBiCHS+XS7Xb7QuGj7hNTCOWc0/JC9f7HwlGJhiZYGSCkQlGfhwY+a7jg0gK/Q0wkLw9Y1j83MsYeEukWw6sD0uEnBMWgbdXAgYHS9wYRqVSsc42qVTKNurzE805PSM2nU5NJsDns9msGTja2Ewmo9/+9rcWzCuVirbbrXWM4n4kmb6bYC6dC56n06murq602+3UaDS03W7V6XS0WCwuOiZhlExusVi86M6z2+2smPa5Lptnh/3C2Nh5fbVaWRDb7/eaTCbGYnJdmKfdbmdA3u/3JcnYKYKFtwfpzALW63UNBgMrTIVZIzBKshahnlXhOR4fH83hYFkajYZGo5FdC014FEXqdDqaTqcmuUDHm8mcN6VMpVLqdrvabDZWdM650XOzoSF6aHauZ4xpn0orXwAJKUYQBMbgsgkl2ubBYGCF6t4+kOkAmDBokkymQ1CiyBTtOTb18PBgcgNkN5PJxPYa+eKLLzSfz/XmzRv99re/1Ww2M3/abDa2iSNz52tOADjuKZU6t/S9ubkxyUmxWNSrV6/0008/qd/vKwxDDQYDiwd030L/jv9ja6VSycAM1jmfz5sEhYTwdDrpxx9/1Ol00meffXbRPhjNNvP+448/GgMFAI7HY2WzWdXrdfMhpD9BENjmlQRfErblcmnMMskH9SKTycRkR4XCeSNLLw3CB0n2YGdh57EVkg2eCTskIUIC4X2NJIXYB+MXBOe9gohvPBvj6pl/GEDuFWaRGErsZH6S4/2OBCMTjEwwMsHIBCN//Rj5Thz48ssv/+KH3nX8/ve//7JWq1283TFoBDQOBuH5zcGWIFvwmnRYVZgXjM/rjRk4Jt8X2SJxoJUtE8eSJFKMXC5nnZdgVQimBAY0qXEca7lc6pNPPjHn420bHTW63sViYWzk6XSybkiZzNMGf61WS3Ec6/Hx0QL7crm0vU54dpbO0dTv93trHwvzAYjDVO52O00mk4s52O/3tm8Kb+20rQVEWTpFSlEoFLRardRoNEwDLD11cPLzjb4fpgLJAGPk9ekEMsYQFtDLaqgnYLkaxomuQs1m08aNpXOA73Q6FxzD0OVyuQv2iudqtVo2jrCLBG/si3tEvgBAEBQIQv1+X4+Pj8pkzl2F2JemWq2aPdNSNZPJaDKZ6HQ6GWM6m81MVx+G4cVyP8GSDj3dbteW77lf/s2B3CUIzoXeh8NBg8HA9hjZbDaWvNTrdWsLC5sH20tyR2BFEjEej/XixQtjjI7Hc0HsYDCwoI7d5vN5q4GQZP7x8uVLq8ugwDyOY/V6PZO1FAoF/fjjjyoWz5uM0gUMoALwYKqQqMCoI1UJw3P3pGq1qlQqZUB6dXVlKwtIGkgkqZUgcYJNg7X3+5CQlCDTaTQaF+wYYwJY40vYEbEUm2KlYDqd2katvk03TDgxjVbFJAE8N2NIks9zSE8b83o5GuwfTCs2NZ/P77788sv/9T448TEfCUYmGJlgZIKRCUZ+HBg5n8/1D//wD//z57Dgg218LP357stoHTFsJBEswyGJQO8LOOA4GDjsChPhnYTl5NVqZXpKlvBZ5stms3Y+v2zLWzjAxLIlwZwAuN1uzVAajYbJJv70pz8pk8noiy++0MuXL7VcLvXw8KDZbGYGuNvt9PDwYBrsTqdj7JCXBwBc+Xzeroc0gaDL8juFojBqYRhap6M4jm2PEkAXbT7LrBgIHWHQSFerVdN1E3wqlYqur68VhqG63a4tb3NPSE2kc4ADoL38BaYUdpExhq3xxamSLJGo1WpWDAuDyXE8Hq0Lz3a71WAw0GazMTkLCQSdpBiLSqWizWaj+/t7W0pHduEZxCB4agvMM0hPbImXUXQ6HZPQ0MkIpguwhNVdLpeaTCYG3pIsqI/HgMI1tgAAIABJREFUY2sZC1udTqetKJ0kxd8vQaBYPO9vARiRvJCAlMtl08d/+umnBs7dblep1LlINwgCq7Ug2CO9wZ+QlBCUGF9kBoAMQI+PptNpVSoVNRoNk+mcTicDW/bxqVarxkah/SeAP+/cxD4vhcJ5PyCu12g0TFve7/dVqVRMh10qlSzZ8XPG+ZCD0J0rDEPztW63a1r0VCplCR0AvN1u1W63ra4Exh1bxH4BGdhoEhbmyhdtw3AydsRPzkXcIPhzT9g1MQFdPIADI++T9SiKbP8hAA+7g831MpLk+OuOBCMTjEwwMsHIBCM/Dox81/FB2sKz7MYAEdRxeiaIQOZ/z81yLlg49l6QnvrfE1gx0Gw2awOHcxQKBet8xFsuINRqtXR9fW2DR4Bg6ZLJh53hfvzSNUaNwfzpT3/SaDSyjjfoY9GioonlrRvWBTDlur6dLWMKc+CDKNpnxg9mhvN0Oh2l02nd39+bvMTLDfhaLBaazWZ2nTAMDUT8vVE8SxEzb/gAk2dbYSZJMAA2HJDPAIAUxXa7XQsSyA9gVAnksH0EPNhKbIcC3Pl8bswjLU7RX9frdWMzl8ulSUgKhYJGo5EymfOGlcy9X4aXnvaWYQxLpZJWq5VteFkqlbTdbk2uwP3BvrDkHQSBXr9+rel0aqxTq9WyBAB7o0MTMoxyuaxaraZcLmfM083NjQGUZ0pZwl8ul2b/tGoOgsCe0xeWw2hKZ1aNa1MwejweDdjxQ/xnMploNptpPB5f1JRgI8xfoVAwVpKkA/ter9d2TpKaOI51dXVlEhp8jODMeMFCUTNB4rVcLjUej5XL5QycSBSQAsRxbEzvYDAwlhcdPDbLvBKcK5WK2VGxWFSz2dRoNDK7ZZx8wwFYaX5HckXsIuHxhb5IS/BzAAJpF0XN7L2DbyKxYW5rtZoBMB3MYPiC4Gm/GcAcVp2VABjF5Hi/I8HIBCMTjEwwMsHIjwMjfSx6fnyQFS7+z0W5MMbob1iSDQZL9xRU8ntYVLSjXnaB1jKVOhfP5vN5cz
AkCNKTZAOmoFQq2aZwsIUsb3vAo6iQA42mfzOG6Xrx4oWxX+wfwlL74XBQrVYz58CIaCu7XC6tFecnn3xyIePAURkfEh0/nhgsY+W1u+Px2AwROQMBEL06zouEg3mYTCbGDMGy+mJQWJznS8acc7/fW9cfD/owWovFQuv12rTYs9lMw+HQAoO3H/6PHtwnIbCGsFLowwGb+XxuTLAHOFrTopHmGsfjea8U6YnlhJlhHGGTkfGwzL1erzUajUynPZ/PbZwJ/KfTyTokwZ4ej0criJZkrJ1nvOfzubX59fIYgJL9WJDV0PkLHXM6nbZ9UggeBFYSB4IxrB16aP+MjNV2u9V0OtV4PNbhcFC73Ta2uF6v6/7+Xul0Wu122zoRLZdLY7eQsURRZAwViRfjE8ex6vW64jg2ffbx+LT5K3GGgwQMH+P/dOuiuLler6vZbJq0ZLvdGkgNBgNjBdFs88yAIHNQqVSsEBeby2TOXY/S6bQVQnv/9Pu2YF/EHACOxAj2HjDFJokH+An2GMexBoOB7aPCtSlef97tC0AB5H3MJfkBSABp/J0YnRx//ZFgZIKRCUYmGJlg5MeBke86PsgKF4yAZwIYDByRz+KULJfC6DCgGC/tYGGVeNP1Mgj/b95W/QPPZjPVajVjmGazmX744YeLnxG0uB+AxMsyCJDL5dKW5wGiRqNh2me01RTtUviJw2ezWQ0GAyuKjOPYNg9MpVJWkMnyKQ5EUMaoYTJZOocVzWTOhYSMIbpqmDuAkWX0MAzt7wh2Nzc3Frjp8pTJPHW+gpVAt8y88EUwwuhns5lOp3MxJEYK+Nze3mq9Xuvx8dE218NpoiiyIAlQwXpIsuvR2YlrorvHNuiohV30ej1Vq1V7ds5F0GPn88PhYJ+t1WoG4ow/gTKKIjWbTVUqFZNvSOfgy/iwxL3dbtVsNtVoNC4SJ0APXTRBHOaGZXwC1W6307/9279pt9sZs+UZZoIpoMPzl0oltdvti+eez+d6+fKljsej7fvBnjjIKPBj/LBararZbFrxsZcv9Ho9k3oghUKHT/BleZ/AB8Ax1tgpMQNQQUsO+wfbizymWCxapyOkUQTs1WplyQLtsBeLhV6/fm1xa7vdGotKRyTkLmi8+Xkqde4O1mg0NB6PLbADZj6Z8omXv0+emwJ5/AoJBCD1nH3H5rEfYgbjxfewnPgN40QiDhNIkupBmjEE+Lj2XwKU5PjzI8HIBCMTjEwwMsHIjwMj30VKfpAXLpaOWdrniyVyQIM3RQYJUGGwvA7S61rR4RLsCQw8OOwe+lkGLp1O20DG8Vn/7ltQ+sJJWEK0qSyL8nPe6pFLSNJ4PDbWi24+dDfBEPk/z99sNq070Hw+V7vdVhzHtszNG7zXoMJ0UkjJGztMAn+DxrhUKl0s3aNh5m95Q2eOpHOXn+FwaEGVc/slexwOFpNlZ+/EkqzAFIMmMWAMUqnzBoej0cgYCcAJoGP5lqVm2MtUKqXxeHyhV5ZkbAtOjxNNJhMr8mYJHmZTkn1fKpU0GAw0HA4t8BM0CKTSuV3rfD63OgGvJ4b9bDab+uyzzy7GCykL9QfYK7IbHBv2lo5PjBdF6TzrYrHQV199dcFWxXFshc0weNQwlMtl3d7eWiCB5VwsFhbs0IITcEhGSAZ8AoIPkzwSnIvFoiUMXoedz+dNUoI/eZuHqWKu9vu9yTnwU2wR0IQZJngzxzB8sMTIZThHHJ/bCff7fWWz58J12GSeS5LZBYkdXbKIQ/g20izAEwDg77Fx/k8yiI9jZ3Ec20aqzJ2X8D2XdVEUzPyQjMLOsaKBfXlWnDln/LAfknu/cgBb+JfkEsnx80eCkQlGJhiZYGSCkQlGvvOFKwiC/zsIgt67PiM9abZZ2oTp4O0PcPEyCRgaDPG5vMFLJOI4NsYN7SQSBzooMTgUccK0sLxMMGQJEcPyUhmCGcbJvVO0BzME0BHI2UfCB2uCRTabNYP2S5CLxULz+dxawqKDhmHwB5Max7Hm87lp4NGs+jd4ijdhGAnUODRMFDpwAjBFyZKsEFrSBSDjTO12W/V6/WIjPcAR8KfwkMCA06D1nk6niqLIOuHA3MCkwWowd0g6YBOjKLrYIwL7gd3CUVmiZr8U5AwESqQwMHDYI1ICpBQe7HK5nBqNhsrlsn2OLkrFYlHD4dDY2lQqpeFwaMFJkur1+gWzikwijmNNJhPl83ljdLGpbDardrttxa0UOcPwMu9IYxjLMAzV6/V0Op0sSKFZJvi9efPGfIgvAhnPjz/iE7TyLZfLZnueZSdgwwh6wKEQHqYW6QYyI3w8jmPbB8XbF4XSgBJxAMBjXGD0Yd2QMBEssRM6PrEvjvcrYhAst4uN1loZ3TuJLvFEkhXd+8SVuWAco+hpM1zGDuD0yYIv4GV8JJkv73a7i/oX5g3m9Pm/kREBxNybj+v4lY/ZyfF0JBiZYGSCkQlGJhiZYORfWt2S/sILVxzH/18cxw/vPIOeujB50PCgABuw3+/tDRm2Agc/Ho/WD599HPxbpp+k0+l0AQL+LZQ3W4rlCJzs/cASPgPOxDIhDBgOVywWbZCRTaRSKZv8yWRi3Z0wyO12e1GY6JlMWEXpiTH7/vvvDdhwJJzFs3ncB+fzAJLL5WwPBwI1IA27SIEyhk3g94wHQOmTAApZAR/06jg7RYkEnyAIrKAUh6AYlXMQyAAR3xKV5/ZARuBFz346nazlKVpv6akugU0gCXDMCVKVKIrM+Rlj2EgYDJIf2BiCFfeKU3qZA8GOQtH5fK5SqaTdbqfHx0djVZDSSLJAiF2hlZ/P55LOIMQ+OfhBLpdToVAwtgf2R5Ix0J1OxzoJ/fjjjwZyPCuMItKObPbc8hYGmgJxfNcHe4IksgCkKt1uV9Vq9SLwebuAIQeEuRfa7JJAlMtlux4BFjbOy4MIhF6uQA0C+nTGFc06No68CdYb5heARp7BPcCycW5YWHzBJ7JcD5v0wOAZML9CAOvO2PqElO8ZC8/CcQ5AyNv1ZrMxn4eR4x75LAkDsZN5Ye6JP8nq1p8fCUYmGJlgZIKRCUYmGPnXYOQHkRTiZDwEN8X3GBzB2y93crNMGMumDAZBhIcm2PO3nnFjUPwk8XMCDiwaTEscx8YGAkphGNo9cj/c02g0MmNhAuhyxHPDkAF46IMx0EajYcvk3333nd0butTNZmM7jZfLZTUaDTUaDbsPHJVla9rmLhYLHQ7nTQ8JeDBUBH1JBggE7UajYSDIsjDjRrE284dUIY7PS7sedChqJlDTtQfJAEvzgBhadpwChqrValn7WkCfxAPHrdfrpnuGyUPfzv2jLaYdLhtmsnM4dktRahAEurq6kiRjfAA05m+xWJgchGsAtATcWq2m0WikxWKhOI5t40Xmmb1sPBB4MIXtJDiQHPnNT5GHsMcHshgCpiTriuTb+7KvCmxPOp22+adQmHEEiAHX0+lkUhq/+3qpVLoo6idRRMufyWRMFoEfU0cBGOPPjIEHYu6BuQBUuRbPhh3RycgnsnRAG4/HNoaADUzm6XQyZtEX3PukFjY1n
U7bmMGSM05ev059AQEamwOwJNnYHY/Hi31LYM0k2XMR24gXjBljge16xhVQA8z5HHHudDrZPcJQepkEzB4xLTne70gwMsHIBCMTjEww8uPAyHetcqX/w9+8xwHjgdMRSGFveDjPSvEGjiGdTuduSjBpfhO7KIouWlz6ZUXOyUDgUH55D6YCY1itVlqv1xc7uxOYYRVYKgUk0LTjpHQ1obiTyZtMJhcFx7whc49cB/bG65Ln87mazaa14ywWi2q329Z5JpVK6fHx8cLg1+u17c7+6aef6nA4mDZdko0lhoFzEJiZH4Aljs+yBlrHUjAKYOOgg8HAAI7lfIyU1pxonXFadNIAEYWkMKQwlDCyfnz4HWwl9kBgORwOJteBYWXu2M9jOp0akBEED4eDtXGF2UGTTIcoEhYYPJyLYEwggX3JZrO6ublRoXDe1+L169dmkyQf6NDRGBNYqAlgaZ+fE3wJzoABAYz52+12xoTBVMP0cX1snkALcw1DRHchwJTlfB+kkAkx95z7ue8gnWFjQ1qy4gckWLDhyCfoYFWr1Yw1wm58bQGrBMhpsBEvd+A6PimiUxS+7SVNsKveV7LZ88aNMH2r1Uq53HnvluFwaIXrPviT+B0OB7sfxokEhOfCtwCRzWZjoE7y6hNYQJC2wcRXNPvIlbgm+7Qwn7DjPgF/vkLgE3NsnntOjvc7EoxMMDLByAQjE4z89WPku1a5PtjGxxj28x28CQoMXj7/tO+HlygwsAwQBo+2FebDDwJvzZJs+ZPBg93yS5Qs0QNagBqB3TuKf/MncGEY9/f3enx8NGOklz8TMBwOVSgUTO/Mc3Ev6KrZSZ0uNcViUdPp1DZ+hPUiABI4Adh0Om07wqdSKdNhPz4+qlqtajKZaDgcGmOIZjmTyRhLAwNVLBat8JHzU9jJRpVsnAlrx2d4LsYjm80a+OEIzActZPkZ80JQgHkjwDwvrOZeCXYEv/l8bkwrDotTAz75fN4Kk0lksCmfqPi9IB4fH40tpLMWBeMEaB/EGIdCoWC7yO/3e9VqNVuyxsYBL57LL4uTmOFLvhPSeDyWJHsmzo12frVaab/fq9FoGBvD/iT4CfY5nU61Xq81mUwURZHJKZCXUPxNkG2329put8bMMlc8JzUIsFMwXfgAMgx+zv/DMLT2ufweP43js2SnUCiY7/hz4pfMO4ksNgariGwgnU5rtVpdbLaJvImkh5UGwIFicubveDxaa2nikS/kZX8f2j+T5PA8MGeeua5WqybrIknmHhgn4hVMHufGfj0r7VcdvOwFX/TMO7Hcr5IwFowz50+O9zsSjEwwMsHIBCMTjPw4MPJdxwfp8evlDyz7Mshob2GsuGnP0HiDk2TaTgaChyQIMIAYEA7H0h8sAAMFoyLJgjF/T2Djs0gEcEwMlWcKw1CNRsMKPuncwrP3+30rsM3n8xdjwbUJ5MgaaBVK29Q4Pu+dsVwudXd3p+VyqVqtZhPrl2nr9bpms5na7ba9uXc6HWNbqtWqer2erq6u1Gq1VCwWjZ0AzDFCmIMoioxFAlCRD7C0ig48iiKVSiXbxdwv66Jfhtktl8tarVY2bul02kATsObzLNUXi0ULwtgYDApL0WysuN/vNZlMLhISmKUwDK3gE50zzCLLzgRzGGWCB8EaBgNb8gwHthwE5w0T+/2+MW+/+93vVKvV1O12dTgcNJ1OLQDwXOzDgYQA+4dx6ff7SqWedjYfj8d2H8+LPWnDDLuE9KhUKl3se8O8eIZVOjPSsE88m2dRkY7AsBOUsR/06vgs+2rgq1yHoAX7TyKBXXL+m5sbpVIp20ke2wS0ua6XogAGPsBio8w3NsIzkEyQ2JxO571JYNe5n/F4bMmzByG+kF7hj5wPf/HzyvwT/JEVYX8wzd72sFNiFp/x7CAMKXOIHz9f1eCLmEmC7Ve0AJJETvifPxKMTDAywcgEIxOM/Lgx8oPUcGEMvrMLA8QyKDdHMMJgGHRJF5pKr6EkmMEG+TdL3r7ZEM4bEcuo0hNAecYP3SYFrwRCzpHL5awoE4YC/SnLruwQH0WRHh4eVK1WDQiOx6fOSHEc2+8wdpYzYcVwUFrp1mo1bbdb3d3dab8/b+hHpx+WfiWZA7PnAUzTy5cvbed4H6BzuZwVZ6PfHo/HFjCm06n9niViD8Ze71+tVtXtdrXdnjf7Y24rlYoBBSwOTsX+LIB8JpOxvT2YT9gP5oGxAiCZv/1+r8FgcME6sIEh94CjMgZs+NjpdIxN3O/3WiwWlgQQHKvVqslNfFE38xWGoer1uv0N0odaraZer6f1eq1Wq2WMFMwKOn4c+XQ6GZuIFIbkw18/DM+7oZNcEADY5JK/bzQaKpVKajab6vf7xhRVq1U7F22aYX5fvHhhTCqbKgLMsPLYng++0llCJMlqLNgsMZ0+1wgUCgW1Wq0LW4Kd58AGsJkwDI2JZCPFVqt1EawJyvgAAd8nqTDz0jmo0pWNFQBiEgDiE1aY/clkYgEeu2y1WiZ74VrIX3zXJYJ6Op02ORgSLb4AfIAYHwNwYBT5PPGVJI84DMB44OD8PD8AyeHtkL8nxvJvmLtkhev9jwQjE4xMMDLByAQjE4z8xTVcXorgNaYwb2jG/SDwpslys2dtcDQ6nCyXS9udHIbPM4MYMIbvtbIEfSQQp9PJghJBCwAjUBGkcRze8ikmjePYHDwMzzuP42zD4VDdblf1el3L5dKciCJPlvWRIVSrVTOSIDi30Gw2m3YvGGwURQZ4SCgKhYLpx6+urjQcDu2tnyVkNps8HA4WJAFQv2Eif8N8cR7flnWxWKherxsr6KUMgA5slyQbv+vrawsYq9XKNgM8Hs/drSgShmXYbDamdWcecSp0+uh00WHTVpU9LnAgAj8SERIJlrr7/b4VUfOFpIb9Q2A3YWiYGxybMeFeWIqP49j2k3l8fFQ+nzdmjufmeqfTyYIQzB1jBDtFUAeEqC+YTqfmaxyeaWPzSZI9L9fAH2EN8S8/5gQQ/DKVSpmun45pSCs8K4gGv1wuXySIsK9IQthvh4TTd01iDNLptDGqHmiw5efSAthWAO1wOGg0Gmmz2Wg8Hl8wigArCQWSER9AiQflclnT6fSioDwIAv3444+SZAkFtsxYc27GmJ8zT77QFyDi2QA24tzpdDIG0ssZYDAZDxhWz8pyPs/UARiSLuYB9hhtPayjX2VJjr/uSDAywcgEIxOMTDDy48DIdx2/GD1x2MPhYPtv4ExIJWD1WE7c7/fG+BHIMHDe2LPZrBXdbrfn3a398p43UCQHAJqXYzC4GCCT4XWdvP3DGhHoB4OBJpOJ2u22MUhxHKvdbtteEOx70Gq1lMmcN41EQyqd95OAlYFF4G28Xq9b4ehud94RHRDCKJFgAGKffPKJisWirq6udH9/r3a7bfsUeK1uGIbmyBRBExRub2+VzWbV7/eNDaRN6nQ6VT6ft2JpnDqOY11dXdncMfckAux+jjHiMMgq+DyAxblhMn1yAGhjMzgTrFY6fe4a9Pr1awN15oZuQCQkyBPYbI8ldgIGwZ+aCcCU
ILDdbvX999/bNWDngiCwugTkJ4DOeDw2ycp8PtfDw4M+/fRTY4cptJVk7JIkNZtNW4oncWDsYZGoeQC4W62WdfhivH1gJyADwNQgkNC8ePFCp9PJGFuuS9BG4w3DBVPJPNBGGDY7is7FtpvNRo1Gw56BMdvtdsa+AngUXTOXsGrYYqfTURzHGo1GtrdJqVQyEMHeSK68NIGagl6vZ9KaOI4NGL0Gng0YfVtcwCgMQ63Xaw0GA7XbbQNxmNrlcnmhJU+lUnY/nU7H5DnsAYMEA/8i2SZ+EbyxUYr/vXYekMDXiC/4If/+ufPxO66NnyFF4TrYKd/7pCU5/rojwcgEIxOMTDAywciPAyP/j65w+cDujZ8L0+2I5Ur+z/dxHNsD+OVeBpo3eQbCa6j5OQ7N7wjwaL7ZGR3tdy6XsxavgBAGwyDClPAmW6vVVCqVtFqtNJ/PzclyuZxms5kOh4MtQ/L2zaSPRqOLpf1c7rwp4HA41Gw203a7VbPZ1Hq9tnOhpeUNm/aftVrNghUAy33DYCA7SKVSxhJls1k1Gg0dDge9fv1aw+HwQkKBrhemlPmBCaAjDkwRDIIkA+p8Pm/LzzClSCdSqZRJEmD6OD/nSafTJglgGdyzFzg43aVw5kajoUKhYOODTIPagdlsptFodNE+FUal3++b7IYiahxzuVxe7MmBfGK/31vwoIXq9fW1RqORgiAwe4/j82ak7O1RLpfVbrc1GAzsWQAKkhyWvUnKarWaBWmSpGq1qlQqZQDgC8RZ2gesOScMJgxaKpUyGctms7Hl/fF4bPUX7EnDdbEvH5QI5nSf4jnoTLZcLtVut20eac3c7XbN9vDfQqFgG6QiaYBNa7fbCsNQzWbTWC6ecbfbmW/7IHo4HIy9lGRBXnraMwgmDhkRch4Y52azaZtT7vd7XV9f682bN5b80QIZVhObx+8k6fb21mQ8jJGPc8QNpA/FYlGTyeSiboKATkxk/IlPzA8xFMkIfsi5OR/XY275e9hYEnEvXcPWkuP9jgQjE4xMMDLByAQjPw6MfNfxQfQhsDcs6aPVZDK5Kf8ggASTyySw3M1EeA03bBS/44FhujAsBgXnYakaNhCDR+7Q6/XsdyxfHg4H5fN5e+sfDAbGvK3Xa9XrdQvcDH69XtdgMFCv19PhcLCN+dB4V6tVY03y+bzu7+91OBzU7/d1PJ47N+VyOX3++eemt+cZTqenQmW65rDx3Xw+NxYgCALrklOtVhWGoebzuQ6HwwXT2Gw2rbsQe5jA8tDOE5Bip3TkK6VSyQASZpClceYXnT8yk91up9FopOvra3v+6XSqSqVywSBQFErXK9gq5B2FQkG9Xs/2ygBQCCAsD1P4ikMC5MVi0difwWCgMAzV7/fNThuNhoEnIOa7YTHmSGFyuZzu7u7UbDaN2Xv58uUFK/l3f/d32u/3xgwTCNjokqV0QBz2EC03zwn47nY7PTw8GBPU7/fNpyRdaK3T6fRF56fBYHARICVZZypJtveND06ASKfT0XQ61WAwULVaNZaLwA+oMfaz2UwvX75UpVIxuQ5F74vFwgrBc7mcbdRJQlGtVm0PFp4ZbT0Ja7Va1c3Njd68eaNisahPP/1Uq9XqQj6BFGm9XqvT6VhsoM6BRGo6ndpcPz4+aj6f69WrV4qic7vt6+trA0PYsZubG9tYFPaSVtWAMcH6dDrvz0LtQLFYNJsjFpJgMRf4BYABICN/4W/RzcPcw/Tyb5I9it9JIpBL8TKADz2vG+LfJCfJ8f5HgpEJRiYYmWBkgpG/fox81wrXB9GHeNaDgkL009wkb54YPYBDgK5UKvZWGwTnLjYEA9gLgqWXQ3Ct4/F40RYTZgAtL8wfDsCy8Gq1Mj024IDsAfBj2XU6nerrr7+24k6CjST98MMPtrHgcDi0VrbS0y7oktRut5XNZq0LE+AHILGkjkZ6sVjodDqp3+9bUeXV1ZUKhYJ1IioWi3a9TCZjhceedQEsNpuNms2mrq+v9erVKwvuzFWlUjHWpdFoqNfrGTARuKUnps3rpVkefv36tXUZgjEhwP2cwcNg9Xo91et1M1rPWsHChGGoyWSixWKhKIqs4NnbgHS5gV0YhrYzeTabtXqB29tb/cu//IvCMFS32zWHxvn4O+51u93q4eFBh8N540yCY71e1+3tre0Pg7QAx8fWCW506EIWAHONhAbwmM/nWi6XFqgA29lsZnZD/QIyEMC20+mYRht2qFgsqtfrmc3Trpg9awg2SEk4F5207u7uTEIBs5nNZnV1dWVBEGAhSchkzpsz4re5XM72tnl8fNRkMtFkMrEaA1j2h4cHjUYj5XI5/c3f/I0Feuo9kPb84Q9/0GAwMIkPRcOffPKJMZl0LCMZC8NQg8FAj4+PkmQSJeQQknR9fa0gCPS///f/1jfffKPJZGKscRiG+uyzz4zNv76+Vj6ftz1XpLPE69WrV3r58uWFT0dRpNFoZAnhbrez71nZQLpFDQFsqV8Zwa98soDUg2SJYnjsj3jw/LzUREiyVQySfHybuIn0KTne70gwMsHIBCMTjEww8tePke86PghdudvttN1ubfd4dJgUP8Kyeb0nb9XoPpvNpjqdjskKCDgs96K/5u9gWjFSpAuSVCwWzUlgk+I4Ngaw2Wza0iEggr6aSWPgYJ/82y+Tvlwu1el0tNvttFwutVwuL5acCRq8FY/HY9vAkG5KBHqWiGGrcrmcLa1WKhXbiZ0xY5Jhwnq9npbLpfL5vI2zlwYQ4AAZNMytVkuHw0E//PCDMUk4yXBP0ys9AAAgAElEQVQ4VD5/3nkcPTBHp9PRer3WdDq18eH+cSq0wblcTs1mU9Pp1DoFUQS53Z7b9hJkMXhYE3T1aPp5Jpiy0+lkLBhByndKgtWVZA5BgP7d736nh4cHffPNN/r7v//7iwJzOlIh8eAZ0L7DWt3f3xtzNRqNdDwe7X7i+LyfzGg0MmaW3/P3XhsMiMKeLhYLYznRKBMEYUczmYzJbJrNpiqVijF9vtA1nU4bQ814khAxttgWRel8DuaIoMS9w+zW63VLCJfLpcrlsm18CqPl61CQO6FxB8S3260xrJVKRe12W3Ec23yRhAKOr1+/tvsNw1DfffedjsejPv/8c7s/6hPQuMM+4++SNJ1OLcnNZDJ2/dPppF6vp/v7e2MfqeuYzWZm2zc3N7aHC3uxYIuw9STDBHHAg2QFYCEBIt4R5Dn8RrR8Br/gnr2+vNPpXDQhQDePn/ESBSvntezcN3UZ+FNyvP+RYGSCkQlGJhiZYOSvHyPfdXyQF64wPO+74Q2VzjoMEEEgnU4bY0OQp1Cv0WjYEr5/WN64Wb4HnHg4zs+mhlF03k+BwE9QIwCNRqOLvSsOh4MVaTJoLDNWKhUrGpzP56pWq6YhZyn3eDyaoxJMHx4ebB+NbDZrjsHzrFYr1et1HY9H1et1ff/99+ZoFHsSvNDM0gmFfSP2+71tZjifz3U6nTs2AU6wWujTCaqwEASR/X6vXq9nenWek+d79eqVUqmU2u22sVAERvYs4b7YNDOTydgyOTu
n4zSSTKcehqHG47ExMjCryBHK5bJms5m1D2WZl3miVoCAyzK+L54kCclms3Z96hn43Y8//qhOp6OHhwcLfARcwFLSn3V48rUI4/HY5ubly5f66quvjEWm4BgGGz1zFEW2oeJ2u7UNG2EZ8QGeA/sgYGUyGU2nU+tchZ/5pXZsBfkJjFoYPm1ISHKGHIc5poUssiNsQpLZDvIhahl8ggeQ0akMxpfPslzvZUSwXLBsAMg333xjzNxgMLBzBkGgyWRiY/fq1SudTifryFUoFDSbzdRoNPTpp5/q4eFBqVTK9OUEYgCO8YXV7HQ6xvAh28Bu0M6jj6eoGOkDYH04HKzQPZM57z0ynU51d3dnbDsMN53PsJMoii4SQRJsEm6SRcY0DEOrsSAeI39iLn2tAbUmxGWSOZ/EY/PJ8Z87EoxMMDLByAQjE4z8uDHyF0sKAQUuDFvAmy3BNAgC0xEz4OgjJdmbrte2E0RhS2AbkDrw5rrfn9s/Elhgg3AujNgPCNre0+l0Ia9gApE0oLGGQfMMTxRF+uMf/3geyLcaUpbeKbxFXsBydy6X02g0MnaKoBeGoXWPCcOzbh52b7fbmaQDAF4ulxYghsOhGeBisdBPP/1kQQiGaDgcWvcZlv95k5/P59rtdraXxf39vb3Vo7UPgkD/+q//qtFoZLrsm5sbTadTDYdDDQYD3dzc6OHhQUEQWCeh5XJpMgPqC0qlki2hw/7QladcLqvRaOjly5fq9XoGLq1Wy1ib9Xqtm5sbTSYTqzuI49iCIAzyfr/X3d2dLSmjTYZtgoGpVqv64x//aPaz2+0saBQKBWN8COowaVyHZfwwDE2njxSIAINfeNkJRd9xHKvb7RoQE9Qomn14eLDgCdPdarUkyWQ9yCx8APetaLE97jGTObc0JsmCySYwoenHZo7Ho9Uk4O/4AtIfzhuGoRWgM84wR4AH/4fN5tq+m9XxeFSlUjFfnE6nyuVy+v777/WHP/zB5Bdo8Un8SNSazab58HQ61VdffaVCoWAyk/3+vAlop9NRqVRSu91Wt9s1qQeM/HK51MPDg16/fq3RaGSJHXvYwBI/Pj5exDbkC/7ZkG4Q0F++fGkSikqlYvsKIW+hjoGkkFjnpVxIbrgPgA2W3jN8xDdfC0AM9jVa2BFMHvbhGfzk+OuOBCMTjEwwMsHIBCM/Dox8lwrkg7xwccMs4xHk+D1vrAwazsiN0uUFEJJkRaA8tA/wBE7e6mE5mDwM8vHxUavVyt66cRDeXGHs6OACULF0TQvfWq1moMl5pPPye7vd1maz0WKx0HQ61Wg0kiS9fPlS19fXph3PZrO6ubmx4EdhKQZxf3+vOI41mUz08PCg6XRqy/QwFKvVSre3tzb5/B8H8su3d3d3qtfr1oWoUChc1ABghAR1ghr64+l0qm63axp8jIlAOxqNrHAVQMZRl8ulZrOZMpnLjSdhgNB1M1/X19dqtVpm7LBldDlCekLgQZ+ey+X06tUrFYtF9ft9C1Kwf2i2PRPhzwuLSUvlr776Su12+0Iig24fVgVtL4WnyC+wD+mJVabwerfb6f7+/qKQk2J5EqfpdGqMG5IHZAT4B3PIWAEq2WzWbBjfgZmTzgA9Ho81n8+tHS1MEcvjMELM1/F4tGujQ6fgu1AoqFqtKp0+7/0xHA5N6gJLW6/XLeHLZDLGTMLscU3qL5DPdLtdq6HI5XJ68eKFyUJ6vZ5Op5OBAgwxwESh89dff23SJZKFIAhsDtPpc3e2MAz1u9/9zsab4M08EsQp4gVYoijSfD7XaDTS119/re+++84kLIANAd8XL+fzefMFVgqCIFC327XVD6QhxCV8j8ScOMk4ezYZkC8Wi+bnSK9Itr18iHoHEjzqeBhvL1djNeMvdWFKjj8/EoxMMDLByAQjE4z8ODDyXS9cqS+//PKvxY2fPX7/+99/WalUTL+Nc8IywcIRoDEwmIRsNmvsAG/stM7F8Vj+Qz+KxpNghdNhlLxlEpgYHAYbloQAg/Mz6JIsyKMX3+12ViyMTIDleQp+YR5xZDoTZbNZLRYLW95lgh4fHxUEgRqNhu7u7qxQFe08RgETxPPlcjmTawyHQ7tHDCqfz1snqPF4fFEk+1yuslgsVKvVlMvlNJlMLIC2Wi0NBgP99NNPevXqlR4fH419Ywl9MBioXC4b6OKoBOJOp2NzlUqlNJvNjDWB1SEgSDIgotNSLpezZX2KklkCRppBV55cLmeaY9gqnMOztnRy4vlhh3u9niaTiQ6Hg90j7BrL+pKMwaWYlvHk94VCQfV63ZaywzDUaDSy8SkUClbPgb2zJA4jBgtHJ59s9rxHxn6/t7nnfAAiCYsv6qUbELp5xo97pkYCeyD4kGB4mQZ7plBki2/ClvLsJI+LxcISE3ylVqtZh6tyuWzMGywfCdbxeDSbTKVS+umnn2yeb29vDUjpJBQEge19Isme9/PPP7cW00EQWBc04k4qldLLly+N5adwFzafcYHNy2azur6+NvCGvW00Gmq32/rkk080n8+13+8NKEhssSOKuWHzuZcoitRoNLRYLHR/f29AQcIL4ybJ2k8Tk2ClOXxnJeYIzTsJPKBO4kH85Yu/41wk2ofDQev1+u7LL7/8X78IOD6iI8HIBCMTjEwwkmdPMPLXjZHr9Vr/+I//+D9/Dgt+8QvXP/3TP33JGyRBzzMHDAJOjoPBvsGIIXvg5xg9xsmbJiwGE+A1lgRelnG73a72+70qlYr9LZNHtxjYOAyRpW/aw6IbZUmeTj8MOEWXs9lMq9VK7XZbjUbD9J5MxHq9Nt030gCC6W9+8xtrnct+HGxOCGiMx2O77nQ6tb00CP6wD8vlUsfjUS9fvtR0OrW3/NPpZOyLZ6MI/hgOe5nAyHz++ed272j6mdvNZmNO32g0LIhLsr0luD9kBWij9/u9tSCVZAkEtkMXIGwHx6AOoNPpqFqtarFYKAiCC2ZlvV5b0EUmgGSGc8LuAHblclmpVErff/+9fvOb39g8+V3esRFkJmjdpXPCw+7wXIdiXdq5BkFggOABFElPsVhUHJ87hdXrdVsaRw8dx7FarZbNMa2KwzA0jfPpdLLWvPgbxbHY+qeffqr7+3srQo3jWJ1OR9K5Qxj+TAJWLpdNBkB9APf2/7P3Zj1yZcfV9sqTJ+d5zhpYxbHVatlGw4AFX/kz/LtsoF/Y+hPvv3mvDBiwLclWD2yySdaY8zyP30X6idpJS4Tobt+I5wCE1GRl5Tl7x44VZ8WKCLT5OGCCKDoxsb7sK0yk9FCgzXnGVvg59o/ubi6w8d9u8Mj5931f7969Uyh0aP9MUf56vbbgDyaRIuRisXj0jLBYgBv2ms1mjxhvAtX3A0kyGPi7yWRiAfZ8Ptd8PreA1mU98Vewb/gdpCjvX9Tj8J3ufCECLz5HgCo9tNPFx3LhUwluWFsXZIIXro+7AowMMDLAyAAjA4z8NDBysVj8wReuH900g7dBQIHFhbnj7RU5gyR7a3elFHQugWnhoJPiI7242RzamqIBlmRvybAI8/ncBtOtVitzvL1eT6VSyZ
[... base64-encoded notebook output image data elided ...]
fjvOOOMMjB8/HlarFVu2bMHAwACuuOIKXH755airq4PX68W0adPQ3Nwstsrj8SAYDMJms+GDDz7AOeecg3PPPRennHIK6uvrkUgk0NjYiOuvvx7nnHPOYb+rivvvv7/nvvvue+KoJtYJBM2RmiM1jl9ojjwwTkSOfOKJJ3DDDTfcv9+5crR+0rNnz6589NFH0rlf+QLDOGJf7AP9zsH+ffT3H+4zRnfiodp6pG071LM0NL7JYEAvq9aPjIzIpsgwDHEBKpVKsNvtqKurq9l0qc8BqgUbh4eHUSwWZYNlMplEcaqrq5Nq8+oz+PtU4XK5XM1647OKxSKGhoZQqVTgdrvFVQIAhoaGkMvlJBMS03MPDg6iUqmgrq6uZkN7OH0DVP3N0+m0BD7bbDYhELWN3EgCqPGNp6/8yMiIBEB7PB7px2w2K6qj2+2WWweOzf7aWyqVEIvFUFdXB4/Hg3K5jHw+j+HhYclY9XUOW/9/TD6uVCqzj+iXTmBojjz4vx/qWRoa32Rojjx43wAnFkfOnj0bH3300X5/YUwPXBoaGscf1PpANF40dGpgLQBRww/1HOCrWZBUA3u4z+B/+b0Her5hy11OXQAAAj1JREFUVONiRrtBjA4MPlLjyu9i4DPbMrqd/Hc1CcFosH3q+6j9Qhyoffsj79HP589G31ocCfSB68igOVJD4/iG5siD9w3bThzPHHmwA9cxqcOloaFx/EA1yPv798PN3nOw5xzKH3z05/dnBA/VztFkQWJU2/91FfXRbT+YoT7Qv+/vGfvrl8Np4/5uE77OczQ0NDQ0Dg7NkYeG5kh94NLQ0DgMHKlhPFbPOdTnDvbzA5HM0WAsDfLBCHKsnqehoaGhMfbQHPn12nO0z/o2caSuLKihoaGhoaGhoaGhoXGMcNQxXIZhxAB0jE1zNDQ0NDS+4RhfqVQif+xGfFugOVJDQ0PjhMEB+fGoD1waGhoaGhoaGhoaGhoa+4d2KdTQ0NDQ0NDQ0NDQ0DhG0AcuDQ0NDQ0NDQ0NDQ2NYwR94NLQ0NDQ0NDQ0NDQ0DhG0AcuDQ0NDQ0NDQ0NDQ2NYwR94NLQ0NDQ0NDQ0NDQ0DhG0AcuDQ0NDQ0NDQ0NDQ2NY4SjPnAZhnH6WDTkjwXDMOYZhtHwx27H14FhGKd/i9t+tmEYgT92O74uDMM441vc9+d8y/v+rG9r3wPf/v7XODJ8mzlS8+MfD99mjvw28yPw7bfR32aO/Lb3/cGg63BpaGhoaGhoaGhoaGgcI2iXQg0NDQ0NDQ0NDQ0NjWMEfeDS0NDQ0NDQ0NDQ0NA4RtAHLg0NDQ0NDQ0NDQ0NjWMEfeDS0NDQ0NDQ0NDQ0NA4RtAHLg0NDQ0NDQ0NDQ0NjWOE/weewGggFgupFgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [], + "needs_background": "light" + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lMombPr0GF9a", + "colab_type": "text" + }, + "source": [ + "The images used in this demo are from the [Snapshot Serengeti dataset](http://lila.science/datasets/snapshot-serengeti), and released under the [Community Data License Agreement (permissive variant)](https://cdla.io/permissive-1-0/)." + ] + } + ] +} \ No newline at end of file diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/eager_few_shot_od_training_tf2_colab.ipynb b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/eager_few_shot_od_training_tf2_colab.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..a779528fa76abb1f7b08bdf0fcb7fb417738e3a0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/eager_few_shot_od_training_tf2_colab.ipynb @@ -0,0 +1,685 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "rOvvWAVTkMR7" + }, + "source": [ + "# Eager Few Shot Object Detection Colab\n", + "\n", + "Welcome to the Eager Few Shot Object Detection Colab --- in this colab we demonstrate fine tuning of a (TF2 friendly) RetinaNet architecture on very few examples of a novel class after initializing from a pre-trained COCO checkpoint.\n", + "Training runs in eager mode.\n", + "\n", + "Estimated time to run through this colab (with GPU): \u003c 5 minutes." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "vPs64QA1Zdov" + }, + "source": [ + "## Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "LBZ9VWZZFUCT" + }, + "outputs": [], + "source": [ + "!pip install -U --pre tensorflow==\"2.2.0\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "oi28cqGGFWnY" + }, + "outputs": [], + "source": [ + "import os\n", + "import pathlib\n", + "\n", + "# Clone the tensorflow models repository if it doesn't already exist\n", + "if \"models\" in pathlib.Path.cwd().parts:\n", + " while \"models\" in pathlib.Path.cwd().parts:\n", + " os.chdir('..')\n", + "elif not pathlib.Path('models').exists():\n", + " !git clone --depth 1 https://github.com/tensorflow/models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "NwdsBdGhFanc" + }, + "outputs": [], + "source": [ + "# Install the Object Detection API\n", + "%%bash\n", + "cd models/research/\n", + "protoc object_detection/protos/*.proto --python_out=.\n", + "cp object_detection/packages/tf2/setup.py .\n", + "python -m pip install ." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "uZcqD4NLdnf4" + }, + "outputs": [], + "source": [ + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import os\n", + "import random\n", + "import io\n", + "import imageio\n", + "import glob\n", + "import scipy.misc\n", + "import numpy as np\n", + "from six import BytesIO\n", + "from PIL import Image, ImageDraw, ImageFont\n", + "from IPython.display import display, Javascript\n", + "from IPython.display import Image as IPyImage\n", + "\n", + "import tensorflow as tf\n", + "\n", + "from object_detection.utils import label_map_util\n", + "from object_detection.utils import config_util\n", + "from object_detection.utils import visualization_utils as viz_utils\n", + "from object_detection.utils import colab_utils\n", + "from object_detection.builders import model_builder\n", + "\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "IogyryF2lFBL" + }, + "source": [ + "# Utilities" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "-y9R0Xllefec" + }, + "outputs": [], + "source": [ + "def load_image_into_numpy_array(path):\n", + " \"\"\"Load an image from file into a numpy array.\n", + "\n", + " Puts image into numpy array to feed into tensorflow graph.\n", + " Note that by convention we put it into a numpy array with shape\n", + " (height, width, channels), where channels=3 for RGB.\n", + "\n", + " Args:\n", + " path: a file path.\n", + "\n", + " Returns:\n", + " uint8 numpy array with shape (img_height, img_width, 3)\n", + " \"\"\"\n", + " img_data = tf.io.gfile.GFile(path, 'rb').read()\n", + " image = Image.open(BytesIO(img_data))\n", + " (im_width, im_height) = image.size\n", + " return np.array(image.getdata()).reshape(\n", + " (im_height, im_width, 3)).astype(np.uint8)\n", + "\n", + "def plot_detections(image_np,\n", + " boxes,\n", + " classes,\n", + " scores,\n", + " category_index,\n", + " figsize=(12, 16),\n", + " image_name=None):\n", + " \"\"\"Wrapper function to visualize detections.\n", + "\n", + " Args:\n", + " image_np: uint8 numpy array with shape (img_height, img_width, 3)\n", + " boxes: a numpy array of shape [N, 4]\n", + " classes: a numpy array of shape [N]. Note that class indices are 1-based,\n", + " and match the keys in the label map.\n", + " scores: a numpy array of shape [N] or None. 
If scores=None, then\n", + " this function assumes that the boxes to be plotted are groundtruth\n", + " boxes and plot all boxes as black with no classes or scores.\n", + " category_index: a dict containing category dictionaries (each holding\n", + " category index `id` and category name `name`) keyed by category indices.\n", + " figsize: size for the figure.\n", + " image_name: a name for the image file.\n", + " \"\"\"\n", + " image_np_with_annotations = image_np.copy()\n", + " viz_utils.visualize_boxes_and_labels_on_image_array(\n", + " image_np_with_annotations,\n", + " boxes,\n", + " classes,\n", + " scores,\n", + " category_index,\n", + " use_normalized_coordinates=True,\n", + " min_score_thresh=0.8)\n", + " if image_name:\n", + " plt.imsave(image_name, image_np_with_annotations)\n", + " else:\n", + " plt.imshow(image_np_with_annotations)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "sSaXL28TZfk1" + }, + "source": [ + "# Rubber Ducky data\n", + "\n", + "We will start with some toy (literally) data consisting of 5 images of a rubber\n", + "ducky. Note that the [coco](https://cocodataset.org/#explore) dataset contains a number of animals, but notably, it does *not* contain rubber duckies (or even ducks for that matter), so this is a novel class." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "SQy3ND7EpFQM" + }, + "outputs": [], + "source": [ + "# Load images and visualize\n", + "train_image_dir = 'models/research/object_detection/test_images/ducky/train/'\n", + "train_images_np = []\n", + "for i in range(1, 6):\n", + " image_path = os.path.join(train_image_dir, 'robertducky' + str(i) + '.jpg')\n", + " train_images_np.append(load_image_into_numpy_array(image_path))\n", + "\n", + "plt.rcParams['axes.grid'] = False\n", + "plt.rcParams['xtick.labelsize'] = False\n", + "plt.rcParams['ytick.labelsize'] = False\n", + "plt.rcParams['xtick.top'] = False\n", + "plt.rcParams['xtick.bottom'] = False\n", + "plt.rcParams['ytick.left'] = False\n", + "plt.rcParams['ytick.right'] = False\n", + "plt.rcParams['figure.figsize'] = [14, 7]\n", + "\n", + "for idx, train_image_np in enumerate(train_images_np):\n", + " plt.subplot(2, 3, idx+1)\n", + " plt.imshow(train_image_np)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "cbKXmQoxcUgE" + }, + "source": [ + "# Annotate images with bounding boxes\n", + "\n", + "In this cell you will annotate the rubber duckies --- draw a box around the rubber ducky in each image; click `next image` to go to the next image and `submit` when there are no more images.\n", + "\n", + "If you'd like to skip the manual annotation step, we totally understand. In this case, simply skip this cell and run the next cell instead, where we've prepopulated the groundtruth with pre-annotated bounding boxes.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "-nEDRoUEcUgL" + }, + "outputs": [], + "source": [ + "gt_boxes = []\n", + "colab_utils.annotate(train_images_np, box_storage_pointer=gt_boxes)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "wTP9AFqecUgS" + }, + "source": [ + "# In case you didn't want to label...\n", + "\n", + "Run this cell only if you didn't annotate anything above and\n", + "would prefer to just use our preannotated boxes. Don't forget\n", + "to uncomment." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "wIAT6ZUmdHOC" + }, + "outputs": [], + "source": [ + "# gt_boxes = [\n", + "# np.array([[0.436, 0.591, 0.629, 0.712]], dtype=np.float32),\n", + "# np.array([[0.539, 0.583, 0.73, 0.71]], dtype=np.float32),\n", + "# np.array([[0.464, 0.414, 0.626, 0.548]], dtype=np.float32),\n", + "# np.array([[0.313, 0.308, 0.648, 0.526]], dtype=np.float32),\n", + "# np.array([[0.256, 0.444, 0.484, 0.629]], dtype=np.float32)\n", + "# ]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Dqb_yjAo3cO_" + }, + "source": [ + "# Prepare data for training\n", + "\n", + "Below we add the class annotations (for simplicity, we assume a single class in this colab; though it should be straightforward to extend this to handle multiple classes). We also convert everything to the format that the training\n", + "loop below expects (e.g., everything converted to tensors, classes converted to one-hot representations, etc.)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "HWBqFVMcweF-" + }, + "outputs": [], + "source": [ + "\n", + "# By convention, our non-background classes start counting at 1. Given\n", + "# that we will be predicting just one class, we will therefore assign it a\n", + "# `class id` of 1.\n", + "duck_class_id = 1\n", + "num_classes = 1\n", + "\n", + "category_index = {duck_class_id: {'id': duck_class_id, 'name': 'rubber_ducky'}}\n", + "\n", + "# Convert class labels to one-hot; convert everything to tensors.\n", + "# The `label_id_offset` here shifts all classes by a certain number of indices;\n", + "# we do this here so that the model receives one-hot labels where non-background\n", + "# classes start counting at the zeroth index. 
This is ordinarily just handled\n", + "# automatically in our training binaries, but we need to reproduce it here.\n", + "label_id_offset = 1\n", + "train_image_tensors = []\n", + "gt_classes_one_hot_tensors = []\n", + "gt_box_tensors = []\n", + "for (train_image_np, gt_box_np) in zip(\n", + " train_images_np, gt_boxes):\n", + " train_image_tensors.append(tf.expand_dims(tf.convert_to_tensor(\n", + " train_image_np, dtype=tf.float32), axis=0))\n", + " gt_box_tensors.append(tf.convert_to_tensor(gt_box_np, dtype=tf.float32))\n", + " zero_indexed_groundtruth_classes = tf.convert_to_tensor(\n", + " np.ones(shape=[gt_box_np.shape[0]], dtype=np.int32) - label_id_offset)\n", + " gt_classes_one_hot_tensors.append(tf.one_hot(\n", + " zero_indexed_groundtruth_classes, num_classes))\n", + "print('Done prepping data.')\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "b3_Z3mJWN9KJ" + }, + "source": [ + "# Let's just visualize the rubber duckies as a sanity check\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "YBD6l-E4N71y" + }, + "outputs": [], + "source": [ + "dummy_scores = np.array([1.0], dtype=np.float32) # give boxes a score of 100%\n", + "\n", + "plt.figure(figsize=(30, 15))\n", + "for idx in range(5):\n", + " plt.subplot(2, 3, idx+1)\n", + " plot_detections(\n", + " train_images_np[idx],\n", + " gt_boxes[idx],\n", + " np.ones(shape=[gt_boxes[idx].shape[0]], dtype=np.int32),\n", + " dummy_scores, category_index)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ghDAsqfoZvPh" + }, + "source": [ + "# Create model and restore weights for all but last layer\n", + "\n", + "In this cell we build a single stage detection architecture (RetinaNet) and restore all but the classification layer at the top (which will be automatically randomly initialized).\n", + "\n", + "For simplicity, we have hardcoded a number of things in this colab for the specific RetinaNet architecture at hand (including assuming that the image size will always be 640x640), however it is not difficult to generalize to other model configurations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "9J16r3NChD-7" + }, + "outputs": [], + "source": [ + "# Download the checkpoint and put it into models/research/object_detection/test_data/\n", + "\n", + "!wget http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz\n", + "!tar -xf ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz\n", + "!mv ssd_resnet50_v1_fpn_640x640_coco17_tpu-8/checkpoint models/research/object_detection/test_data/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "RyT4BUbaMeG-" + }, + "outputs": [], + "source": [ + "tf.keras.backend.clear_session()\n", + "\n", + "print('Building model and restoring weights for fine-tuning...', flush=True)\n", + "num_classes = 1\n", + "pipeline_config = 'models/research/object_detection/configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config'\n", + "checkpoint_path = 'models/research/object_detection/test_data/checkpoint/ckpt-0'\n", + "\n", + "# Load pipeline config and build a detection model.\n", + "#\n", + "# Since we are working off of a COCO architecture which predicts 90\n", + "# class slots by default, we override the `num_classes` field here to be just\n", + "# one (for our new rubber ducky class).\n", + "configs = config_util.get_configs_from_pipeline_file(pipeline_config)\n", + "model_config = configs['model']\n", + "model_config.ssd.num_classes = num_classes\n", + "model_config.ssd.freeze_batchnorm = True\n", + "detection_model = model_builder.build(\n", + " model_config=model_config, is_training=True)\n", + "\n", + "# Set up object-based checkpoint restore --- RetinaNet has two prediction\n", + "# `heads` --- one for classification, the other for box regression. 
We will\n", + "# restore the box regression head but initialize the classification head\n", + "# from scratch (we show the omission below by commenting out the line that\n", + "# we would add if we wanted to restore both heads)\n", + "fake_box_predictor = tf.compat.v2.train.Checkpoint(\n", + " _base_tower_layers_for_heads=detection_model._box_predictor._base_tower_layers_for_heads,\n", + " # _prediction_heads=detection_model._box_predictor._prediction_heads,\n", + " # (i.e., the classification head that we *will not* restore)\n", + " _box_prediction_head=detection_model._box_predictor._box_prediction_head,\n", + " )\n", + "fake_model = tf.compat.v2.train.Checkpoint(\n", + " _feature_extractor=detection_model._feature_extractor,\n", + " _box_predictor=fake_box_predictor)\n", + "ckpt = tf.compat.v2.train.Checkpoint(model=fake_model)\n", + "ckpt.restore(checkpoint_path).expect_partial()\n", + "\n", + "# Run model through a dummy image so that variables are created\n", + "image, shapes = detection_model.preprocess(tf.zeros([1, 640, 640, 3]))\n", + "prediction_dict = detection_model.predict(image, shapes)\n", + "_ = detection_model.postprocess(prediction_dict, shapes)\n", + "print('Weights restored!')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "pCkWmdoZZ0zJ" + }, + "source": [ + "# Eager mode custom training loop\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "nyHoF4mUrv5-" + }, + "outputs": [], + "source": [ + "tf.keras.backend.set_learning_phase(True)\n", + "\n", + "# These parameters can be tuned; since our training set has 5 images\n", + "# it doesn't make sense to have a much larger batch size, though we could\n", + "# fit more examples in memory if we wanted to.\n", + "batch_size = 4\n", + "learning_rate = 0.01\n", + "num_batches = 100\n", + "\n", + "# Select variables in top layers to fine-tune.\n", + "trainable_variables = detection_model.trainable_variables\n", + "to_fine_tune = []\n", + "prefixes_to_train = [\n", + " 'WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead',\n", + " 'WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead']\n", + "for var in trainable_variables:\n", + " if any([var.name.startswith(prefix) for prefix in prefixes_to_train]):\n", + " to_fine_tune.append(var)\n", + "\n", + "# Set up forward + backward pass for a single train step.\n", + "def get_model_train_step_function(model, optimizer, vars_to_fine_tune):\n", + " \"\"\"Get a tf.function for training step.\"\"\"\n", + "\n", + " # Use tf.function for a bit of speed.\n", + " # Comment out the tf.function decorator if you want the inside of the\n", + " # function to run eagerly.\n", + " @tf.function\n", + " def train_step_fn(image_tensors,\n", + " groundtruth_boxes_list,\n", + " groundtruth_classes_list):\n", + " \"\"\"A single training iteration.\n", + "\n", + " Args:\n", + " image_tensors: A list of [1, height, width, 3] Tensor of type tf.float32.\n", + " Note that the height and width can vary across images, as they are\n", + " reshaped within this function to be 640x640.\n", + " groundtruth_boxes_list: A list of Tensors of shape [N_i, 4] with type\n", + " tf.float32 representing groundtruth boxes for each image in the batch.\n", + " groundtruth_classes_list: A list of Tensors of shape [N_i, num_classes]\n", + " with type tf.float32 representing groundtruth boxes for each image in\n", + " the batch.\n", + "\n", + " Returns:\n", + " A 
scalar tensor representing the total loss for the input batch.\n", + " \"\"\"\n", + " shapes = tf.constant(batch_size * [[640, 640, 3]], dtype=tf.int32)\n", + " model.provide_groundtruth(\n", + " groundtruth_boxes_list=groundtruth_boxes_list,\n", + " groundtruth_classes_list=groundtruth_classes_list)\n", + " with tf.GradientTape() as tape:\n", + " preprocessed_images = tf.concat(\n", + " [detection_model.preprocess(image_tensor)[0]\n", + " for image_tensor in image_tensors], axis=0)\n", + " prediction_dict = model.predict(preprocessed_images, shapes)\n", + " losses_dict = model.loss(prediction_dict, shapes)\n", + " total_loss = losses_dict['Loss/localization_loss'] + losses_dict['Loss/classification_loss']\n", + " gradients = tape.gradient(total_loss, vars_to_fine_tune)\n", + " optimizer.apply_gradients(zip(gradients, vars_to_fine_tune))\n", + " return total_loss\n", + "\n", + " return train_step_fn\n", + "\n", + "optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)\n", + "train_step_fn = get_model_train_step_function(\n", + " detection_model, optimizer, to_fine_tune)\n", + "\n", + "print('Start fine-tuning!', flush=True)\n", + "for idx in range(num_batches):\n", + " # Grab keys for a random subset of examples\n", + " all_keys = list(range(len(train_images_np)))\n", + " random.shuffle(all_keys)\n", + " example_keys = all_keys[:batch_size]\n", + "\n", + " # Note that we do not do data augmentation in this demo. If you want a\n", + " # a fun exercise, we recommend experimenting with random horizontal flipping\n", + " # and random cropping :)\n", + " gt_boxes_list = [gt_box_tensors[key] for key in example_keys]\n", + " gt_classes_list = [gt_classes_one_hot_tensors[key] for key in example_keys]\n", + " image_tensors = [train_image_tensors[key] for key in example_keys]\n", + "\n", + " # Training step (forward pass + backwards pass)\n", + " total_loss = train_step_fn(image_tensors, gt_boxes_list, gt_classes_list)\n", + "\n", + " if idx % 10 == 0:\n", + " print('batch ' + str(idx) + ' of ' + str(num_batches)\n", + " + ', loss=' + str(total_loss.numpy()), flush=True)\n", + "\n", + "print('Done fine-tuning!')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "WHlXL1x_Z3tc" + }, + "source": [ + "# Load test images and run inference with new model!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "WcE6OwrHQJya" + }, + "outputs": [], + "source": [ + "test_image_dir = 'models/research/object_detection/test_images/ducky/test/'\n", + "test_images_np = []\n", + "for i in range(1, 50):\n", + " image_path = os.path.join(test_image_dir, 'out' + str(i) + '.jpg')\n", + " test_images_np.append(np.expand_dims(\n", + " load_image_into_numpy_array(image_path), axis=0))\n", + "\n", + "# Again, comment out this decorator if you want to run inference eagerly\n", + "@tf.function\n", + "def detect(input_tensor):\n", + " \"\"\"Run detection on an input image.\n", + "\n", + " Args:\n", + " input_tensor: A [1, height, width, 3] Tensor of type tf.float32.\n", + " Note that height and width can be anything since the image will be\n", + " immediately resized according to the needs of the model within this\n", + " function.\n", + "\n", + " Returns:\n", + " A dict containing 3 Tensors (`detection_boxes`, `detection_classes`,\n", + " and `detection_scores`).\n", + " \"\"\"\n", + " preprocessed_image, shapes = detection_model.preprocess(input_tensor)\n", + " prediction_dict = detection_model.predict(preprocessed_image, shapes)\n", + " return detection_model.postprocess(prediction_dict, shapes)\n", + "\n", + "# Note that the first frame will trigger tracing of the tf.function, which will\n", + "# take some time, after which inference should be fast.\n", + "\n", + "label_id_offset = 1\n", + "for i in range(len(test_images_np)):\n", + " input_tensor = tf.convert_to_tensor(test_images_np[i], dtype=tf.float32)\n", + " detections = detect(input_tensor)\n", + "\n", + " plot_detections(\n", + " test_images_np[i][0],\n", + " detections['detection_boxes'][0].numpy(),\n", + " detections['detection_classes'][0].numpy().astype(np.uint32)\n", + " + label_id_offset,\n", + " detections['detection_scores'][0].numpy(),\n", + " category_index, figsize=(15, 20), image_name=\"gif_frame_\" + ('%02d' % i) + \".jpg\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "RW1FrT2iNnpy" + }, + "outputs": [], + "source": [ + "imageio.plugins.freeimage.download()\n", + "\n", + "anim_file = 'duckies_test.gif'\n", + "\n", + "filenames = glob.glob('gif_frame_*.jpg')\n", + "filenames = sorted(filenames)\n", + "last = -1\n", + "images = []\n", + "for filename in filenames:\n", + " image = imageio.imread(filename)\n", + " images.append(image)\n", + "\n", + "imageio.mimsave(anim_file, images, 'GIF-FI', fps=5)\n", + "\n", + "display(IPyImage(open(anim_file, 'rb').read()))" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "interactive_eager_few_shot_od_training_colab.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/eager_few_shot_od_training_tflite.ipynb b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/eager_few_shot_od_training_tflite.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..b47d4bdb4f1bc81c8e8727721fbb9db732de8e22 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/eager_few_shot_od_training_tflite.ipynb @@ -0,0 +1,730 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {
+ "id": "rOvvWAVTkMR7" + }, + "source": [ + "# Introduction\n", + "\n", + "Welcome to the **Few Shot Object Detection for TensorFlow Lite** Colab. Here, we demonstrate fine tuning of a SSD architecture (pre-trained on COCO) on very few examples of a *novel* class. We will then generate a (downloadable) TensorFlow Lite model for on-device inference.\n", + "\n", + "**NOTE:** This Colab is meant for the few-shot detection use-case. To train a model on a large dataset, please follow the [TF2 training](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_training_and_evaluation.md#training) documentation and then [convert](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/running_on_mobile_tf2.md) the model to TensorFlow Lite." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3U2sv0upw04O" + }, + "source": [ + "# Set Up" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vPs64QA1Zdov" + }, + "source": [ + "## Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "H0rKBV4uZacD" + }, + "outputs": [], + "source": [ + "# Support for TF2 models was added after TF 2.3.\n", + "!pip install tf-nightly" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "oi28cqGGFWnY" + }, + "outputs": [], + "source": [ + "import os\n", + "import pathlib\n", + "\n", + "# Clone the tensorflow models repository if it doesn't already exist\n", + "if \"models\" in pathlib.Path.cwd().parts:\n", + " while \"models\" in pathlib.Path.cwd().parts:\n", + " os.chdir('..')\n", + "elif not pathlib.Path('models').exists():\n", + " !git clone --depth 1 https://github.com/tensorflow/models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NwdsBdGhFanc" + }, + "outputs": [], + "source": [ + "# Install the Object Detection API\n", + "%%bash\n", + "cd models/research/\n", + "protoc object_detection/protos/*.proto --python_out=.\n", + "cp object_detection/packages/tf2/setup.py .\n", + "python -m pip install ." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uZcqD4NLdnf4" + }, + "outputs": [], + "source": [ + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import os\n", + "import random\n", + "import io\n", + "import imageio\n", + "import glob\n", + "import scipy.misc\n", + "import numpy as np\n", + "from six import BytesIO\n", + "from PIL import Image, ImageDraw, ImageFont\n", + "from IPython.display import display, Javascript\n", + "from IPython.display import Image as IPyImage\n", + "\n", + "import tensorflow as tf\n", + "\n", + "from object_detection.utils import label_map_util\n", + "from object_detection.utils import config_util\n", + "from object_detection.utils import visualization_utils as viz_utils\n", + "from object_detection.utils import colab_utils\n", + "from object_detection.utils import config_util\n", + "from object_detection.builders import model_builder\n", + "\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IogyryF2lFBL" + }, + "source": [ + "##Utilities" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "-y9R0Xllefec" + }, + "outputs": [], + "source": [ + "def load_image_into_numpy_array(path):\n", + " \"\"\"Load an image from file into a numpy array.\n", + "\n", + " Puts image into numpy array to feed into tensorflow graph.\n", + " Note that by convention we put it into a numpy array with shape\n", + " (height, width, channels), where channels=3 for RGB.\n", + "\n", + " Args:\n", + " path: a file path.\n", + "\n", + " Returns:\n", + " uint8 numpy array with shape (img_height, img_width, 3)\n", + " \"\"\"\n", + " img_data = tf.io.gfile.GFile(path, 'rb').read()\n", + " image = Image.open(BytesIO(img_data))\n", + " (im_width, im_height) = image.size\n", + " return np.array(image.getdata()).reshape(\n", + " (im_height, im_width, 3)).astype(np.uint8)\n", + "\n", + "def plot_detections(image_np,\n", + " boxes,\n", + " classes,\n", + " scores,\n", + " category_index,\n", + " figsize=(12, 16),\n", + " image_name=None):\n", + " \"\"\"Wrapper function to visualize detections.\n", + "\n", + " Args:\n", + " image_np: uint8 numpy array with shape (img_height, img_width, 3)\n", + " boxes: a numpy array of shape [N, 4]\n", + " classes: a numpy array of shape [N]. Note that class indices are 1-based,\n", + " and match the keys in the label map.\n", + " scores: a numpy array of shape [N] or None. 
If scores=None, then\n", + " this function assumes that the boxes to be plotted are groundtruth\n", + " boxes and plot all boxes as black with no classes or scores.\n", + " category_index: a dict containing category dictionaries (each holding\n", + " category index `id` and category name `name`) keyed by category indices.\n", + " figsize: size for the figure.\n", + " image_name: a name for the image file.\n", + " \"\"\"\n", + " image_np_with_annotations = image_np.copy()\n", + " viz_utils.visualize_boxes_and_labels_on_image_array(\n", + " image_np_with_annotations,\n", + " boxes,\n", + " classes,\n", + " scores,\n", + " category_index,\n", + " use_normalized_coordinates=True,\n", + " min_score_thresh=0.8)\n", + " if image_name:\n", + " plt.imsave(image_name, image_np_with_annotations)\n", + " else:\n", + " plt.imshow(image_np_with_annotations)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sSaXL28TZfk1" + }, + "source": [ + "## Rubber Ducky data\n", + "\n", + "We will start with some toy data consisting of 5 images of a rubber\n", + "ducky. Note that the [COCO](https://cocodataset.org/#explore) dataset contains a number of animals, but notably, it does *not* contain rubber duckies (or even ducks for that matter), so this is a novel class." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "SQy3ND7EpFQM" + }, + "outputs": [], + "source": [ + "# Load images and visualize\n", + "train_image_dir = 'models/research/object_detection/test_images/ducky/train/'\n", + "train_images_np = []\n", + "for i in range(1, 6):\n", + " image_path = os.path.join(train_image_dir, 'robertducky' + str(i) + '.jpg')\n", + " train_images_np.append(load_image_into_numpy_array(image_path))\n", + "\n", + "plt.rcParams['axes.grid'] = False\n", + "plt.rcParams['xtick.labelsize'] = False\n", + "plt.rcParams['ytick.labelsize'] = False\n", + "plt.rcParams['xtick.top'] = False\n", + "plt.rcParams['xtick.bottom'] = False\n", + "plt.rcParams['ytick.left'] = False\n", + "plt.rcParams['ytick.right'] = False\n", + "plt.rcParams['figure.figsize'] = [14, 7]\n", + "\n", + "for idx, train_image_np in enumerate(train_images_np):\n", + " plt.subplot(2, 3, idx+1)\n", + " plt.imshow(train_image_np)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "LbOe9Ym7xMGV" + }, + "source": [ + "# Transfer Learning\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Dqb_yjAo3cO_" + }, + "source": [ + "## Data Preparation\n", + "\n", + "First, we populate the groundtruth with pre-annotated bounding boxes.\n", + "\n", + "We then add the class annotations (for simplicity, we assume a single 'Duck' class in this colab; though it should be straightforward to extend this to handle multiple classes). We also convert everything to the format that the training\n", + "loop below expects (e.g., everything converted to tensors, classes converted to one-hot representations, etc.)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wIAT6ZUmdHOC" + }, + "outputs": [], + "source": [ + "gt_boxes = [\n", + " np.array([[0.436, 0.591, 0.629, 0.712]], dtype=np.float32),\n", + " np.array([[0.539, 0.583, 0.73, 0.71]], dtype=np.float32),\n", + " np.array([[0.464, 0.414, 0.626, 0.548]], dtype=np.float32),\n", + " np.array([[0.313, 0.308, 0.648, 0.526]], dtype=np.float32),\n", + " np.array([[0.256, 0.444, 0.484, 0.629]], dtype=np.float32)\n", + "]\n", + "\n", + "# By convention, our non-background classes start counting at 1. 
Given\n", + "# that we will be predicting just one class, we will therefore assign it a\n", + "# `class id` of 1.\n", + "duck_class_id = 1\n", + "num_classes = 1\n", + "\n", + "category_index = {duck_class_id: {'id': duck_class_id, 'name': 'rubber_ducky'}}\n", + "\n", + "# Convert class labels to one-hot; convert everything to tensors.\n", + "# The `label_id_offset` here shifts all classes by a certain number of indices;\n", + "# we do this here so that the model receives one-hot labels where non-background\n", + "# classes start counting at the zeroth index. This is ordinarily just handled\n", + "# automatically in our training binaries, but we need to reproduce it here.\n", + "label_id_offset = 1\n", + "train_image_tensors = []\n", + "gt_classes_one_hot_tensors = []\n", + "gt_box_tensors = []\n", + "for (train_image_np, gt_box_np) in zip(\n", + " train_images_np, gt_boxes):\n", + " train_image_tensors.append(tf.expand_dims(tf.convert_to_tensor(\n", + " train_image_np, dtype=tf.float32), axis=0))\n", + " gt_box_tensors.append(tf.convert_to_tensor(gt_box_np, dtype=tf.float32))\n", + " zero_indexed_groundtruth_classes = tf.convert_to_tensor(\n", + " np.ones(shape=[gt_box_np.shape[0]], dtype=np.int32) - label_id_offset)\n", + " gt_classes_one_hot_tensors.append(tf.one_hot(\n", + " zero_indexed_groundtruth_classes, num_classes))\n", + "print('Done prepping data.')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b3_Z3mJWN9KJ" + }, + "source": [ + "Let's just visualize the rubber duckies as a sanity check\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "YBD6l-E4N71y" + }, + "outputs": [], + "source": [ + "dummy_scores = np.array([1.0], dtype=np.float32) # give boxes a score of 100%\n", + "\n", + "plt.figure(figsize=(30, 15))\n", + "for idx in range(5):\n", + " plt.subplot(2, 3, idx+1)\n", + " plot_detections(\n", + " train_images_np[idx],\n", + " gt_boxes[idx],\n", + " np.ones(shape=[gt_boxes[idx].shape[0]], dtype=np.int32),\n", + " dummy_scores, category_index)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ghDAsqfoZvPh" + }, + "source": [ + "## Load mobile-friendly model\n", + "\n", + "In this cell we build a mobile-friendly single-stage detection architecture (SSD MobileNet V2 FPN-Lite) and restore all but the classification layer at the top (which will be randomly initialized).\n", + "\n", + "**NOTE**: TensorFlow Lite only supports SSD models for now.\n", + "\n", + "For simplicity, we have hardcoded a number of things in this colab for the specific SSD architecture at hand (including assuming that the image size will always be 320x320), however it is not difficult to generalize to other model configurations (`pipeline.config` in the zip downloaded from the [Model Zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.)).\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9J16r3NChD-7" + }, + "outputs": [], + "source": [ + "# Download the checkpoint and put it into models/research/object_detection/test_data/\n", + "\n", + "!wget http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz\n", + "!tar -xf ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz\n", + "!if [ -d \"models/research/object_detection/test_data/checkpoint\" ]; then rm -Rf models/research/object_detection/test_data/checkpoint; fi\n", + "!mkdir 
models/research/object_detection/test_data/checkpoint\n", + "!mv ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint models/research/object_detection/test_data/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "RyT4BUbaMeG-" + }, + "outputs": [], + "source": [ + "tf.keras.backend.clear_session()\n", + "\n", + "print('Building model and restoring weights for fine-tuning...', flush=True)\n", + "num_classes = 1\n", + "pipeline_config = 'models/research/object_detection/configs/tf2/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.config'\n", + "checkpoint_path = 'models/research/object_detection/test_data/checkpoint/ckpt-0'\n", + "\n", + "# This will be where we save checkpoint \u0026 config for TFLite conversion later.\n", + "output_directory = 'output/'\n", + "output_checkpoint_dir = os.path.join(output_directory, 'checkpoint')\n", + "\n", + "# Load pipeline config and build a detection model.\n", + "#\n", + "# Since we are working off of a COCO architecture which predicts 90\n", + "# class slots by default, we override the `num_classes` field here to be just\n", + "# one (for our new rubber ducky class).\n", + "configs = config_util.get_configs_from_pipeline_file(pipeline_config)\n", + "model_config = configs['model']\n", + "model_config.ssd.num_classes = num_classes\n", + "model_config.ssd.freeze_batchnorm = True\n", + "detection_model = model_builder.build(\n", + " model_config=model_config, is_training=True)\n", + "# Save new pipeline config\n", + "pipeline_proto = config_util.create_pipeline_proto_from_configs(configs)\n", + "config_util.save_pipeline_config(pipeline_proto, output_directory)\n", + "\n", + "# Set up object-based checkpoint restore --- SSD has two prediction\n", + "# `heads` --- one for classification, the other for box regression. 
We will\n", + "# restore the box regression head but initialize the classification head\n", + "# from scratch (we show the omission below by commenting out the line that\n", + "# we would add if we wanted to restore both heads)\n", + "fake_box_predictor = tf.compat.v2.train.Checkpoint(\n", + " _base_tower_layers_for_heads=detection_model._box_predictor._base_tower_layers_for_heads,\n", + " # _prediction_heads=detection_model._box_predictor._prediction_heads,\n", + " # (i.e., the classification head that we *will not* restore)\n", + " _box_prediction_head=detection_model._box_predictor._box_prediction_head,\n", + " )\n", + "fake_model = tf.compat.v2.train.Checkpoint(\n", + " _feature_extractor=detection_model._feature_extractor,\n", + " _box_predictor=fake_box_predictor)\n", + "ckpt = tf.compat.v2.train.Checkpoint(model=fake_model)\n", + "ckpt.restore(checkpoint_path).expect_partial()\n", + "\n", + "# To save checkpoint for TFLite conversion.\n", + "exported_ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)\n", + "ckpt_manager = tf.train.CheckpointManager(\n", + " exported_ckpt, output_checkpoint_dir, max_to_keep=1)\n", + "\n", + "# Run model through a dummy image so that variables are created\n", + "image, shapes = detection_model.preprocess(tf.zeros([1, 320, 320, 3]))\n", + "prediction_dict = detection_model.predict(image, shapes)\n", + "_ = detection_model.postprocess(prediction_dict, shapes)\n", + "print('Weights restored!')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pCkWmdoZZ0zJ" + }, + "source": [ + "## Eager training loop (Fine-tuning)\n", + "\n", + "Some of the parameters in this block have been set empirically: for example, `learning_rate`, `num_batches` \u0026 `momentum` for SGD. These are just a starting point, you will have to tune these for your data \u0026 model architecture to get the best results.\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nyHoF4mUrv5-" + }, + "outputs": [], + "source": [ + "tf.keras.backend.set_learning_phase(True)\n", + "\n", + "# These parameters can be tuned; since our training set has 5 images\n", + "# it doesn't make sense to have a much larger batch size, though we could\n", + "# fit more examples in memory if we wanted to.\n", + "batch_size = 5\n", + "learning_rate = 0.15\n", + "num_batches = 1000\n", + "\n", + "# Select variables in top layers to fine-tune.\n", + "trainable_variables = detection_model.trainable_variables\n", + "to_fine_tune = []\n", + "prefixes_to_train = [\n", + " 'WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalBoxHead',\n", + " 'WeightSharedConvolutionalBoxPredictor/WeightSharedConvolutionalClassHead']\n", + "for var in trainable_variables:\n", + " if any([var.name.startswith(prefix) for prefix in prefixes_to_train]):\n", + " to_fine_tune.append(var)\n", + "\n", + "# Set up forward + backward pass for a single train step.\n", + "def get_model_train_step_function(model, optimizer, vars_to_fine_tune):\n", + " \"\"\"Get a tf.function for training step.\"\"\"\n", + "\n", + " # Use tf.function for a bit of speed.\n", + " # Comment out the tf.function decorator if you want the inside of the\n", + " # function to run eagerly.\n", + " @tf.function\n", + " def train_step_fn(image_tensors,\n", + " groundtruth_boxes_list,\n", + " groundtruth_classes_list):\n", + " \"\"\"A single training iteration.\n", + "\n", + " Args:\n", + " image_tensors: A list of [1, height, width, 3] Tensor of type tf.float32.\n", + " Note that the 
height and width can vary across images, as they are\n", + " reshaped within this function to be 320x320.\n", + " groundtruth_boxes_list: A list of Tensors of shape [N_i, 4] with type\n", + " tf.float32 representing groundtruth boxes for each image in the batch.\n", + " groundtruth_classes_list: A list of Tensors of shape [N_i, num_classes]\n", + " with type tf.float32 representing groundtruth boxes for each image in\n", + " the batch.\n", + "\n", + " Returns:\n", + " A scalar tensor representing the total loss for the input batch.\n", + " \"\"\"\n", + " shapes = tf.constant(batch_size * [[320, 320, 3]], dtype=tf.int32)\n", + " model.provide_groundtruth(\n", + " groundtruth_boxes_list=groundtruth_boxes_list,\n", + " groundtruth_classes_list=groundtruth_classes_list)\n", + " with tf.GradientTape() as tape:\n", + " preprocessed_images = tf.concat(\n", + " [detection_model.preprocess(image_tensor)[0]\n", + " for image_tensor in image_tensors], axis=0)\n", + " prediction_dict = model.predict(preprocessed_images, shapes)\n", + " losses_dict = model.loss(prediction_dict, shapes)\n", + " total_loss = losses_dict['Loss/localization_loss'] + losses_dict['Loss/classification_loss']\n", + " gradients = tape.gradient(total_loss, vars_to_fine_tune)\n", + " optimizer.apply_gradients(zip(gradients, vars_to_fine_tune))\n", + " return total_loss\n", + "\n", + " return train_step_fn\n", + "\n", + "optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)\n", + "train_step_fn = get_model_train_step_function(\n", + " detection_model, optimizer, to_fine_tune)\n", + "\n", + "print('Start fine-tuning!', flush=True)\n", + "for idx in range(num_batches):\n", + " # Grab keys for a random subset of examples\n", + " all_keys = list(range(len(train_images_np)))\n", + " random.shuffle(all_keys)\n", + " example_keys = all_keys[:batch_size]\n", + "\n", + " # Note that we do not do data augmentation in this demo. If you want a\n", + " # a fun exercise, we recommend experimenting with random horizontal flipping\n", + " # and random cropping :)\n", + " gt_boxes_list = [gt_box_tensors[key] for key in example_keys]\n", + " gt_classes_list = [gt_classes_one_hot_tensors[key] for key in example_keys]\n", + " image_tensors = [train_image_tensors[key] for key in example_keys]\n", + "\n", + " # Training step (forward pass + backwards pass)\n", + " total_loss = train_step_fn(image_tensors, gt_boxes_list, gt_classes_list)\n", + "\n", + " if idx % 100 == 0:\n", + " print('batch ' + str(idx) + ' of ' + str(num_batches)\n", + " + ', loss=' + str(total_loss.numpy()), flush=True)\n", + "\n", + "print('Done fine-tuning!')\n", + "\n", + "ckpt_manager.save()\n", + "print('Checkpoint saved!')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cYk1_9Fc2lZO" + }, + "source": [ + "# Export \u0026 run with TensorFlow Lite\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "y0nsDVEd9SuX" + }, + "source": [ + "## Model Conversion\n", + "\n", + "First, we invoke the `export_tflite_graph_tf2.py` script to generate a TFLite-friendly intermediate SavedModel. This will then be passed to the TensorFlow Lite Converter for generating the final model.\n", + "\n", + "To know more about this process, please look at [this documentation](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/running_on_mobile_tf2.md)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dyrqHSQQ7WKE" + }, + "outputs": [], + "source": [ + "%%bash\n", + "python models/research/object_detection/export_tflite_graph_tf2.py \\\n", + " --pipeline_config_path output/pipeline.config \\\n", + " --trained_checkpoint_dir output/checkpoint \\\n", + " --output_directory tflite" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "m5hjPyR78bgs" + }, + "outputs": [], + "source": [ + "!tflite_convert --saved_model_dir=tflite/saved_model --output_file=tflite/model.tflite" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WHlXL1x_Z3tc" + }, + "source": [ + "## Test .tflite model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WcE6OwrHQJya" + }, + "outputs": [], + "source": [ + "test_image_dir = 'models/research/object_detection/test_images/ducky/test/'\n", + "test_images_np = []\n", + "for i in range(1, 50):\n", + " image_path = os.path.join(test_image_dir, 'out' + str(i) + '.jpg')\n", + " test_images_np.append(np.expand_dims(\n", + " load_image_into_numpy_array(image_path), axis=0))\n", + "\n", + "# Note that `detect` is not wrapped in tf.function here, since it drives the\n", + "# TFLite interpreter, which runs eagerly in Python.\n", + "def detect(interpreter, input_tensor):\n", + " \"\"\"Run detection on an input image.\n", + "\n", + " Args:\n", + " interpreter: tf.lite.Interpreter\n", + " input_tensor: A [1, height, width, 3] Tensor of type tf.float32.\n", + " Note that height and width can be anything since the image will be\n", + " immediately resized according to the needs of the model within this\n", + " function.\n", + "\n", + " Returns:\n", + " A tuple (boxes, classes, scores) of numpy arrays produced by the\n", + " TFLite interpreter.\n", + " \"\"\"\n", + " input_details = interpreter.get_input_details()\n", + " output_details = interpreter.get_output_details()\n", + "\n", + " # We use the original model for pre-processing, since the TFLite model doesn't\n", + " # include pre-processing.\n", + " preprocessed_image, shapes = detection_model.preprocess(input_tensor)\n", + " interpreter.set_tensor(input_details[0]['index'], preprocessed_image.numpy())\n", + "\n", + " interpreter.invoke()\n", + "\n", + " boxes = interpreter.get_tensor(output_details[0]['index'])\n", + " classes = interpreter.get_tensor(output_details[1]['index'])\n", + " scores = interpreter.get_tensor(output_details[2]['index'])\n", + " return boxes, classes, scores\n", + "\n", + "# Load the TFLite model and allocate tensors.\n", + "interpreter = tf.lite.Interpreter(model_path=\"tflite/model.tflite\")\n", + "interpreter.allocate_tensors()\n", + "\n", + "# Note that, unlike the eager TF2 colab, there is no tf.function tracing here;\n", + "# each frame is simply run through the TFLite interpreter.\n", + "\n", + "label_id_offset = 1\n", + "for i in range(len(test_images_np)):\n", + " input_tensor = tf.convert_to_tensor(test_images_np[i], dtype=tf.float32)\n", + " boxes, classes, scores = detect(interpreter, input_tensor)\n", + "\n", + " plot_detections(\n", + " test_images_np[i][0],\n", + " boxes[0],\n", + " classes[0].astype(np.uint32) + label_id_offset,\n", + " scores[0],\n", + " category_index, figsize=(15, 20), image_name=\"gif_frame_\" + ('%02d' % i) + \".jpg\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ZkMPOSQE0x8C" + }, + "outputs": [], + "source": [ + "imageio.plugins.freeimage.download()\n", + "\n", + "anim_file = 'duckies_test.gif'\n", + "\n",
+ "filenames = glob.glob('gif_frame_*.jpg')\n", + "filenames = sorted(filenames)\n", + "last = -1\n", + "images = []\n", + "for filename in filenames:\n", + " image = imageio.imread(filename)\n", + " images.append(image)\n", + "\n", + "imageio.mimsave(anim_file, images, 'GIF-FI', fps=5)\n", + "\n", + "display(IPyImage(open(anim_file, 'rb').read()))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yzaHWsS58_PQ" + }, + "source": [ + "## (Optional) Download model\n", + "\n", + "This model can be run on-device with **TensorFlow Lite**. Look at [our SSD model signature](https://www.tensorflow.org/lite/models/object_detection/overview#uses_and_limitations) to understand how to interpret the model IO tensors. Our [Object Detection example](https://github.com/tensorflow/examples/tree/master/lite/examples/object_detection) is a good starting point for integrating the model into your mobile app.\n", + "\n", + "Refer to TFLite's [inference documentation](https://www.tensorflow.org/lite/guide/inference) for more details." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gZ6vac3RAY3j" + }, + "outputs": [], + "source": [ + "from google.colab import files\n", + "files.download('tflite/model.tflite') " + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "eager_few_shot_od_training_tflite.ipynb", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/inference_from_saved_model_tf2_colab.ipynb b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/inference_from_saved_model_tf2_colab.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..1e88f4c5d52435297bd2ba8c8bb47a2f3c346f30 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/inference_from_saved_model_tf2_colab.ipynb @@ -0,0 +1,313 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "inference_from_saved_model_tf2_colab.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "cT5cdSLPX0ui" + }, + "source": [ + "# Intro to Object Detection Colab\n", + "\n", + "Welcome to the object detection colab! 
This demo will take you through the steps of running an \"out-of-the-box\" detection model in SavedModel format on a collection of images.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "vPs64QA1Zdov" + }, + "source": [ + "Imports" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "OBzb04bdNGM8", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install -U --pre tensorflow==\"2.2.0\"" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "NgSXyvKSNHIl", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import os\n", + "import pathlib\n", + "\n", + "# Clone the tensorflow models repository if it doesn't already exist\n", + "if \"models\" in pathlib.Path.cwd().parts:\n", + " while \"models\" in pathlib.Path.cwd().parts:\n", + " os.chdir('..')\n", + "elif not pathlib.Path('models').exists():\n", + " !git clone --depth 1 https://github.com/tensorflow/models" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "rhpPgW7TNLs6", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Install the Object Detection API\n", + "%%bash\n", + "cd models/research/\n", + "protoc object_detection/protos/*.proto --python_out=.\n", + "cp object_detection/packages/tf2/setup.py .\n", + "python -m pip install ." + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "yn5_uV1HLvaz", + "colab": {} + }, + "source": [ + "import io\n", + "import os\n", + "import scipy.misc\n", + "import numpy as np\n", + "import six\n", + "import time\n", + "\n", + "from six import BytesIO\n", + "\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "from PIL import Image, ImageDraw, ImageFont\n", + "\n", + "import tensorflow as tf\n", + "from object_detection.utils import visualization_utils as viz_utils\n", + "\n", + "%matplotlib inline" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "-y9R0Xllefec", + "colab": {} + }, + "source": [ + "def load_image_into_numpy_array(path):\n", + " \"\"\"Load an image from file into a numpy array.\n", + "\n", + " Puts image into numpy array to feed into tensorflow graph.\n", + " Note that by convention we put it into a numpy array with shape\n", + " (height, width, channels), where channels=3 for RGB.\n", + "\n", + " Args:\n", + " path: a file path (this can be local or on colossus)\n", + "\n", + " Returns:\n", + " uint8 numpy array with shape (img_height, img_width, 3)\n", + " \"\"\"\n", + " img_data = tf.io.gfile.GFile(path, 'rb').read()\n", + " image = Image.open(BytesIO(img_data))\n", + " (im_width, im_height) = image.size\n", + " return np.array(image.getdata()).reshape(\n", + " (im_height, im_width, 3)).astype(np.uint8)\n", + "\n", + "# Load the COCO Label Map\n", + "category_index = {\n", + " 1: {'id': 1, 'name': 'person'},\n", + " 2: {'id': 2, 'name': 'bicycle'},\n", + " 3: {'id': 3, 'name': 'car'},\n", + " 4: {'id': 4, 'name': 'motorcycle'},\n", + " 5: {'id': 5, 'name': 'airplane'},\n", + " 6: {'id': 6, 'name': 'bus'},\n", + " 7: {'id': 7, 'name': 'train'},\n", + " 8: {'id': 8, 'name': 'truck'},\n", + " 9: {'id': 9, 'name': 'boat'},\n", + " 10: {'id': 10, 'name': 'traffic light'},\n", + " 11: {'id': 11, 'name': 'fire hydrant'},\n", + " 13: {'id': 13, 'name': 'stop sign'},\n", + " 14: {'id': 14, 'name': 'parking meter'},\n", + " 15: 
{'id': 15, 'name': 'bench'},\n", + " 16: {'id': 16, 'name': 'bird'},\n", + " 17: {'id': 17, 'name': 'cat'},\n", + " 18: {'id': 18, 'name': 'dog'},\n", + " 19: {'id': 19, 'name': 'horse'},\n", + " 20: {'id': 20, 'name': 'sheep'},\n", + " 21: {'id': 21, 'name': 'cow'},\n", + " 22: {'id': 22, 'name': 'elephant'},\n", + " 23: {'id': 23, 'name': 'bear'},\n", + " 24: {'id': 24, 'name': 'zebra'},\n", + " 25: {'id': 25, 'name': 'giraffe'},\n", + " 27: {'id': 27, 'name': 'backpack'},\n", + " 28: {'id': 28, 'name': 'umbrella'},\n", + " 31: {'id': 31, 'name': 'handbag'},\n", + " 32: {'id': 32, 'name': 'tie'},\n", + " 33: {'id': 33, 'name': 'suitcase'},\n", + " 34: {'id': 34, 'name': 'frisbee'},\n", + " 35: {'id': 35, 'name': 'skis'},\n", + " 36: {'id': 36, 'name': 'snowboard'},\n", + " 37: {'id': 37, 'name': 'sports ball'},\n", + " 38: {'id': 38, 'name': 'kite'},\n", + " 39: {'id': 39, 'name': 'baseball bat'},\n", + " 40: {'id': 40, 'name': 'baseball glove'},\n", + " 41: {'id': 41, 'name': 'skateboard'},\n", + " 42: {'id': 42, 'name': 'surfboard'},\n", + " 43: {'id': 43, 'name': 'tennis racket'},\n", + " 44: {'id': 44, 'name': 'bottle'},\n", + " 46: {'id': 46, 'name': 'wine glass'},\n", + " 47: {'id': 47, 'name': 'cup'},\n", + " 48: {'id': 48, 'name': 'fork'},\n", + " 49: {'id': 49, 'name': 'knife'},\n", + " 50: {'id': 50, 'name': 'spoon'},\n", + " 51: {'id': 51, 'name': 'bowl'},\n", + " 52: {'id': 52, 'name': 'banana'},\n", + " 53: {'id': 53, 'name': 'apple'},\n", + " 54: {'id': 54, 'name': 'sandwich'},\n", + " 55: {'id': 55, 'name': 'orange'},\n", + " 56: {'id': 56, 'name': 'broccoli'},\n", + " 57: {'id': 57, 'name': 'carrot'},\n", + " 58: {'id': 58, 'name': 'hot dog'},\n", + " 59: {'id': 59, 'name': 'pizza'},\n", + " 60: {'id': 60, 'name': 'donut'},\n", + " 61: {'id': 61, 'name': 'cake'},\n", + " 62: {'id': 62, 'name': 'chair'},\n", + " 63: {'id': 63, 'name': 'couch'},\n", + " 64: {'id': 64, 'name': 'potted plant'},\n", + " 65: {'id': 65, 'name': 'bed'},\n", + " 67: {'id': 67, 'name': 'dining table'},\n", + " 70: {'id': 70, 'name': 'toilet'},\n", + " 72: {'id': 72, 'name': 'tv'},\n", + " 73: {'id': 73, 'name': 'laptop'},\n", + " 74: {'id': 74, 'name': 'mouse'},\n", + " 75: {'id': 75, 'name': 'remote'},\n", + " 76: {'id': 76, 'name': 'keyboard'},\n", + " 77: {'id': 77, 'name': 'cell phone'},\n", + " 78: {'id': 78, 'name': 'microwave'},\n", + " 79: {'id': 79, 'name': 'oven'},\n", + " 80: {'id': 80, 'name': 'toaster'},\n", + " 81: {'id': 81, 'name': 'sink'},\n", + " 82: {'id': 82, 'name': 'refrigerator'},\n", + " 84: {'id': 84, 'name': 'book'},\n", + " 85: {'id': 85, 'name': 'clock'},\n", + " 86: {'id': 86, 'name': 'vase'},\n", + " 87: {'id': 87, 'name': 'scissors'},\n", + " 88: {'id': 88, 'name': 'teddy bear'},\n", + " 89: {'id': 89, 'name': 'hair drier'},\n", + " 90: {'id': 90, 'name': 'toothbrush'},\n", + "}" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "QwcBC2TlPSwg", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Download the saved model and put it into models/research/object_detection/test_data/\n", + "!wget http://download.tensorflow.org/models/object_detection/tf2/20200711/efficientdet_d5_coco17_tpu-32.tar.gz\n", + "!tar -xf efficientdet_d5_coco17_tpu-32.tar.gz\n", + "!mv efficientdet_d5_coco17_tpu-32/ models/research/object_detection/test_data/" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "Z2p-PmKLYCVU", + "colab": {} + }, + "source": [ 
+ "start_time = time.time()\n", + "tf.keras.backend.clear_session()\n", + "detect_fn = tf.saved_model.load('models/research/object_detection/test_data/efficientdet_d5_coco17_tpu-32/saved_model/')\n", + "end_time = time.time()\n", + "elapsed_time = end_time - start_time\n", + "print('Elapsed time: ' + str(elapsed_time) + 's')" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "vukkhd5-9NSL", + "colab": {} + }, + "source": [ + "import time\n", + "\n", + "image_dir = 'models/research/object_detection/test_images'\n", + "\n", + "elapsed = []\n", + "for i in range(2):\n", + " image_path = os.path.join(image_dir, 'image' + str(i + 1) + '.jpg')\n", + " image_np = load_image_into_numpy_array(image_path)\n", + " input_tensor = np.expand_dims(image_np, 0)\n", + " start_time = time.time()\n", + " detections = detect_fn(input_tensor)\n", + " end_time = time.time()\n", + " elapsed.append(end_time - start_time)\n", + "\n", + " plt.rcParams['figure.figsize'] = [42, 21]\n", + " label_id_offset = 1\n", + " image_np_with_detections = image_np.copy()\n", + " viz_utils.visualize_boxes_and_labels_on_image_array(\n", + " image_np_with_detections,\n", + " detections['detection_boxes'][0].numpy(),\n", + " detections['detection_classes'][0].numpy().astype(np.int32),\n", + " detections['detection_scores'][0].numpy(),\n", + " category_index,\n", + " use_normalized_coordinates=True,\n", + " max_boxes_to_draw=200,\n", + " min_score_thresh=.40,\n", + " agnostic_mode=False)\n", + " plt.subplot(2, 1, i+1)\n", + " plt.imshow(image_np_with_detections)\n", + "\n", + "mean_elapsed = sum(elapsed) / float(len(elapsed))\n", + "print('Elapsed time: ' + str(mean_elapsed) + ' second per image')" + ], + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/inference_tf2_colab.ipynb b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/inference_tf2_colab.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..6b5cfaa787f074f142010eb2e33db7f38bd18c00 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/inference_tf2_colab.ipynb @@ -0,0 +1,470 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "rOvvWAVTkMR7" + }, + "source": [ + "# Intro to Object Detection Colab\n", + "\n", + "Welcome to the object detection colab! This demo will take you through the steps of running an \"out-of-the-box\" detection model on a collection of images." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "vPs64QA1Zdov" + }, + "source": [ + "## Imports and Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "LBZ9VWZZFUCT" + }, + "outputs": [], + "source": [ + "!pip install -U --pre tensorflow==\"2.2.0\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "oi28cqGGFWnY" + }, + "outputs": [], + "source": [ + "import os\n", + "import pathlib\n", + "\n", + "# Clone the tensorflow models repository if it doesn't already exist\n", + "if \"models\" in pathlib.Path.cwd().parts:\n", + " while \"models\" in pathlib.Path.cwd().parts:\n", + " os.chdir('..')\n", + "elif not pathlib.Path('models').exists():\n", + " !git clone --depth 1 https://github.com/tensorflow/models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "NwdsBdGhFanc" + }, + "outputs": [], + "source": [ + "# Install the Object Detection API\n", + "%%bash\n", + "cd models/research/\n", + "protoc object_detection/protos/*.proto --python_out=.\n", + "cp object_detection/packages/tf2/setup.py .\n", + "python -m pip install ." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "yn5_uV1HLvaz" + }, + "outputs": [], + "source": [ + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import io\n", + "import scipy.misc\n", + "import numpy as np\n", + "from six import BytesIO\n", + "from PIL import Image, ImageDraw, ImageFont\n", + "\n", + "import tensorflow as tf\n", + "\n", + "from object_detection.utils import label_map_util\n", + "from object_detection.utils import config_util\n", + "from object_detection.utils import visualization_utils as viz_utils\n", + "from object_detection.builders import model_builder\n", + "\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "IogyryF2lFBL" + }, + "source": [ + "## Utilities" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "-y9R0Xllefec" + }, + "outputs": [], + "source": [ + "def load_image_into_numpy_array(path):\n", + " \"\"\"Load an image from file into a numpy array.\n", + "\n", + " Puts image into numpy array to feed into tensorflow graph.\n", + " Note that by convention we put it into a numpy array with shape\n", + " (height, width, channels), where channels=3 for RGB.\n", + "\n", + " Args:\n", + " path: the file path to the image\n", + "\n", + " Returns:\n", + " uint8 numpy array with shape (img_height, img_width, 3)\n", + " \"\"\"\n", + " img_data = tf.io.gfile.GFile(path, 'rb').read()\n", + " image = Image.open(BytesIO(img_data))\n", + " (im_width, im_height) = image.size\n", + " return np.array(image.getdata()).reshape(\n", + " (im_height, im_width, 3)).astype(np.uint8)\n", + "\n", + "def get_keypoint_tuples(eval_config):\n", + " \"\"\"Return a tuple list of keypoint edges from the eval config.\n", + " \n", + " Args:\n", + " eval_config: an eval config containing the keypoint edges\n", + " \n", + " Returns:\n", + " a list of edge tuples, each in the format (start, end)\n", + " \"\"\"\n", + " tuple_list = []\n", + " kp_list = eval_config.keypoint_edge\n", + " for edge in kp_list:\n", + " tuple_list.append((edge.start, edge.end))\n", + " return 
tuple_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "R4YjnOjME1gy" + }, + "outputs": [], + "source": [ + "# @title Choose the model to use, then evaluate the cell.\n", + "MODELS = {'centernet_with_keypoints': 'centernet_hg104_512x512_kpts_coco17_tpu-32', 'centernet_without_keypoints': 'centernet_hg104_512x512_coco17_tpu-8'}\n", + "\n", + "model_display_name = 'centernet_with_keypoints' # @param ['centernet_with_keypoints', 'centernet_without_keypoints']\n", + "model_name = MODELS[model_display_name]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "6917xnUSlp9x" + }, + "source": [ + "### Build a detection model and load pre-trained model weights\n", + "\n", + "This sometimes takes a little while, please be patient!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ctPavqlyPuU_" + }, + "outputs": [], + "source": [ + "# Download the checkpoint and put it into models/research/object_detection/test_data/\n", + "\n", + "if model_display_name == 'centernet_with_keypoints':\n", + " !wget http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_hg104_512x512_kpts_coco17_tpu-32.tar.gz\n", + " !tar -xf centernet_hg104_512x512_kpts_coco17_tpu-32.tar.gz\n", + " !mv centernet_hg104_512x512_kpts_coco17_tpu-32/checkpoint models/research/object_detection/test_data/\n", + "else:\n", + " !wget http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_hg104_512x512_coco17_tpu-8.tar.gz\n", + " !tar -xf centernet_hg104_512x512_coco17_tpu-8.tar.gz\n", + " !mv centernet_hg104_512x512_coco17_tpu-8/checkpoint models/research/object_detection/test_data/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "4cni4SSocvP_" + }, + "outputs": [], + "source": [ + "pipeline_config = os.path.join('models/research/object_detection/configs/tf2/',\n", + " model_name + '.config')\n", + "model_dir = 'models/research/object_detection/test_data/checkpoint/'\n", + "\n", + "# Load pipeline config and build a detection model\n", + "configs = config_util.get_configs_from_pipeline_file(pipeline_config)\n", + "model_config = configs['model']\n", + "detection_model = model_builder.build(\n", + " model_config=model_config, is_training=False)\n", + "\n", + "# Restore checkpoint\n", + "ckpt = tf.compat.v2.train.Checkpoint(\n", + " model=detection_model)\n", + "ckpt.restore(os.path.join(model_dir, 'ckpt-0')).expect_partial()\n", + "\n", + "def get_model_detection_function(model):\n", + " \"\"\"Get a tf.function for detection.\"\"\"\n", + "\n", + " @tf.function\n", + " def detect_fn(image):\n", + " \"\"\"Detect objects in image.\"\"\"\n", + "\n", + " image, shapes = model.preprocess(image)\n", + " prediction_dict = model.predict(image, shapes)\n", + " detections = model.postprocess(prediction_dict, shapes)\n", + "\n", + " return detections, prediction_dict, tf.reshape(shapes, [-1])\n", + "\n", + " return detect_fn\n", + "\n", + "detect_fn = get_model_detection_function(detection_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "NKtD0IeclbL5" + }, + "source": [ + "# Load label map data (for plotting).\n", + "\n", + "Label maps correspond index numbers to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "5mucYUS6exUJ" + }, + "outputs": [], + "source": [ + "label_map_path = configs['eval_input_config'].label_map_path\n", + "label_map = label_map_util.load_labelmap(label_map_path)\n", + "categories = label_map_util.convert_label_map_to_categories(\n", + " label_map,\n", + " max_num_classes=label_map_util.get_max_label_map_index(label_map),\n", + " use_display_name=True)\n", + "category_index = label_map_util.create_category_index(categories)\n", + "label_map_dict = label_map_util.get_label_map_dict(label_map, use_display_name=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RLusV1o-mAx8" + }, + "source": [ + "### Putting everything together!\n", + "\n", + "Run the below code which loads an image, runs it through the detection model and visualizes the detection results, including the keypoints.\n", + "\n", + "Note that this will take a long time (several minutes) the first time you run this code due to tf.function's trace-compilation --- on subsequent runs (e.g. on new images), things will be faster.\n", + "\n", + "Here are some simple things to try out if you are curious:\n", + "* Try running inference on your own images (local paths work)\n", + "* Modify some of the input images and see if detection still works. Some simple things to try out here (just uncomment the relevant portions of code) include flipping the image horizontally, or converting to grayscale (note that we still expect the input image to have 3 channels).\n", + "* Print out `detections['detection_boxes']` and try to match the box locations to the boxes in the image. 
Notice that coordinates are given in normalized form (i.e., in the interval [0, 1]).\n", + "* Set min_score_thresh to other values (between 0 and 1) to allow more detections in or to filter out more detections.\n", + "\n", + "Note that you can run this cell repeatedly without rerunning earlier cells.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "vr_Fux-gfaG9" + }, + "outputs": [], + "source": [ + "image_dir = 'models/research/object_detection/test_images/'\n", + "image_path = os.path.join(image_dir, 'image2.jpg')\n", + "image_np = load_image_into_numpy_array(image_path)\n", + "\n", + "# Things to try:\n", + "# Flip horizontally\n", + "# image_np = np.fliplr(image_np).copy()\n", + "\n", + "# Convert image to grayscale\n", + "# image_np = np.tile(\n", + "# np.mean(image_np, 2, keepdims=True), (1, 1, 3)).astype(np.uint8)\n", + "\n", + "input_tensor = tf.convert_to_tensor(\n", + " np.expand_dims(image_np, 0), dtype=tf.float32)\n", + "detections, predictions_dict, shapes = detect_fn(input_tensor)\n", + "\n", + "label_id_offset = 1\n", + "image_np_with_detections = image_np.copy()\n", + "\n", + "# Use keypoints if available in detections\n", + "keypoints, keypoint_scores = None, None\n", + "if 'detection_keypoints' in detections:\n", + " keypoints = detections['detection_keypoints'][0].numpy()\n", + " keypoint_scores = detections['detection_keypoint_scores'][0].numpy()\n", + "\n", + "viz_utils.visualize_boxes_and_labels_on_image_array(\n", + " image_np_with_detections,\n", + " detections['detection_boxes'][0].numpy(),\n", + " (detections['detection_classes'][0].numpy() + label_id_offset).astype(int),\n", + " detections['detection_scores'][0].numpy(),\n", + " category_index,\n", + " use_normalized_coordinates=True,\n", + " max_boxes_to_draw=200,\n", + " min_score_thresh=.30,\n", + " agnostic_mode=False,\n", + " keypoints=keypoints,\n", + " keypoint_scores=keypoint_scores,\n", + " keypoint_edges=get_keypoint_tuples(configs['eval_config']))\n", + "\n", + "plt.figure(figsize=(12,16))\n", + "plt.imshow(image_np_with_detections)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "lYnOxprty3TD" + }, + "source": [ + "## Digging into the model's intermediate predictions\n", + "\n", + "For this part we will assume that the detection model is a CenterNet model following Zhou et al (https://arxiv.org/abs/1904.07850). And more specifically, we will assume that `detection_model` is of type `meta_architectures.center_net_meta_arch.CenterNetMetaArch`.\n", + "\n", + "As one of its intermediate predictions, CenterNet produces a heatmap of box centers for each class (for example, it will produce a heatmap whose size is proportional to that of the image that lights up at the center of each, e.g., \"zebra\"). In the following, we will visualize these intermediate class center heatmap predictions." 
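A brief note on the transform used in the next cell: the per-class center logits are mapped into [0, 1] with the inverse logit, written out as exp(x) / (exp(x) + 1), which is simply the sigmoid. A quick sketch of the equivalence (the logit values are arbitrary):

```python
import tensorflow as tf

# The heatmap cell writes the inverse logit out explicitly; tf.math.sigmoid
# computes the same thing and is numerically safer for very negative logits.
logits = tf.constant([-4.0, 0.0, 2.5])
manual = tf.exp(logits) / (tf.exp(logits) + 1.0)
builtin = tf.math.sigmoid(logits)
print(manual.numpy())   # ~[0.018, 0.5, 0.924]
print(builtin.numpy())  # same values
```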
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "xBgYgSGMhHVi" + }, + "outputs": [], + "source": [ + "if detection_model.__class__.__name__ != 'CenterNetMetaArch':\n", + " raise AssertionError('The meta-architecture for this section '\n", + " 'is assumed to be CenterNetMetaArch!')\n", + "\n", + "def get_heatmap(predictions_dict, class_name):\n", + " \"\"\"Grabs class center logits and apply inverse logit transform.\n", + "\n", + " Args:\n", + " predictions_dict: dictionary of tensors containing a `object_center`\n", + " field of shape [1, heatmap_width, heatmap_height, num_classes]\n", + " class_name: string name of category (e.g., `horse`)\n", + "\n", + " Returns:\n", + " heatmap: 2d Tensor heatmap representing heatmap of centers for a given class\n", + " (For CenterNet, this is 128x128 or 256x256) with values in [0,1]\n", + " \"\"\"\n", + " class_index = label_map_dict[class_name]\n", + " class_center_logits = predictions_dict['object_center'][0]\n", + " class_center_logits = class_center_logits[0][\n", + " :, :, class_index - label_id_offset]\n", + " heatmap = tf.exp(class_center_logits) / (tf.exp(class_center_logits) + 1)\n", + " return heatmap\n", + "\n", + "def unpad_heatmap(heatmap, image_np):\n", + " \"\"\"Reshapes/unpads heatmap appropriately.\n", + "\n", + " Reshapes/unpads heatmap appropriately to match image_np.\n", + "\n", + " Args:\n", + " heatmap: Output of `get_heatmap`, a 2d Tensor\n", + " image_np: uint8 numpy array with shape (img_height, img_width, 3). Note\n", + " that due to padding, the relationship between img_height and img_width\n", + " might not be a simple scaling.\n", + "\n", + " Returns:\n", + " resized_heatmap_unpadded: a resized heatmap (2d Tensor) that is the same\n", + " size as `image_np`\n", + " \"\"\"\n", + " heatmap = tf.tile(tf.expand_dims(heatmap, 2), [1, 1, 3]) * 255\n", + " pre_strided_size = detection_model._stride * heatmap.shape[0]\n", + " resized_heatmap = tf.image.resize(\n", + " heatmap, [pre_strided_size, pre_strided_size],\n", + " method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n", + " resized_heatmap_unpadded = tf.slice(resized_heatmap, begin=[0,0,0], size=shapes)\n", + " return tf.image.resize(\n", + " resized_heatmap_unpadded,\n", + " [image_np.shape[0], image_np.shape[1]],\n", + " method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)[:,:,0]\n", + "\n", + "\n", + "class_name = 'kite'\n", + "heatmap = get_heatmap(predictions_dict, class_name)\n", + "resized_heatmap_unpadded = unpad_heatmap(heatmap, image_np)\n", + "plt.figure(figsize=(12,16))\n", + "plt.imshow(image_np_with_detections)\n", + "plt.imshow(resized_heatmap_unpadded, alpha=0.7,vmin=0, vmax=160, cmap='viridis')\n", + "plt.title('Object center heatmap (class: ' + class_name + ')')\n", + "plt.show()\n", + "\n", + "class_name = 'person'\n", + "heatmap = get_heatmap(predictions_dict, class_name)\n", + "resized_heatmap_unpadded = unpad_heatmap(heatmap, image_np)\n", + "plt.figure(figsize=(12,16))\n", + "plt.imshow(image_np_with_detections)\n", + "plt.imshow(resized_heatmap_unpadded, alpha=0.7,vmin=0, vmax=160, cmap='viridis')\n", + "plt.title('Object center heatmap (class: ' + class_name + ')')\n", + "plt.show()" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "inference_tf2_colab.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/object_detection_tutorial.ipynb b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/object_detection_tutorial.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..2185a1c7cc40d9df56d8f52840786cbcd1762f1e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/colab_tutorials/object_detection_tutorial.ipynb @@ -0,0 +1,847 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "V8-yl-s-WKMG" + }, + "source": [ + "# Object Detection API Demo\n", + "\n", + "
\n", + " \n", + " Run in Google Colab\n", + " \n", + "\n", + " \n", + " View source on GitHub\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "3cIrseUv6WKz" + }, + "source": [ + "Welcome to the [Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection). This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "VrJaG0cYN9yh" + }, + "source": [ + "> **Important**: This tutorial is to help you through the first step towards using [Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection) to build models. If you just just need an off the shelf model that does the job, see the [TFHub object detection example](https://colab.sandbox.google.com/github/tensorflow/hub/blob/master/examples/colab/object_detection.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "kFSqkTCdWKMI" + }, + "source": [ + "# Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "awjrpqy-6MaQ" + }, + "source": [ + "Important: If you're running on a local machine, be sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). This notebook includes only what's necessary to run in Colab." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "p3UGXxUii5Ym" + }, + "source": [ + "### Install" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hGL97-GXjSUw" + }, + "outputs": [], + "source": [ + "!pip install -U --pre tensorflow==\"2.*\"\n", + "!pip install tf_slim" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "n_ap_s9ajTHH" + }, + "source": [ + "Make sure you have `pycocotools` installed" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Bg8ZyA47i3pY" + }, + "outputs": [], + "source": [ + "!pip install pycocotools" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "-vsOL3QR6kqs" + }, + "source": [ + "Get `tensorflow/models` or `cd` to parent directory of the repository." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ykA0c-om51s1" + }, + "outputs": [], + "source": [ + "import os\n", + "import pathlib\n", + "\n", + "\n", + "if \"models\" in pathlib.Path.cwd().parts:\n", + " while \"models\" in pathlib.Path.cwd().parts:\n", + " os.chdir('..')\n", + "elif not pathlib.Path('models').exists():\n", + " !git clone --depth 1 https://github.com/tensorflow/models" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "O219m6yWAj9l" + }, + "source": [ + "Compile protobufs and install the object_detection package" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "PY41vdYYNlXc" + }, + "outputs": [], + "source": [ + "%%bash\n", + "cd models/research/\n", + "protoc object_detection/protos/*.proto --python_out=." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "s62yJyQUcYbp" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing /home/job/models/research\n", + "Collecting Pillow>=1.0 (from object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/12/ad/61f8dfba88c4e56196bf6d056cdbba64dc9c5dfdfbc97d02e6472feed913/Pillow-6.2.2-cp27-cp27mu-manylinux1_x86_64.whl\n", + "Collecting Matplotlib>=2.1 (from object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/9d/40/5ba7d4a3f80d39d409f21899972596bf62c8606f1406a825029649eaa439/matplotlib-2.2.5-cp27-cp27mu-manylinux1_x86_64.whl\n", + "Collecting Cython>=0.28.1 (from object-detection==0.1)\n", + " Downloading https://files.pythonhosted.org/packages/59/c1/0b69d125ab9819869cffff2f416158acf2684bdb4bf54eccf887717e2cbd/Cython-0.29.21-cp27-cp27mu-manylinux1_x86_64.whl (1.9MB)\n", + "Collecting cycler>=0.10 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/f7/d2/e07d3ebb2bd7af696440ce7e754c59dd546ffe1bbe732c8ab68b9c834e61/cycler-0.10.0-py2.py3-none-any.whl\n", + "Collecting numpy>=1.7.1 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/3a/5f/47e578b3ae79e2624e205445ab77a1848acdaa2929a00eeef6b16eaaeb20/numpy-1.16.6-cp27-cp27mu-manylinux1_x86_64.whl\n", + "Collecting backports.functools-lru-cache (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/da/d1/080d2bb13773803648281a49e3918f65b31b7beebf009887a529357fd44a/backports.functools_lru_cache-1.6.1-py2.py3-none-any.whl\n", + "Collecting subprocess32 (from Matplotlib>=2.1->object-detection==0.1)\n", + "Collecting kiwisolver>=1.0.1 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/3d/78/cb9248b2289ec31e301137cedbe4ca503a74ca87f88cdbfd2f8be52323bf/kiwisolver-1.1.0-cp27-cp27mu-manylinux1_x86_64.whl\n", + "Collecting pytz (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/4f/a4/879454d49688e2fad93e59d7d4efda580b783c745fd2ec2a3adf87b0808d/pytz-2020.1-py2.py3-none-any.whl\n", + "Collecting six>=1.10 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/ee/ff/48bde5c0f013094d729fe4b0316ba2a24774b3ff1c52d924a8a4cb04078a/six-1.15.0-py2.py3-none-any.whl\n", + "Collecting python-dateutil>=2.1 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/d4/70/d60450c3dd48ef87586924207ae8907090de0b306af2bce5d134d78615cb/python_dateutil-2.8.1-py2.py3-none-any.whl\n", + "Collecting pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 (from Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/8a/bb/488841f56197b13700afd5658fc279a2025a39e22449b7cf29864669b15d/pyparsing-2.4.7-py2.py3-none-any.whl\n", + "Collecting setuptools (from kiwisolver>=1.0.1->Matplotlib>=2.1->object-detection==0.1)\n", + " Using cached https://files.pythonhosted.org/packages/e1/b7/182161210a13158cd3ccc41ee19aadef54496b74f2817cc147006ec932b4/setuptools-44.1.1-py2.py3-none-any.whl\n", + "Installing collected packages: Pillow, six, cycler, numpy, backports.functools-lru-cache, subprocess32, setuptools, kiwisolver, pytz, python-dateutil, pyparsing, Matplotlib, Cython, object-detection\n", + 
" Running setup.py install for object-detection: started\n", + " Running setup.py install for object-detection: finished with status 'done'\n", + "Successfully installed Cython-0.29.21 Matplotlib-2.2.5 Pillow-6.2.2 backports.functools-lru-cache-1.6.1 cycler-0.10.0 kiwisolver-1.1.0 numpy-1.16.6 object-detection-0.1 pyparsing-2.4.7 python-dateutil-2.8.1 pytz-2020.1 setuptools-44.1.1 six-1.15.0 subprocess32-3.5.4\n" + ] + } + ], + "source": [ + "%%bash \n", + "cd models/research\n", + "pip install ." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "LBdjK2G5ywuc" + }, + "source": [ + "### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hV4P5gyTWKMI" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a 
synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/home/job/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "import os\n", + "import six.moves.urllib as urllib\n", + "import sys\n", + "import tarfile\n", + "import tensorflow as tf\n", + "import zipfile\n", + "\n", + "from collections import defaultdict\n", + "from io import StringIO\n", + "from matplotlib import pyplot as plt\n", + "from PIL import Image\n", + "from IPython.display import display" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "r5FNuiRPWKMN" + }, + "source": [ + "Import the object detection module." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "4-IMl4b6BdGO" + }, + "outputs": [], + "source": [ + "from object_detection.utils import ops as utils_ops\n", + "from object_detection.utils import label_map_util\n", + "from object_detection.utils import visualization_utils as vis_util" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RYPCiag2iz_q" + }, + "source": [ + "Patches:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "mF-YlMl8c_bM" + }, + "outputs": [], + "source": [ + "# patch tf1 into `utils.ops`\n", + "utils_ops.tf = tf.compat.v1\n", + "\n", + "# Patch the location of gfile\n", + "tf.gfile = tf.io.gfile" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "cfn_tRFOWKMO" + }, + "source": [ + "# Model preparation " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "X_sEBLpVWKMQ" + }, + "source": [ + "## Variables\n", + "\n", + "Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing the path.\n", + "\n", + "By default we use an \"SSD with Mobilenet\" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies." 
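In this copy of the notebook the loader below ignores its `model_name` argument and points at a locally exported graph instead of downloading one. A minimal sketch of what loading such an export looks like (the directory is a placeholder for wherever the exporter wrote `saved_model.pb`):

```python
import tensorflow as tf

# Placeholder path: the directory must contain the saved_model.pb produced by
# export_inference_graph.py (TF1) or the TF2 exporter.
saved_model_dir = '/path/to/exported_model/saved_model'

model = tf.compat.v2.saved_model.load(saved_model_dir)
infer = model.signatures['serving_default']

# The serving signature describes what the graph expects and what it returns.
print(infer.structured_input_signature)
print(infer.structured_outputs)
```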
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7ai8pLZZWKMS" + }, + "source": [ + "## Loader" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "zm8xp-0eoItE" + }, + "outputs": [], + "source": [ + "def load_model(model_name):\n", + "\n", + " model_dir = pathlib.Path('/home/job/models/inference_graph')/\"saved_model\"\n", + "\n", + " model = tf.compat.v2.saved_model.load(str(model_dir))\n", + "\n", + " return model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "_1MVVTcLWKMW" + }, + "source": [ + "## Loading label map\n", + "Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hDbpHkiWWKMX" + }, + "outputs": [], + "source": [ + "# List of the strings that is used to add correct label for each box.\n", + "PATH_TO_LABELS = 'models/annotations/label_map.pbtxt'\n", + "category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "oVU3U_J6IJVb" + }, + "source": [ + "For the sake of simplicity we will test on 2 images:" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "jG-zn5ykWKMd" + }, + "outputs": [], + "source": [ + "# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.\n", + "#PATH_TO_TEST_IMAGES_DIR = pathlib.Path('models/images/test')\n", + "#TEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob(\"*.jpg\")))\n", + "#TEST_IMAGE_PATHS" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "H0_1AGhrWKMc" + }, + "source": [ + "# Detection" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "f7aOtOlebK7h" + }, + "source": [ + "Load an object detection model:" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "1XNT0wxybKR6" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Saver not created because there are no variables in the graph to restore\n" + ] + } + ], + "source": [ + "model_name = 'ssd_mobilenet_v1_coco_2018_01_28'\n", + "detection_model = load_model(model_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "yN1AYfAEJIGp" + }, + "source": [ + "Check the model's input signature, it expects a batch of 3-color images of type uint8:" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "CK4cnry6wsHY" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[]\n" + ] + } + ], + "source": [ + "print(detection_model.signatures['serving_default'].inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Q8u3BjpMJXZF" + }, + "source": [ + "And returns several 
outputs:" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "oLSZpfaYwuSk" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'detection_classes': tf.float32,\n", + " 'detection_scores': tf.float32,\n", + " 'raw_detection_scores': tf.float32,\n", + " 'raw_detection_boxes': tf.float32,\n", + " 'detection_boxes': tf.float32,\n", + " 'num_detections': tf.float32}" + ] + }, + "execution_count": 53, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "detection_model.signatures['serving_default'].output_dtypes" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "FZyKUJeuxvpT" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'detection_classes': TensorShape([Dimension(None), Dimension(100)]),\n", + " 'detection_scores': TensorShape([Dimension(None), Dimension(100)]),\n", + " 'raw_detection_scores': TensorShape([Dimension(None), Dimension(None), Dimension(3)]),\n", + " 'raw_detection_boxes': TensorShape([Dimension(None), Dimension(None), Dimension(4)]),\n", + " 'detection_boxes': TensorShape([Dimension(None), Dimension(100), Dimension(4)]),\n", + " 'num_detections': TensorShape([Dimension(None)])}" + ] + }, + "execution_count": 54, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "detection_model.signatures['serving_default'].output_shapes" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JP5qZ7sXJpwG" + }, + "source": [ + "Add a wrapper function to call the model, and cleanup the outputs:" + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ajmR_exWyN76" + }, + "outputs": [], + "source": [ + "def run_inference_for_single_image(model, image):\n", + " image = np.asarray(image)\n", + " # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.\n", + " input_tensor = tf.convert_to_tensor(image)\n", + " # The model expects a batch of images, so add an axis with `tf.newaxis`.\n", + " input_tensor = input_tensor[tf.newaxis,...]\n", + "\n", + " # Run inference\n", + " model_fn = model.signatures['serving_default']\n", + " output_dict = model_fn(input_tensor)\n", + "\n", + " # All outputs are batches tensors.\n", + " # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n", + " # We're only interested in the first num_detections.\n", + " num_detections = int(output_dict.pop(1))\n", + " output_dict = {key:value[0, :num_detections].numpy() \n", + " for key,value in output_dict.items()}\n", + " output_dict['num_detections'] = num_detections\n", + "\n", + " # detection_classes should be ints.\n", + " output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n", + " \n", + " # Handle models with masks:\n", + " if 'detection_masks' in output_dict:\n", + " # Reframe the the bbox mask to the image size.\n", + " detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n", + " output_dict['detection_masks'], output_dict['detection_boxes'],\n", + " image.shape[0], image.shape[1]) \n", + " detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,\n", + " tf.uint8)\n", + " output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()\n", + " \n", + " return output_dict" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "z1wq0LVyMRR_" + }, + "source": [ + 
"Run it on each test image and show the results:" + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "DWh_1zz6aqxs" + }, + "outputs": [], + "source": [ + "def show_inference(model, image_path):\n", + " # the array based representation of the image will be used later in order to prepare the\n", + " # result image with boxes and labels on it.\n", + " image_np = np.array(Image.open(image_path))\n", + " # Actual detection.\n", + " output_dict = run_inference_for_single_image(model, image_np)\n", + " # Visualization of the results of a detection.\n", + " vis_util.visualize_boxes_and_labels_on_image_array(\n", + " image_np,\n", + " output_dict['detection_boxes'],\n", + " output_dict['detection_classes'],\n", + " output_dict['detection_scores'],\n", + " category_index,\n", + " instance_masks=output_dict.get('detection_masks_reframed', None),\n", + " use_normalized_coordinates=True,\n", + " line_thickness=8)\n", + "\n", + " display(Image.fromarray(image_np))" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "3a5wMHN8WKMh" + }, + "outputs": [ + { + "ename": "KeyError", + "evalue": "1", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m#for image_path in TEST_IMAGE_PATHS:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0mimagepath\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"/home/job/models/test.jpg\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mshow_inference\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdetection_model\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimage_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m\u001b[0m in \u001b[0;36mshow_inference\u001b[0;34m(model, image_path)\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mimage_np\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mImage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;31m# Actual detection.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0moutput_dict\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mrun_inference_for_single_image\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimage_np\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0;31m# Visualization of the results of a detection.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m vis_util.visualize_boxes_and_labels_on_image_array(\n", + "\u001b[0;32m\u001b[0m in \u001b[0;36mrun_inference_for_single_image\u001b[0;34m(model, image)\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;31m# Convert to numpy arrays, and take index [0] to remove the batch 
dimension.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# We're only interested in the first num_detections.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0mnum_detections\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput_dict\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m output_dict = {key:value[0, :num_detections].numpy() \n\u001b[1;32m 17\u001b[0m for key,value in output_dict.items()}\n", + "\u001b[0;31mKeyError\u001b[0m: 1" + ] + } + ], + "source": [ + "#for image_path in TEST_IMAGE_PATHS:\n", + "imagepath = \"/home/job/models/test.jpg\"\n", + "show_inference(detection_model, image_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "DsspMPX3Cssg" + }, + "source": [ + "## Instance Segmentation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "CzkVv_n2MxKC" + }, + "outputs": [], + "source": [ + "model_name = \"mask_rcnn_inception_resnet_v2_atrous_coco_2018_01_28\"\n", + "masking_model = load_model(model_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0S7aZi8ZOhVV" + }, + "source": [ + "The instance segmentation model includes a `detection_masks` output:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "vQ2Sj2VIOZLA" + }, + "outputs": [], + "source": [ + "masking_model.output_shapes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "AS57rZlnNL7W" + }, + "outputs": [], + "source": [ + "for image_path in TEST_IMAGE_PATHS:\n", + " show_inference(masking_model, image_path)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "last_runtime": { + "build_target": "//learning/brain/python/client:colab_notebook", + "kind": "private" + }, + "name": "object_detection_tutorial.ipynb", + "private_outputs": true, + "provenance": [ + { + "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/colab_tutorials/object_detection_tutorial.ipynb", + "timestamp": 1594335690840 + }, + { + "file_id": "1LNYL6Zsn9Xlil2CVNOTsgDZQSBKeOjCh", + "timestamp": 1566498233247 + }, + { + "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1566488313397 + }, + { + "file_id": "/piper/depot/google3/third_party/py/tensorflow_docs/g3doc/en/r2/tutorials/generative/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1566145894046 + }, + { + "file_id": "1nBPoWynOV0auSIy40eQcBIk9C6YRSkI8", + "timestamp": 1566145841085 + }, + { + "file_id": "/piper/depot/google3/third_party/tensorflow_models/object_detection/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1556295408037 + }, + { + "file_id": "1layerger-51XwWOwYMY_5zHaCavCeQkO", + "timestamp": 1556214267924 + }, + { + "file_id": 
"/piper/depot/google3/third_party/tensorflow_models/object_detection/object_detection_tutorial.ipynb?workspaceId=markdaoust:copybara_AFABFE845DCD573AD3D43A6BAFBE77D4_0::citc", + "timestamp": 1556207836484 + }, + { + "file_id": "1w6mqQiNV3liPIX70NOgitOlDF1_4sRMw", + "timestamp": 1556154824101 + }, + { + "file_id": "https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb", + "timestamp": 1556150293326 + } + ] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_1024x1024_coco17_tpu-32.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_1024x1024_coco17_tpu-32.config new file mode 100644 index 0000000000000000000000000000000000000000..c0a90ef44c95a2572d5a80d4fcf21e11e04fe669 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_1024x1024_coco17_tpu-32.config @@ -0,0 +1,129 @@ +# CenterNet meta-architecture from the "Objects as Points" [2] paper with the +# hourglass[1] backbone. +# [1]: https://arxiv.org/abs/1603.06937 +# [2]: https://arxiv.org/abs/1904.07850 +# Trained on COCO, initialized from Extremenet Detection checkpoint +# Train on TPU-32 v3 +# +# Achieves 44.6 mAP on COCO17 Val + + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "hourglass_104" + bgr_ordering: true + channel_means: [104.01362025, 114.03422265, 119.9165958 ] + channel_stds: [73.6027665 , 69.89082075, 70.9150767 ] + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 1024 + max_dimension: 1024 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.1 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + min_box_overlap_iou: 0.7 + max_box_predictions: 100 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + } +} + +train_config: { + + batch_size: 128 + num_steps: 50000 + + data_augmentation_options { + random_horizontal_flip { + } + } + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_adjust_brightness { + } + } + + data_augmentation_options { + random_square_crop_by_scale { + scale_min: 0.6 + scale_max: 1.3 + } + } + + optimizer { + adam_optimizer: { + epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default. 
+ learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 1e-3 + total_steps: 50000 + warmup_learning_rate: 2.5e-4 + warmup_steps: 5000 + } + } + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-1" + fine_tune_checkpoint_type: "detection" +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_1024x1024_kpts_coco17_tpu-32.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_1024x1024_kpts_coco17_tpu-32.config new file mode 100644 index 0000000000000000000000000000000000000000..da7136f15db8a7a6201700ff761b4cab1387fdd2 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_1024x1024_kpts_coco17_tpu-32.config @@ -0,0 +1,374 @@ +# CenterNet meta-architecture from the "Objects as Points" [2] paper with the +# hourglass[1] backbone. This config achieves an mAP of 42.8/64.5 +/- 0.16 on +# COCO 17 (averaged over 5 runs). This config is TPU compatible. +# [1]: https://arxiv.org/abs/1603.06937 +# [2]: https://arxiv.org/abs/1904.07850 + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "hourglass_104" + channel_means: 104.01361846923828 + channel_means: 114.03422546386719 + channel_means: 119.91659545898438 + channel_stds: 73.60276794433594 + channel_stds: 69.89082336425781 + channel_stds: 70.91507720947266 + bgr_ordering: true + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 1024 + max_dimension: 1024 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.10000000149011612 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + min_box_overlap_iou: 0.699999988079071 + max_box_predictions: 100 + } + keypoint_label_map_path: "PATH_TO_BE_CONFIGURED" + keypoint_estimation_task { + task_name: "human_pose" + task_loss_weight: 1.0 + loss { + localization_loss { + l1_localization_loss { + } + } + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + keypoint_class_name: "/m/01g317" + keypoint_label_to_std { + key: "left_ankle" + value: 0.8899999856948853 + } + keypoint_label_to_std { + key: "left_ear" + value: 0.3499999940395355 + } + keypoint_label_to_std { + key: "left_elbow" + value: 0.7200000286102295 + } + keypoint_label_to_std { + key: "left_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "left_hip" + value: 1.0700000524520874 + } + keypoint_label_to_std { + key: "left_knee" + value: 0.8899999856948853 + } + keypoint_label_to_std { + key: "left_shoulder" + value: 0.7900000214576721 + } + keypoint_label_to_std { + key: "left_wrist" + 
value: 0.6200000047683716 + } + keypoint_label_to_std { + key: "nose" + value: 0.25999999046325684 + } + keypoint_label_to_std { + key: "right_ankle" + value: 0.8899999856948853 + } + keypoint_label_to_std { + key: "right_ear" + value: 0.3499999940395355 + } + keypoint_label_to_std { + key: "right_elbow" + value: 0.7200000286102295 + } + keypoint_label_to_std { + key: "right_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "right_hip" + value: 1.0700000524520874 + } + keypoint_label_to_std { + key: "right_knee" + value: 0.8899999856948853 + } + keypoint_label_to_std { + key: "right_shoulder" + value: 0.7900000214576721 + } + keypoint_label_to_std { + key: "right_wrist" + value: 0.6200000047683716 + } + keypoint_regression_loss_weight: 0.10000000149011612 + keypoint_heatmap_loss_weight: 1.0 + keypoint_offset_loss_weight: 1.0 + offset_peak_radius: 3 + per_keypoint_offset: true + } + } +} +train_config { + batch_size: 128 + data_augmentation_options { + random_horizontal_flip { + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 4 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 6 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 8 + keypoint_flip_permutation: 7 + keypoint_flip_permutation: 10 + keypoint_flip_permutation: 9 + keypoint_flip_permutation: 12 + keypoint_flip_permutation: 11 + keypoint_flip_permutation: 14 + keypoint_flip_permutation: 13 + keypoint_flip_permutation: 16 + keypoint_flip_permutation: 15 + } + } + data_augmentation_options { + random_adjust_hue { + } + } + data_augmentation_options { + random_adjust_contrast { + } + } + data_augmentation_options { + random_adjust_saturation { + } + } + data_augmentation_options { + random_adjust_brightness { + } + } + data_augmentation_options { + random_square_crop_by_scale { + scale_min: 0.6000000238418579 + scale_max: 1.2999999523162842 + } + } + optimizer { + adam_optimizer { + learning_rate { + cosine_decay_learning_rate { + learning_rate_base: 0.0010000000474974513 + total_steps: 250000 + warmup_learning_rate: 0.0002500000118743628 + warmup_steps: 5000 + } + } + epsilon: 1.0000000116860974e-07 + } + use_moving_average: false + } + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED" + num_steps: 250000 + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + fine_tune_checkpoint_type: "detection" + fine_tune_checkpoint_version: V2 +} +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + num_keypoints: 17 +} +eval_config { + num_visualizations: 10 + metrics_set: "coco_detection_metrics" + use_moving_averages: false + min_score_threshold: 0.20000000298023224 + max_num_boxes_to_visualize: 20 + batch_size: 1 + parameterized_metric { + coco_keypoint_metrics { + class_label: "person" + keypoint_label_to_sigmas { + key: "left_ankle" + value: 0.08900000154972076 + } + keypoint_label_to_sigmas { + key: "left_ear" + value: 0.03500000014901161 + } + keypoint_label_to_sigmas { + key: "left_elbow" + value: 0.07199999690055847 + } + keypoint_label_to_sigmas { + key: "left_eye" + value: 0.02500000037252903 + } + keypoint_label_to_sigmas { + key: "left_hip" + value: 0.10700000077486038 + } + keypoint_label_to_sigmas { + key: "left_knee" + value: 0.08699999749660492 + } + keypoint_label_to_sigmas { + key: "left_shoulder" + value: 0.07900000363588333 + } + keypoint_label_to_sigmas { + key: "left_wrist" + value: 
0.06199999898672104 + } + keypoint_label_to_sigmas { + key: "nose" + value: 0.026000000536441803 + } + keypoint_label_to_sigmas { + key: "right_ankle" + value: 0.08900000154972076 + } + keypoint_label_to_sigmas { + key: "right_ear" + value: 0.03500000014901161 + } + keypoint_label_to_sigmas { + key: "right_elbow" + value: 0.07199999690055847 + } + keypoint_label_to_sigmas { + key: "right_eye" + value: 0.02500000037252903 + } + keypoint_label_to_sigmas { + key: "right_hip" + value: 0.10700000077486038 + } + keypoint_label_to_sigmas { + key: "right_knee" + value: 0.08699999749660492 + } + keypoint_label_to_sigmas { + key: "right_shoulder" + value: 0.07900000363588333 + } + keypoint_label_to_sigmas { + key: "right_wrist" + value: 0.06199999898672104 + } + } + } + keypoint_edge { + start: 0 + end: 1 + } + keypoint_edge { + start: 0 + end: 2 + } + keypoint_edge { + start: 1 + end: 3 + } + keypoint_edge { + start: 2 + end: 4 + } + keypoint_edge { + start: 0 + end: 5 + } + keypoint_edge { + start: 0 + end: 6 + } + keypoint_edge { + start: 5 + end: 7 + } + keypoint_edge { + start: 7 + end: 9 + } + keypoint_edge { + start: 6 + end: 8 + } + keypoint_edge { + start: 8 + end: 10 + } + keypoint_edge { + start: 5 + end: 6 + } + keypoint_edge { + start: 5 + end: 11 + } + keypoint_edge { + start: 6 + end: 12 + } + keypoint_edge { + start: 11 + end: 12 + } + keypoint_edge { + start: 11 + end: 13 + } + keypoint_edge { + start: 13 + end: 15 + } + keypoint_edge { + start: 12 + end: 14 + } + keypoint_edge { + start: 14 + end: 16 + } +} +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } + num_keypoints: 17 +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_512x512_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_512x512_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..9e38d98939b25edc176d83352aa4f526a91a5828 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_512x512_coco17_tpu-8.config @@ -0,0 +1,143 @@ +# CenterNet meta-architecture from the "Objects as Points" [2] paper with the +# hourglass[1] backbone. 
+# [1]: https://arxiv.org/abs/1603.06937 +# [2]: https://arxiv.org/abs/1904.07850 +# Trained on COCO, initialized from Extremenet Detection checkpoint +# Train on TPU-8 +# +# Achieves 41.9 mAP on COCO17 Val + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "hourglass_104" + bgr_ordering: true + channel_means: [104.01362025, 114.03422265, 119.9165958 ] + channel_stds: [73.6027665 , 69.89082075, 70.9150767 ] + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.1 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + min_box_overlap_iou: 0.7 + max_box_predictions: 100 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + } +} + +train_config: { + + batch_size: 128 + num_steps: 140000 + + data_augmentation_options { + random_horizontal_flip { + } + } + + data_augmentation_options { + random_crop_image { + min_aspect_ratio: 0.5 + max_aspect_ratio: 1.7 + random_coef: 0.25 + } + } + + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_adjust_brightness { + } + } + + data_augmentation_options { + random_absolute_pad_image { + max_height_padding: 200 + max_width_padding: 200 + pad_color: [0, 0, 0] + } + } + + optimizer { + adam_optimizer: { + epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default. + learning_rate: { + manual_step_learning_rate { + initial_learning_rate: 1e-3 + schedule { + step: 90000 + learning_rate: 1e-4 + } + schedule { + step: 120000 + learning_rate: 1e-5 + } + } + } + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-1" + fine_tune_checkpoint_type: "detection" +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_512x512_kpts_coco17_tpu-32.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_512x512_kpts_coco17_tpu-32.config new file mode 100644 index 0000000000000000000000000000000000000000..ce5652895f9331261f28be1e23eca4ccb916d1e1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_hourglass104_512x512_kpts_coco17_tpu-32.config @@ -0,0 +1,395 @@ +# CenterNet meta-architecture from the "Objects as Points" [2] paper with the +# hourglass[1] backbone. This config achieves an mAP of 40.0/61.4 +/- 0.16 on +# COCO 17 (averaged over 5 runs). This config is TPU compatible. 
+# [1]: https://arxiv.org/abs/1603.06937 +# [2]: https://arxiv.org/abs/1904.07850 + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "hourglass_104" + bgr_ordering: true + channel_means: [104.01362025, 114.03422265, 119.9165958 ] + channel_stds: [73.6027665 , 69.89082075, 70.9150767 ] + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.1 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + min_box_overlap_iou: 0.7 + max_box_predictions: 100 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + + keypoint_label_map_path: "PATH_TO_BE_CONFIGURED" + keypoint_estimation_task { + task_name: "human_pose" + task_loss_weight: 1.0 + loss { + localization_loss { + l1_localization_loss { + } + } + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + keypoint_class_name: "/m/01g317" + keypoint_label_to_std { + key: "left_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "left_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "left_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "left_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "left_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "left_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "left_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "left_wrist" + value: 0.62 + } + keypoint_label_to_std { + key: "nose" + value: 0.26 + } + keypoint_label_to_std { + key: "right_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "right_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "right_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "right_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "right_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "right_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "right_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "right_wrist" + value: 0.62 + } + keypoint_regression_loss_weight: 0.1 + keypoint_heatmap_loss_weight: 1.0 + keypoint_offset_loss_weight: 1.0 + offset_peak_radius: 3 + per_keypoint_offset: true + } + } +} + +train_config: { + + batch_size: 128 + num_steps: 250000 + + data_augmentation_options { + random_horizontal_flip { + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 4 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 6 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 8 + keypoint_flip_permutation: 7 + keypoint_flip_permutation: 10 + keypoint_flip_permutation: 9 + keypoint_flip_permutation: 12 + keypoint_flip_permutation: 11 + keypoint_flip_permutation: 14 + keypoint_flip_permutation: 13 + keypoint_flip_permutation: 16 + keypoint_flip_permutation: 15 + } + } + + data_augmentation_options { + random_crop_image { + min_aspect_ratio: 0.5 + max_aspect_ratio: 1.7 + random_coef: 0.25 + } + } + + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_adjust_brightness { + } + } + + data_augmentation_options { + random_absolute_pad_image { + max_height_padding: 200 + 
max_width_padding: 200 + pad_color: [0, 0, 0] + } + } + + optimizer { + adam_optimizer: { + epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default. + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 1e-3 + total_steps: 250000 + warmup_learning_rate: 2.5e-4 + warmup_steps: 5000 + } + } + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED" + fine_tune_checkpoint_type: "detection" +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + num_keypoints: 17 +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + num_visualizations: 10 + max_num_boxes_to_visualize: 20 + min_score_threshold: 0.2 + batch_size: 1; + parameterized_metric { + coco_keypoint_metrics { + class_label: "person" + keypoint_label_to_sigmas { + key: "nose" + value: 0.026 + } + keypoint_label_to_sigmas { + key: "left_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "right_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "left_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "right_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "left_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "right_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "left_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "right_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "left_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "right_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "left_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "right_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "left_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "right_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "left_ankle" + value: 0.089 + } + keypoint_label_to_sigmas { + key: "right_ankle" + value: 0.089 + } + } + } + # Provide the edges to connect the keypoints. The setting is suitable for + # COCO's 17 human pose keypoints. 
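+  # Keypoint indices follow the COCO ordering: 0 nose, 1 left eye, 2 right eye,
+  # 3 left ear, 4 right ear, 5 left shoulder, 6 right shoulder, 7 left elbow,
+  # 8 right elbow, 9 left wrist, 10 right wrist, 11 left hip, 12 right hip,
+  # 13 left knee, 14 right knee, 15 left ankle, 16 right ankle.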
+ keypoint_edge { # nose-left eye + start: 0 + end: 1 + } + keypoint_edge { # nose-right eye + start: 0 + end: 2 + } + keypoint_edge { # left eye-left ear + start: 1 + end: 3 + } + keypoint_edge { # right eye-right ear + start: 2 + end: 4 + } + keypoint_edge { # nose-left shoulder + start: 0 + end: 5 + } + keypoint_edge { # nose-right shoulder + start: 0 + end: 6 + } + keypoint_edge { # left shoulder-left elbow + start: 5 + end: 7 + } + keypoint_edge { # left elbow-left wrist + start: 7 + end: 9 + } + keypoint_edge { # right shoulder-right elbow + start: 6 + end: 8 + } + keypoint_edge { # right elbow-right wrist + start: 8 + end: 10 + } + keypoint_edge { # left shoulder-right shoulder + start: 5 + end: 6 + } + keypoint_edge { # left shoulder-left hip + start: 5 + end: 11 + } + keypoint_edge { # right shoulder-right hip + start: 6 + end: 12 + } + keypoint_edge { # left hip-right hip + start: 11 + end: 12 + } + keypoint_edge { # left hip-left knee + start: 11 + end: 13 + } + keypoint_edge { # left knee-left ankle + start: 13 + end: 15 + } + keypoint_edge { # right hip-right knee + start: 12 + end: 14 + } + keypoint_edge { # right knee-right ankle + start: 14 + end: 16 + } +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + num_keypoints: 17 +} + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_resnet101_v1_fpn_512x512_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_resnet101_v1_fpn_512x512_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..2bb7f07ce5e6c325457d174202484a413c288ccd --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_resnet101_v1_fpn_512x512_coco17_tpu-8.config @@ -0,0 +1,141 @@ +# CenterNet meta-architecture from the "Objects as Points" [1] paper +# with the ResNet-v1-101 FPN backbone. +# [1]: https://arxiv.org/abs/1904.07850 + +# Train on TPU-8 +# +# Achieves 34.18 mAP on COCO17 Val + + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "resnet_v2_101" + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.1 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + min_box_overlap_iou: 0.7 + max_box_predictions: 100 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + } +} + +train_config: { + + batch_size: 128 + num_steps: 140000 + + data_augmentation_options { + random_horizontal_flip { + } + } + + data_augmentation_options { + random_crop_image { + min_aspect_ratio: 0.5 + max_aspect_ratio: 1.7 + random_coef: 0.25 + } + } + + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_adjust_brightness { + } + } + + data_augmentation_options { + random_absolute_pad_image { + max_height_padding: 200 + max_width_padding: 200 + pad_color: [0, 0, 0] + } + } + + optimizer { + adam_optimizer: { + epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default. 
+ learning_rate: { + manual_step_learning_rate { + initial_learning_rate: 1e-3 + schedule { + step: 90000 + learning_rate: 1e-4 + } + schedule { + step: 120000 + learning_rate: 1e-5 + } + } + } + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/weights-1" + fine_tune_checkpoint_type: "classification" +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..ad25d5c347dafc7f442c62e534e1a7b551c1d728 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8.config @@ -0,0 +1,392 @@ +# CenterNet meta-architecture from the "Objects as Points" [1] paper +# with the ResNet-v1-50 backbone. The ResNet backbone has a few differences +# as compared to the one mentioned in the paper, hence the performance is +# slightly worse. This config is TPU comptatible. 
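Editor's note: all of these pipeline files ship with `PATH_TO_BE_CONFIGURED` placeholders and COCO's 90 classes. A minimal sketch of rewriting one programmatically before training, assuming the TF2 Object Detection API's pipeline proto is importable; the class count and every path below are illustrative placeholders, not values from this change:

```python
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2

pipeline = pipeline_pb2.TrainEvalPipelineConfig()
with open("centernet_resnet101_v1_fpn_512x512_coco17_tpu-8.config") as f:
    text_format.Merge(f.read(), pipeline)

# Point the COCO template at a custom dataset (example values only).
pipeline.model.center_net.num_classes = 2
pipeline.train_config.fine_tune_checkpoint = "pretrained/checkpoint/ckpt-0"
pipeline.train_config.batch_size = 8            # the COCO defaults assume a TPU pod
pipeline.train_input_reader.label_map_path = "label_map.pbtxt"
pipeline.train_input_reader.tf_record_input_reader.input_path[0] = "train.record"
pipeline.eval_input_reader[0].label_map_path = "label_map.pbtxt"
pipeline.eval_input_reader[0].tf_record_input_reader.input_path[0] = "val.record"

with open("pipeline.config", "w") as f:
    f.write(text_format.MessageToString(pipeline))
```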
+# [1]: https://arxiv.org/abs/1904.07850 +# + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "resnet_v1_50_fpn" + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.1 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + min_box_overlap_iou: 0.7 + max_box_predictions: 100 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + keypoint_label_map_path: "PATH_TO_BE_CONFIGURED" + keypoint_estimation_task { + task_name: "human_pose" + task_loss_weight: 1.0 + loss { + localization_loss { + l1_localization_loss { + } + } + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + keypoint_class_name: "/m/01g317" + keypoint_label_to_std { + key: "left_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "left_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "left_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "left_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "left_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "left_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "left_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "left_wrist" + value: 0.62 + } + keypoint_label_to_std { + key: "nose" + value: 0.26 + } + keypoint_label_to_std { + key: "right_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "right_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "right_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "right_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "right_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "right_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "right_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "right_wrist" + value: 0.62 + } + keypoint_regression_loss_weight: 0.1 + keypoint_heatmap_loss_weight: 1.0 + keypoint_offset_loss_weight: 1.0 + offset_peak_radius: 3 + per_keypoint_offset: true + } + } +} + +train_config: { + + batch_size: 128 + num_steps: 250000 + + data_augmentation_options { + random_horizontal_flip { + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 4 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 6 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 8 + keypoint_flip_permutation: 7 + keypoint_flip_permutation: 10 + keypoint_flip_permutation: 9 + keypoint_flip_permutation: 12 + keypoint_flip_permutation: 11 + keypoint_flip_permutation: 14 + keypoint_flip_permutation: 13 + keypoint_flip_permutation: 16 + keypoint_flip_permutation: 15 + } + } + + data_augmentation_options { + random_crop_image { + min_aspect_ratio: 0.5 + max_aspect_ratio: 1.7 + random_coef: 0.25 + } + } + + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_adjust_brightness { + } + } + + data_augmentation_options { + random_absolute_pad_image { + max_height_padding: 200 + max_width_padding: 200 + pad_color: [0, 0, 0] + } + } + + optimizer { + adam_optimizer: { + epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default. 
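Editor's note: the `learning_rate` block that follows, shared by the CenterNet keypoint configs in this diff, ramps up linearly for 5,000 steps and then follows a cosine decay toward zero at 250,000 steps. A small sketch of that curve, assuming the usual cosine-with-warmup formulation rather than quoting the API's exact implementation:

```python
import math

def cosine_decay_with_warmup(step,
                             learning_rate_base=1e-3,
                             total_steps=250_000,
                             warmup_learning_rate=2.5e-4,
                             warmup_steps=5_000):
    """Approximate shape of cosine_decay_learning_rate with these parameters."""
    if step < warmup_steps:
        # Linear ramp from warmup_learning_rate up to learning_rate_base.
        slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
        return warmup_learning_rate + slope * step
    # Cosine decay from learning_rate_base down to ~0 at total_steps.
    progress = (step - warmup_steps) / float(total_steps - warmup_steps)
    return 0.5 * learning_rate_base * (1.0 + math.cos(math.pi * progress))

for s in (0, 2_500, 5_000, 125_000, 250_000):
    print(f"step {s:>7}: lr = {cosine_decay_with_warmup(s):.6f}")
```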
+ learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 1e-3 + total_steps: 250000 + warmup_learning_rate: 2.5e-4 + warmup_steps: 5000 + } + } + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED" + fine_tune_checkpoint_type: "classification" +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + num_keypoints: 17 +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + num_visualizations: 10 + max_num_boxes_to_visualize: 20 + min_score_threshold: 0.2 + batch_size: 1; + parameterized_metric { + coco_keypoint_metrics { + class_label: "person" + keypoint_label_to_sigmas { + key: "nose" + value: 0.026 + } + keypoint_label_to_sigmas { + key: "left_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "right_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "left_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "right_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "left_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "right_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "left_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "right_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "left_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "right_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "left_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "right_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "left_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "right_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "left_ankle" + value: 0.089 + } + keypoint_label_to_sigmas { + key: "right_ankle" + value: 0.089 + } + } + } + # Provide the edges to connect the keypoints. The setting is suitable for + # COCO's 17 human pose keypoints. 
+ keypoint_edge { # nose-left eye + start: 0 + end: 1 + } + keypoint_edge { # nose-right eye + start: 0 + end: 2 + } + keypoint_edge { # left eye-left ear + start: 1 + end: 3 + } + keypoint_edge { # right eye-right ear + start: 2 + end: 4 + } + keypoint_edge { # nose-left shoulder + start: 0 + end: 5 + } + keypoint_edge { # nose-right shoulder + start: 0 + end: 6 + } + keypoint_edge { # left shoulder-left elbow + start: 5 + end: 7 + } + keypoint_edge { # left elbow-left wrist + start: 7 + end: 9 + } + keypoint_edge { # right shoulder-right elbow + start: 6 + end: 8 + } + keypoint_edge { # right elbow-right wrist + start: 8 + end: 10 + } + keypoint_edge { # left shoulder-right shoulder + start: 5 + end: 6 + } + keypoint_edge { # left shoulder-left hip + start: 5 + end: 11 + } + keypoint_edge { # right shoulder-right hip + start: 6 + end: 12 + } + keypoint_edge { # left hip-right hip + start: 11 + end: 12 + } + keypoint_edge { # left hip-left knee + start: 11 + end: 13 + } + keypoint_edge { # left knee-left ankle + start: 13 + end: 15 + } + keypoint_edge { # right hip-right knee + start: 12 + end: 14 + } + keypoint_edge { # right knee-right ankle + start: 14 + end: 16 + } +} +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } + num_keypoints: 17 +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_resnet50_v2_512x512_kpts_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_resnet50_v2_512x512_kpts_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..3067ed417b1898b0b2b7839647d138c462c329c9 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/centernet_resnet50_v2_512x512_kpts_coco17_tpu-8.config @@ -0,0 +1,393 @@ +# CenterNet meta-architecture from the "Objects as Points" [1] paper +# with the ResNet-v2-50 backbone. The ResNet backbone has a few differences +# as compared to the one mentioned in the paper, hence the performance is +# slightly worse. This config is TPU comptatible. 
+# [1]: https://arxiv.org/abs/1904.07850 + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "resnet_v2_50" + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.1 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + min_box_overlap_iou: 0.7 + max_box_predictions: 100 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + + keypoint_label_map_path: "PATH_TO_BE_CONFIGURED" + keypoint_estimation_task { + task_name: "human_pose" + task_loss_weight: 1.0 + loss { + localization_loss { + l1_localization_loss { + } + } + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + keypoint_class_name: "/m/01g317" + keypoint_label_to_std { + key: "left_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "left_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "left_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "left_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "left_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "left_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "left_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "left_wrist" + value: 0.62 + } + keypoint_label_to_std { + key: "nose" + value: 0.26 + } + keypoint_label_to_std { + key: "right_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "right_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "right_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "right_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "right_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "right_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "right_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "right_wrist" + value: 0.62 + } + keypoint_regression_loss_weight: 0.1 + keypoint_heatmap_loss_weight: 1.0 + keypoint_offset_loss_weight: 1.0 + offset_peak_radius: 3 + per_keypoint_offset: true + } + } +} + +train_config: { + + batch_size: 128 + num_steps: 250000 + + data_augmentation_options { + random_horizontal_flip { + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 4 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 6 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 8 + keypoint_flip_permutation: 7 + keypoint_flip_permutation: 10 + keypoint_flip_permutation: 9 + keypoint_flip_permutation: 12 + keypoint_flip_permutation: 11 + keypoint_flip_permutation: 14 + keypoint_flip_permutation: 13 + keypoint_flip_permutation: 16 + keypoint_flip_permutation: 15 + } + } + + data_augmentation_options { + random_crop_image { + min_aspect_ratio: 0.5 + max_aspect_ratio: 1.7 + random_coef: 0.25 + } + } + + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_adjust_brightness { + } + } + + data_augmentation_options { + random_absolute_pad_image { + max_height_padding: 200 + max_width_padding: 200 + pad_color: [0, 0, 0] + } + } + + optimizer { + adam_optimizer: { + epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default. 
+ learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 1e-3 + total_steps: 250000 + warmup_learning_rate: 2.5e-4 + warmup_steps: 5000 + } + } + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED" + fine_tune_checkpoint_type: "classification" +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + num_keypoints: 17 +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + num_visualizations: 10 + max_num_boxes_to_visualize: 20 + min_score_threshold: 0.2 + batch_size: 1; + parameterized_metric { + coco_keypoint_metrics { + class_label: "person" + keypoint_label_to_sigmas { + key: "nose" + value: 0.026 + } + keypoint_label_to_sigmas { + key: "left_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "right_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "left_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "right_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "left_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "right_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "left_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "right_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "left_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "right_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "left_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "right_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "left_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "right_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "left_ankle" + value: 0.089 + } + keypoint_label_to_sigmas { + key: "right_ankle" + value: 0.089 + } + } + } + # Provide the edges to connect the keypoints. The setting is suitable for + # COCO's 17 human pose keypoints. 
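Editor's note: the `keypoint_label_to_sigmas` table above holds the per-keypoint constants that COCO's Object Keypoint Similarity (OKS) uses to scale localization error. A sketch of that metric under the standard COCO definition, before the edge list that follows (the exact epsilon handling in pycocotools differs slightly):

```python
import numpy as np

# Sigmas exactly as in keypoint_label_to_sigmas above, in standard COCO order.
SIGMAS = np.array([0.026, 0.025, 0.025, 0.035, 0.035,
                   0.079, 0.079, 0.072, 0.072, 0.062, 0.062,
                   0.107, 0.107, 0.087, 0.087, 0.089, 0.089])

def oks(pred_xy, gt_xy, visibility, gt_area):
    """Object Keypoint Similarity (sketch of what coco_keypoint_metrics scores).

    pred_xy, gt_xy: (17, 2) pixel coordinates; visibility: (17,) ground-truth
    flags (>0 means labeled); gt_area: ground-truth object area in px^2.
    """
    d2 = np.sum((pred_xy - gt_xy) ** 2, axis=-1)
    k2 = (2.0 * SIGMAS) ** 2
    per_keypoint = np.exp(-d2 / (2.0 * gt_area * k2 + np.spacing(1)))
    labeled = visibility > 0
    return float(per_keypoint[labeled].mean()) if labeled.any() else 0.0
```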
+ keypoint_edge { # nose-left eye + start: 0 + end: 1 + } + keypoint_edge { # nose-right eye + start: 0 + end: 2 + } + keypoint_edge { # left eye-left ear + start: 1 + end: 3 + } + keypoint_edge { # right eye-right ear + start: 2 + end: 4 + } + keypoint_edge { # nose-left shoulder + start: 0 + end: 5 + } + keypoint_edge { # nose-right shoulder + start: 0 + end: 6 + } + keypoint_edge { # left shoulder-left elbow + start: 5 + end: 7 + } + keypoint_edge { # left elbow-left wrist + start: 7 + end: 9 + } + keypoint_edge { # right shoulder-right elbow + start: 6 + end: 8 + } + keypoint_edge { # right elbow-right wrist + start: 8 + end: 10 + } + keypoint_edge { # left shoulder-right shoulder + start: 5 + end: 6 + } + keypoint_edge { # left shoulder-left hip + start: 5 + end: 11 + } + keypoint_edge { # right shoulder-right hip + start: 6 + end: 12 + } + keypoint_edge { # left hip-right hip + start: 11 + end: 12 + } + keypoint_edge { # left hip-left knee + start: 11 + end: 13 + } + keypoint_edge { # left knee-left ankle + start: 13 + end: 15 + } + keypoint_edge { # right hip-right knee + start: 12 + end: 14 + } + keypoint_edge { # right knee-right ankle + start: 14 + end: 16 + } +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } + num_keypoints: 17 +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet101_v1_1024x1024_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet101_v1_1024x1024_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..c38f6b9e2143a7248964ac7f68ebd38c47627d68 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet101_v1_1024x1024_coco17_tpu-8.config @@ -0,0 +1,166 @@ +# Faster R-CNN with Resnet-101 (v1), +# w/high res inputs, long training schedule +# Trained on COCO, initialized from Imagenet classification checkpoint +# +# Train on TPU-8 +# +# Achieves 37.1 mAP on COCO17 val + +model { + faster_rcnn { + num_classes: 90 + image_resizer { + fixed_shape_resizer { + width: 1024 + height: 1024 + } + } + feature_extractor { + type: 'faster_rcnn_resnet101_keras' + batch_norm_trainable: true + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false + dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + share_box_across_classes: true + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + 
max_total_detections: 300 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + use_static_shapes: true + use_matmul_crop_and_resize: true + clip_anchors_to_image: true + use_static_balanced_label_sampler: true + use_matmul_gather_in_matcher: true + } +} + +train_config: { + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 100000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 100000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet101.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_square_crop_by_scale { + scale_min: 0.6 + scale_max: 1.3 + } + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + use_bfloat16: true # works only on TPUs +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet101_v1_640x640_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet101_v1_640x640_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..af07c7df6278ba936b863abff7301446bf7cf1f8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet101_v1_640x640_coco17_tpu-8.config @@ -0,0 +1,145 @@ +# Faster R-CNN with Resnet-50 (v1) +# Trained on COCO, initialized from Imagenet classification checkpoint +# +# Train on TPU-8 +# +# Achieves 31.8 mAP on COCO17 val + +model { + faster_rcnn { + num_classes: 90 + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 640 + max_dimension: 640 + pad_to_max_dimension: true + } + } + feature_extractor { + type: 'faster_rcnn_resnet101_keras' + batch_norm_trainable: true + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false 
+ dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + share_box_across_classes: true + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 300 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + use_static_shapes: true + use_matmul_crop_and_resize: true + clip_anchors_to_image: true + use_static_balanced_label_sampler: true + use_matmul_gather_in_matcher: true + } +} + +train_config: { + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 25000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 25000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet101.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } + + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + use_bfloat16: true # works only on TPUs +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet101_v1_800x1333_coco17_gpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet101_v1_800x1333_coco17_gpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..8eb4da02f59cb0a955678865112b863bff37ac1c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet101_v1_800x1333_coco17_gpu-8.config @@ -0,0 +1,154 @@ +# Faster R-CNN with Resnet-101 (v1), +# Initialized from Imagenet classification checkpoint +# +# Train on GPU-8 +# +# Achieves 36.6 mAP on COCO17 val + +model { + faster_rcnn { + num_classes: 90 + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 800 + max_dimension: 1333 + pad_to_max_dimension: true + } + } + feature_extractor { + type: 'faster_rcnn_resnet101_keras' + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + 
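Editor's note: across these Faster R-CNN variants, `initial_crop_size: 14` together with the 2x2, stride-2 max pool means every first-stage proposal is cropped from the feature map to a 14x14 patch and pooled to 7x7 before the second-stage box head. A minimal sketch of that step, with made-up shapes for illustration:

```python
import tensorflow as tf

# Made-up shapes: one backbone feature map and 300 first-stage proposals.
features = tf.random.normal([1, 64, 64, 1024])        # [batch, h, w, channels]
boxes = tf.random.uniform([300, 4])                   # normalized [ymin, xmin, ymax, xmax]
box_indices = tf.zeros([300], dtype=tf.int32)         # every proposal comes from image 0

# initial_crop_size: 14 -> each proposal becomes a 14x14 feature patch.
crops = tf.image.crop_and_resize(features, boxes, box_indices, crop_size=[14, 14])

# maxpool_kernel_size: 2 / maxpool_stride: 2 -> pooled to 7x7 for the box head.
pooled = tf.nn.max_pool2d(crops, ksize=2, strides=2, padding="VALID")
print(pooled.shape)  # (300, 7, 7, 1024)
```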
second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false + dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + } +} + +train_config: { + batch_size: 16 + num_steps: 200000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 0.01 + total_steps: 200000 + warmup_learning_rate: 0.0 + warmup_steps: 5000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + gradient_clipping_by_norm: 10.0 + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet101.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_square_crop_by_scale { + scale_min: 0.6 + scale_max: 1.3 + } + } +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet152_v1_1024x1024_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet152_v1_1024x1024_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..034667ffe38fb997b2e2d36406686faaa23492bd --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet152_v1_1024x1024_coco17_tpu-8.config @@ -0,0 +1,166 @@ +# Faster R-CNN with Resnet-152 (v1) +# w/high res inputs, long training schedule +# Trained on COCO, initialized from Imagenet classification checkpoint +# +# Train on TPU-8 +# +# Achieves 37.6 mAP on COCO17 val + +model { + faster_rcnn { + num_classes: 90 + image_resizer { + fixed_shape_resizer { + width: 1024 + height: 1024 + } + } + feature_extractor { + type: 'faster_rcnn_resnet152_keras' + batch_norm_trainable: true + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + 
initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false + dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + share_box_across_classes: true + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 300 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + use_static_shapes: true + use_matmul_crop_and_resize: true + clip_anchors_to_image: true + use_static_balanced_label_sampler: true + use_matmul_gather_in_matcher: true + } +} + +train_config: { + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 100000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 100000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet152.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_square_crop_by_scale { + scale_min: 0.6 + scale_max: 1.3 + } + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + use_bfloat16: true # works only on TPUs +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet152_v1_640x640_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet152_v1_640x640_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..525c4ac456a058646145c1127593b2e0f891fb89 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet152_v1_640x640_coco17_tpu-8.config @@ -0,0 +1,145 @@ +# Faster R-CNN with Resnet-152 (v1) +# Trained on COCO, initialized from Imagenet classification checkpoint +# +# Train on TPU-8 +# +# Achieves 32.4 mAP on COCO17 val + +model { + faster_rcnn { + num_classes: 90 + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 640 + max_dimension: 640 + pad_to_max_dimension: true + } + } + feature_extractor { + type: 'faster_rcnn_resnet152_keras' + batch_norm_trainable: true + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + 
width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false + dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + share_box_across_classes: true + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 300 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + use_static_shapes: true + use_matmul_crop_and_resize: true + clip_anchors_to_image: true + use_static_balanced_label_sampler: true + use_matmul_gather_in_matcher: true + } +} + +train_config: { + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 25000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 25000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet152.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } + + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + use_bfloat16: true # works only on TPUs +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet152_v1_800x1333_coco17_gpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet152_v1_800x1333_coco17_gpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..8d1879f7b9b9d784244ea1504ec64b9200ab5e34 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet152_v1_800x1333_coco17_gpu-8.config @@ -0,0 +1,154 @@ +# Faster R-CNN with Resnet-152 (v1), +# Initialized from Imagenet classification checkpoint +# +# Train on GPU-8 +# +# Achieves 37.3 mAP on COCO17 val + +model { + faster_rcnn { + num_classes: 90 + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 800 + max_dimension: 1333 + pad_to_max_dimension: true + } + } + feature_extractor { + type: 'faster_rcnn_resnet152_keras' + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 
1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false + dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + } +} + +train_config: { + batch_size: 16 + num_steps: 200000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 0.01 + total_steps: 200000 + warmup_learning_rate: 0.0 + warmup_steps: 5000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + gradient_clipping_by_norm: 10.0 + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet152.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_square_crop_by_scale { + scale_min: 0.6 + scale_max: 1.3 + } + } +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_1024x1024_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_1024x1024_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..b6e590ee7179681a159e70df117916a77195fcf5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_1024x1024_coco17_tpu-8.config @@ -0,0 +1,166 @@ +# Faster R-CNN with Resnet-50 (v1), +# w/high res inputs, long training schedule +# Trained on COCO, initialized from Imagenet classification checkpoint +# +# Train on TPU-8 +# +# Achieves 31.0 mAP on COCO17 val + +model { + faster_rcnn { + num_classes: 90 + image_resizer { + fixed_shape_resizer { + width: 1024 + height: 1024 + } + } + feature_extractor { + type: 'faster_rcnn_resnet50_keras' + batch_norm_trainable: true + } + 
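Editor's note: the `first_stage_anchor_generator` stanza that follows, repeated across all the Faster R-CNN configs here, lays 4 scales x 3 aspect ratios = 12 anchors at every 16-pixel stride. A sketch of the implied anchor shapes, assuming the API's default 256x256 base anchor size (these configs do not override it):

```python
BASE = 256.0                       # assumed grid_anchor_generator default base size
SCALES = [0.25, 0.5, 1.0, 2.0]
ASPECT_RATIOS = [0.5, 1.0, 2.0]    # treated as width / height below

for scale in SCALES:
    for ar in ASPECT_RATIOS:
        # Keep the anchor area at (scale * BASE)^2 while skewing its shape; the
        # ratio set is symmetric, so the width/height convention does not change
        # which shapes appear.
        height = scale * BASE / ar ** 0.5
        width = scale * BASE * ar ** 0.5
        print(f"scale {scale:<4} ratio {ar:<3} -> {height:6.1f} x {width:6.1f}")
```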
first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false + dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + share_box_across_classes: true + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 300 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + use_static_shapes: true + use_matmul_crop_and_resize: true + clip_anchors_to_image: true + use_static_balanced_label_sampler: true + use_matmul_gather_in_matcher: true + } +} + +train_config: { + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 100000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 100000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet50.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_square_crop_by_scale { + scale_min: 0.6 + scale_max: 1.3 + } + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + use_bfloat16: true # works only on TPUs +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_640x640_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_640x640_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..c8601c6fed1f63de85fc7d53e13658ea498fb9eb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_640x640_coco17_tpu-8.config @@ -0,0 +1,145 @@ +# Faster R-CNN with Resnet-50 
(v1) with 640x640 input resolution +# Trained on COCO, initialized from Imagenet classification checkpoint +# +# Train on TPU-8 +# +# Achieves 29.3 mAP on COCO17 Val + +model { + faster_rcnn { + num_classes: 90 + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 640 + max_dimension: 640 + pad_to_max_dimension: true + } + } + feature_extractor { + type: 'faster_rcnn_resnet50_keras' + batch_norm_trainable: true + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false + dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + share_box_across_classes: true + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 300 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + use_static_shapes: true + use_matmul_crop_and_resize: true + clip_anchors_to_image: true + use_static_balanced_label_sampler: true + use_matmul_gather_in_matcher: true + } +} + +train_config: { + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 25000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 25000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet50.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } + + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + use_bfloat16: true # works only on TPUs +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_800x1333_coco17_gpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_800x1333_coco17_gpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..264be5f0b79660d524aaa866c0bee98d3ee6b199 --- /dev/null +++ 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_800x1333_coco17_gpu-8.config @@ -0,0 +1,154 @@ +# Faster R-CNN with Resnet-50 (v1), +# Initialized from Imagenet classification checkpoint +# +# Train on GPU-8 +# +# Achieves 31.4 mAP on COCO17 val + +model { + faster_rcnn { + num_classes: 90 + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 800 + max_dimension: 1333 + pad_to_max_dimension: true + } + } + feature_extractor { + type: 'faster_rcnn_resnet50_keras' + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false + dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + } +} + +train_config: { + batch_size: 16 + num_steps: 200000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 0.01 + total_steps: 200000 + warmup_learning_rate: 0.0 + warmup_steps: 5000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + gradient_clipping_by_norm: 10.0 + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet50.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_square_crop_by_scale { + scale_min: 0.6 + scale_max: 1.3 + } + } +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_fpn_640x640_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_fpn_640x640_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..acb5a91359bd3d0349f628d6f284c19e4dc0e326 
--- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/faster_rcnn_resnet50_v1_fpn_640x640_coco17_tpu-8.config @@ -0,0 +1,173 @@ +# Faster RCNN with Resnet 50 v1 FPN feature extractor. +# See Lin et al, https://arxiv.org/abs/1612.03144 +# Trained on COCO, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 31.4 mAP on COCO17 Val + +model { + faster_rcnn { + num_classes: 90 + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 640 + max_dimension: 640 + pad_to_max_dimension: true + } + } + feature_extractor { + type: 'faster_rcnn_resnet50_fpn_keras' + batch_norm_trainable: true + fpn { + min_level: 2 + max_level: 6 + } + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + first_stage_anchor_generator { + multiscale_anchor_generator { + min_level: 2 + max_level: 6 + # According to the origial paper the value should be 8.0 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + # According to the original paper the value should be 1 + scales_per_octave: 2 + normalize_coordinates: false + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + # According to the origial paper, value should be 7. 
+ initial_crop_size: 14 + maxpool_kernel_size: 2 + maxpool_stride: 2 + second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false + dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 300 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + use_static_shapes: true + use_matmul_crop_and_resize: true + clip_anchors_to_image: true + use_static_balanced_label_sampler: true + use_matmul_gather_in_matcher: true + } +} + +train_config: { + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 25000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 0.04 + total_steps: 25000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet50.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } + + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + use_bfloat16: true +} + +train_input_reader: { + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/mask_rcnn_inception_resnet_v2_1024x1024_coco17_gpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/mask_rcnn_inception_resnet_v2_1024x1024_coco17_gpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..974c1d1710b15ce639825cc57f625756cbed7134 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/mask_rcnn_inception_resnet_v2_1024x1024_coco17_gpu-8.config @@ -0,0 +1,160 @@ +# Mask R-CNN with Inception Resnet v2 (no atrous) +# Sync-trained on COCO (with 8 GPUs) with batch size 16 (1024x1024 resolution) +# Initialized from Imagenet classification checkpoint +# +# Train on GPU-8 +# +# Achieves 40.4 box mAP and 35.5 mask mAP on COCO17 val + +model { + faster_rcnn { + number_of_stages: 3 + num_classes: 90 + image_resizer { + fixed_shape_resizer { + height: 1024 + width: 1024 + } + } + feature_extractor { + type: 'faster_rcnn_inception_resnet_v2_keras' + } + first_stage_anchor_generator { + grid_anchor_generator { + scales: [0.25, 0.5, 1.0, 2.0] + aspect_ratios: [0.5, 1.0, 2.0] + height_stride: 16 + width_stride: 16 + } + } + first_stage_box_predictor_conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + first_stage_nms_score_threshold: 0.0 + 
first_stage_nms_iou_threshold: 0.7 + first_stage_max_proposals: 300 + first_stage_localization_loss_weight: 2.0 + first_stage_objectness_loss_weight: 1.0 + initial_crop_size: 17 + maxpool_kernel_size: 1 + maxpool_stride: 1 + second_stage_box_predictor { + mask_rcnn_box_predictor { + use_dropout: false + dropout_keep_probability: 1.0 + fc_hyperparams { + op: FC + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + mask_height: 33 + mask_width: 33 + mask_prediction_conv_depth: 0 + mask_prediction_num_conv_layers: 4 + conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + predict_instance_masks: true + } + } + second_stage_post_processing { + batch_non_max_suppression { + score_threshold: 0.0 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SOFTMAX + } + second_stage_localization_loss_weight: 2.0 + second_stage_classification_loss_weight: 1.0 + second_stage_mask_prediction_loss_weight: 4.0 + resize_masks: false + } +} + +train_config: { + batch_size: 16 + num_steps: 200000 + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 0.008 + total_steps: 200000 + warmup_learning_rate: 0.0 + warmup_steps: 5000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + gradient_clipping_by_norm: 10.0 + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/inception_resnet_v2.ckpt-1" + fine_tune_checkpoint_type: "classification" + data_augmentation_options { + random_horizontal_flip { + } + } +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + load_instance_masks: true + mask_type: PNG_MASKS +} + +eval_config: { + metrics_set: "coco_detection_metrics" + metrics_set: "coco_mask_metrics" + eval_instance_masks: true + use_moving_averages: false + batch_size: 1 + include_metrics_per_category: true +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } + load_instance_masks: true + mask_type: PNG_MASKS +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d0_512x512_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d0_512x512_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..ffcd461f77f4a1c6d0eb5335032f6b11d711da99 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d0_512x512_coco17_tpu-8.config @@ -0,0 +1,199 @@ + # SSD with EfficientNet-b0 + BiFPN feature extractor, +# shared box predictor and focal loss (a.k.a EfficientDet-d0). +# See EfficientDet, Tan et al, https://arxiv.org/abs/1911.09070 +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from an EfficientNet-b0 checkpoint. 
+# +# Train on TPU-8 + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + add_background_class: false + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 3 + } + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 64 + class_prediction_bias_init: -4.6 + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true + decay: 0.99 + epsilon: 0.001 + } + } + num_layers_before_predictor: 3 + kernel_size: 3 + use_depthwise: true + } + } + feature_extractor { + type: 'ssd_efficientnet-b0_bifpn_keras' + bifpn { + min_level: 3 + max_level: 7 + num_iterations: 3 + num_filters: 64 + } + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.99, + epsilon: 0.001, + } + } + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 1.5 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.5 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-0" + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint_type: "classification" + batch_size: 128 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 300000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_scale_crop_and_pad_to_square { + output_size: 512 + scale_min: 0.1 + scale_max: 2.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 8e-2 + total_steps: 300000 + warmup_learning_rate: .001 + warmup_steps: 2500 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: 
"PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d1_640x640_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d1_640x640_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..5eacfeda854606ed8cddc7c6653134fce39a17ca --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d1_640x640_coco17_tpu-8.config @@ -0,0 +1,199 @@ + # SSD with EfficientNet-b1 + BiFPN feature extractor, +# shared box predictor and focal loss (a.k.a EfficientDet-d1). +# See EfficientDet, Tan et al, https://arxiv.org/abs/1911.09070 +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from an EfficientNet-b1 checkpoint. +# +# Train on TPU-8 + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + add_background_class: false + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 3 + } + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 640 + max_dimension: 640 + pad_to_max_dimension: true + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 88 + class_prediction_bias_init: -4.6 + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true + decay: 0.99 + epsilon: 0.001 + } + } + num_layers_before_predictor: 3 + kernel_size: 3 + use_depthwise: true + } + } + feature_extractor { + type: 'ssd_efficientnet-b1_bifpn_keras' + bifpn { + min_level: 3 + max_level: 7 + num_iterations: 4 + num_filters: 88 + } + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.99, + epsilon: 0.001, + } + } + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 1.5 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.5 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-0" + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint_type: "classification" + batch_size: 128 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 300000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + 
random_scale_crop_and_pad_to_square { + output_size: 640 + scale_min: 0.1 + scale_max: 2.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 8e-2 + total_steps: 300000 + warmup_learning_rate: .001 + warmup_steps: 2500 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d2_768x768_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d2_768x768_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..d2ca75d468c9f062150b70800f911c458f85615a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d2_768x768_coco17_tpu-8.config @@ -0,0 +1,199 @@ + # SSD with EfficientNet-b2 + BiFPN feature extractor, +# shared box predictor and focal loss (a.k.a EfficientDet-d2). +# See EfficientDet, Tan et al, https://arxiv.org/abs/1911.09070 +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from an EfficientNet-b2 checkpoint. 
+# +# Train on TPU-8 + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + add_background_class: false + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 3 + } + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 768 + max_dimension: 768 + pad_to_max_dimension: true + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 112 + class_prediction_bias_init: -4.6 + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true + decay: 0.99 + epsilon: 0.001 + } + } + num_layers_before_predictor: 3 + kernel_size: 3 + use_depthwise: true + } + } + feature_extractor { + type: 'ssd_efficientnet-b2_bifpn_keras' + bifpn { + min_level: 3 + max_level: 7 + num_iterations: 5 + num_filters: 112 + } + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.99, + epsilon: 0.001, + } + } + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 1.5 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.5 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-0" + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint_type: "classification" + batch_size: 128 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 300000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_scale_crop_and_pad_to_square { + output_size: 768 + scale_min: 0.1 + scale_max: 2.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 8e-2 + total_steps: 300000 + warmup_learning_rate: .001 + warmup_steps: 2500 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: 
"PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d3_896x896_coco17_tpu-32.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d3_896x896_coco17_tpu-32.config new file mode 100644 index 0000000000000000000000000000000000000000..b072d13a89fbb901eaeca2dbe7505cb77ac51bb5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d3_896x896_coco17_tpu-32.config @@ -0,0 +1,199 @@ + # SSD with EfficientNet-b3 + BiFPN feature extractor, +# shared box predictor and focal loss (a.k.a EfficientDet-d3). +# See EfficientDet, Tan et al, https://arxiv.org/abs/1911.09070 +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from an EfficientNet-b3 checkpoint. +# +# Train on TPU-32 + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + add_background_class: false + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 3 + } + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 896 + max_dimension: 896 + pad_to_max_dimension: true + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 160 + class_prediction_bias_init: -4.6 + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true + decay: 0.99 + epsilon: 0.001 + } + } + num_layers_before_predictor: 4 + kernel_size: 3 + use_depthwise: true + } + } + feature_extractor { + type: 'ssd_efficientnet-b3_bifpn_keras' + bifpn { + min_level: 3 + max_level: 7 + num_iterations: 6 + num_filters: 160 + } + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.99, + epsilon: 0.001, + } + } + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 1.5 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.5 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-0" + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint_type: "classification" + batch_size: 128 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 300000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + 
random_scale_crop_and_pad_to_square { + output_size: 896 + scale_min: 0.1 + scale_max: 2.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 8e-2 + total_steps: 300000 + warmup_learning_rate: .001 + warmup_steps: 2500 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d4_1024x1024_coco17_tpu-32.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d4_1024x1024_coco17_tpu-32.config new file mode 100644 index 0000000000000000000000000000000000000000..b13b2d46974baa6c87f39ccb26a6070d5f1d7158 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d4_1024x1024_coco17_tpu-32.config @@ -0,0 +1,199 @@ + # SSD with EfficientNet-b4 + BiFPN feature extractor, +# shared box predictor and focal loss (a.k.a EfficientDet-d4). +# See EfficientDet, Tan et al, https://arxiv.org/abs/1911.09070 +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from an EfficientNet-b4 checkpoint. 
+# +# Train on TPU-32 + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + add_background_class: false + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 3 + } + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 1024 + max_dimension: 1024 + pad_to_max_dimension: true + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 224 + class_prediction_bias_init: -4.6 + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true + decay: 0.99 + epsilon: 0.001 + } + } + num_layers_before_predictor: 4 + kernel_size: 3 + use_depthwise: true + } + } + feature_extractor { + type: 'ssd_efficientnet-b4_bifpn_keras' + bifpn { + min_level: 3 + max_level: 7 + num_iterations: 7 + num_filters: 224 + } + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.99, + epsilon: 0.001, + } + } + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 1.5 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.5 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-0" + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint_type: "classification" + batch_size: 128 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 300000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_scale_crop_and_pad_to_square { + output_size: 1024 + scale_min: 0.1 + scale_max: 2.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 8e-2 + total_steps: 300000 + warmup_learning_rate: .001 + warmup_steps: 2500 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: 
"PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d5_1280x1280_coco17_tpu-32.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d5_1280x1280_coco17_tpu-32.config new file mode 100644 index 0000000000000000000000000000000000000000..bcb33d50300dadc8e8381d60646632714dfde3f2 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d5_1280x1280_coco17_tpu-32.config @@ -0,0 +1,199 @@ + # SSD with EfficientNet-b5 + BiFPN feature extractor, +# shared box predictor and focal loss (a.k.a EfficientDet-d5). +# See EfficientDet, Tan et al, https://arxiv.org/abs/1911.09070 +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from an EfficientNet-b5 checkpoint. +# +# Train on TPU-32 + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + add_background_class: false + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 3 + } + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 1280 + max_dimension: 1280 + pad_to_max_dimension: true + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 288 + class_prediction_bias_init: -4.6 + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true + decay: 0.99 + epsilon: 0.001 + } + } + num_layers_before_predictor: 4 + kernel_size: 3 + use_depthwise: true + } + } + feature_extractor { + type: 'ssd_efficientnet-b5_bifpn_keras' + bifpn { + min_level: 3 + max_level: 7 + num_iterations: 7 + num_filters: 288 + } + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.99, + epsilon: 0.001, + } + } + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 1.5 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.5 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-0" + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint_type: "classification" + batch_size: 128 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 300000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + 
random_scale_crop_and_pad_to_square { + output_size: 1280 + scale_min: 0.1 + scale_max: 2.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 8e-2 + total_steps: 300000 + warmup_learning_rate: .001 + warmup_steps: 2500 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d6_1408x1408_coco17_tpu-32.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d6_1408x1408_coco17_tpu-32.config new file mode 100644 index 0000000000000000000000000000000000000000..1f24607431c3fdc1d008e8f05fe18a1ef527b7ab --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d6_1408x1408_coco17_tpu-32.config @@ -0,0 +1,201 @@ + # SSD with EfficientNet-b6 + BiFPN feature extractor, +# shared box predictor and focal loss (a.k.a EfficientDet-d6). +# See EfficientDet, Tan et al, https://arxiv.org/abs/1911.09070 +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from an EfficientNet-b6 checkpoint. +# +# Train on TPU-32 + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + add_background_class: false + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 3 + } + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 1408 + max_dimension: 1408 + pad_to_max_dimension: true + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 384 + class_prediction_bias_init: -4.6 + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true + decay: 0.99 + epsilon: 0.001 + } + } + num_layers_before_predictor: 5 + kernel_size: 3 + use_depthwise: true + } + } + feature_extractor { + type: 'ssd_efficientnet-b6_bifpn_keras' + bifpn { + min_level: 3 + max_level: 7 + num_iterations: 8 + num_filters: 384 + # Use unweighted sum for stability. 
+ combine_method: 'sum' + } + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.99, + epsilon: 0.001, + } + } + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 1.5 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.5 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-0" + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint_type: "classification" + batch_size: 128 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 300000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_scale_crop_and_pad_to_square { + output_size: 1408 + scale_min: 0.1 + scale_max: 2.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 8e-2 + total_steps: 300000 + warmup_learning_rate: .001 + warmup_steps: 2500 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d7_1536x1536_coco17_tpu-32.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d7_1536x1536_coco17_tpu-32.config new file mode 100644 index 0000000000000000000000000000000000000000..81954aa8bdd9509d0835b7abac8f593c909bceb8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_efficientdet_d7_1536x1536_coco17_tpu-32.config @@ -0,0 +1,201 @@ + # SSD with EfficientNet-b6 + BiFPN feature extractor, +# shared box predictor and focal loss (a.k.a EfficientDet-d7). +# See EfficientDet, Tan et al, https://arxiv.org/abs/1911.09070 +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from an EfficientNet-b6 checkpoint. 
+# +# Train on TPU-32 + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + add_background_class: false + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 3 + } + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 1536 + max_dimension: 1536 + pad_to_max_dimension: true + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 384 + class_prediction_bias_init: -4.6 + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true + decay: 0.99 + epsilon: 0.001 + } + } + num_layers_before_predictor: 5 + kernel_size: 3 + use_depthwise: true + } + } + feature_extractor { + type: 'ssd_efficientnet-b6_bifpn_keras' + bifpn { + min_level: 3 + max_level: 7 + num_iterations: 8 + num_filters: 384 + # Use unweighted sum for stability. + combine_method: 'sum' + } + conv_hyperparams { + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.99, + epsilon: 0.001, + } + } + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 1.5 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.5 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/ckpt-0" + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint_type: "classification" + batch_size: 128 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 300000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_scale_crop_and_pad_to_square { + output_size: 1536 + scale_min: 0.1 + scale_max: 2.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 8e-2 + total_steps: 300000 + warmup_learning_rate: .001 + warmup_steps: 2500 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { 
+ input_path: "PATH_TO_BEE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..3cfe304f171061778587c8aaf59add2df5bb746e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8.config @@ -0,0 +1,197 @@ +# SSD with Mobilenet v1 FPN feature extractor, shared box predictor and focal +# loss (a.k.a Retinanet). +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 29.1 mAP on COCO17 Val + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 2 + } + } + image_resizer { + fixed_shape_resizer { + height: 640 + width: 640 + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 256 + class_prediction_bias_init: -4.6 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + num_layers_before_predictor: 4 + kernel_size: 3 + } + } + feature_extractor { + type: 'ssd_mobilenet_v1_fpn_keras' + fpn { + min_level: 3 + max_level: 7 + } + min_depth: 16 + depth_multiplier: 1.0 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/mobilenet_v1.ckpt-1" + fine_tune_checkpoint_type: "classification" + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 25000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_crop_image { + min_object_covered: 0.0 + min_aspect_ratio: 0.75 + max_aspect_ratio: 3.0 + min_area: 0.75 + max_area: 1.0 + 
overlap_thresh: 0.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 25000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + batch_size: 1; +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v2_320x320_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v2_320x320_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..dc3a4a7f3e70e59f98dc108e6d7f0240881f928d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v2_320x320_coco17_tpu-8.config @@ -0,0 +1,197 @@ +# SSD with Mobilenet v2 +# Trained on COCO17, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 22.2 mAP on COCO17 Val + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + ssd_anchor_generator { + num_layers: 6 + min_scale: 0.2 + max_scale: 0.95 + aspect_ratios: 1.0 + aspect_ratios: 2.0 + aspect_ratios: 0.5 + aspect_ratios: 3.0 + aspect_ratios: 0.3333 + } + } + image_resizer { + fixed_shape_resizer { + height: 300 + width: 300 + } + } + box_predictor { + convolutional_box_predictor { + min_depth: 0 + max_depth: 0 + num_layers_before_predictor: 0 + use_dropout: false + dropout_keep_probability: 0.8 + kernel_size: 1 + box_code_size: 4 + apply_sigmoid_to_scores: false + class_prediction_bias_init: -4.6 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + train: true, + scale: true, + center: true, + decay: 0.97, + epsilon: 0.001, + } + } + } + } + feature_extractor { + type: 'ssd_mobilenet_v2_keras' + min_depth: 16 + depth_multiplier: 1.0 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + train: true, + scale: true, + center: true, + decay: 0.97, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.75, + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + delta: 1.0 + } + } + 
classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/mobilenet_v2.ckpt-1" + fine_tune_checkpoint_type: "classification" + batch_size: 512 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 50000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + ssd_random_crop { + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .8 + total_steps: 50000 + warmup_learning_rate: 0.13333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..656e324c5d99db860002bbb78db3ba7505d21a6b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.config @@ -0,0 +1,201 @@ +# SSD with Mobilenet v2 FPN-lite (go/fpn-lite) feature extractor, shared box +# predictor and focal loss (a mobile version of Retinanet). 
+# Retinanet: see Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 22.2 mAP on COCO17 Val + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 2 + } + } + image_resizer { + fixed_shape_resizer { + height: 320 + width: 320 + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 128 + class_prediction_bias_init: -4.6 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + num_layers_before_predictor: 4 + share_prediction_tower: true + use_depthwise: true + kernel_size: 3 + } + } + feature_extractor { + type: 'ssd_mobilenet_v2_fpn_keras' + use_depthwise: true + fpn { + min_level: 3 + max_level: 7 + additional_layer_depth: 128 + } + min_depth: 16 + depth_multiplier: 1.0 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/mobilenet_v2.ckpt-1" + fine_tune_checkpoint_type: "classification" + batch_size: 128 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 50000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_crop_image { + min_object_covered: 0.0 + min_aspect_ratio: 0.75 + max_aspect_ratio: 3.0 + min_area: 0.75 + max_area: 1.0 + overlap_thresh: 0.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .08 + total_steps: 50000 + warmup_learning_rate: .026666 + warmup_steps: 1000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: 
false +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..5e4bca1688c678585ce075b75c489ca900627e5e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.config @@ -0,0 +1,201 @@ +# SSD with Mobilenet v2 FPN-lite (go/fpn-lite) feature extractor, shared box +# predictor and focal loss (a mobile version of Retinanet). +# Retinanet: see Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 28.2 mAP on COCO17 Val + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 2 + } + } + image_resizer { + fixed_shape_resizer { + height: 640 + width: 640 + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 128 + class_prediction_bias_init: -4.6 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + num_layers_before_predictor: 4 + share_prediction_tower: true + use_depthwise: true + kernel_size: 3 + } + } + feature_extractor { + type: 'ssd_mobilenet_v2_fpn_keras' + use_depthwise: true + fpn { + min_level: 3 + max_level: 7 + additional_layer_depth: 128 + } + min_depth: 16 + depth_multiplier: 1.0 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/mobilenet_v2.ckpt-1" + fine_tune_checkpoint_type: "classification" + batch_size: 128 + sync_replicas: true 
+ startup_delay_steps: 0 + replicas_to_aggregate: 8 + num_steps: 50000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_crop_image { + min_object_covered: 0.0 + min_aspect_ratio: 0.75 + max_aspect_ratio: 3.0 + min_area: 0.75 + max_area: 1.0 + overlap_thresh: 0.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .08 + total_steps: 50000 + warmup_learning_rate: .026666 + warmup_steps: 1000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..015617ba444c940d8e91f7f0baeb763f2688a56e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8.config @@ -0,0 +1,197 @@ +# SSD with Resnet 101 v1 FPN feature extractor, shared box predictor and focal +# loss (a.k.a Retinanet). 
+# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 39.5 mAP on COCO17 Val + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 2 + } + } + image_resizer { + fixed_shape_resizer { + height: 1024 + width: 1024 + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 256 + class_prediction_bias_init: -4.6 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + num_layers_before_predictor: 4 + kernel_size: 3 + } + } + feature_extractor { + type: 'ssd_resnet101_v1_fpn_keras' + fpn { + min_level: 3 + max_level: 7 + } + min_depth: 16 + depth_multiplier: 1.0 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet101.ckpt-1" + fine_tune_checkpoint_type: "classification" + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 100000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_crop_image { + min_object_covered: 0.0 + min_aspect_ratio: 0.75 + max_aspect_ratio: 3.0 + min_area: 0.75 + max_area: 1.0 + overlap_thresh: 0.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 100000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + 
shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet101_v1_fpn_640x640_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet101_v1_fpn_640x640_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..37e9b9b632cad937e1917732dcbda046dbc84770 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet101_v1_fpn_640x640_coco17_tpu-8.config @@ -0,0 +1,197 @@ +# SSD with Resnet 101 v1 FPN feature extractor, shared box predictor and focal +# loss (a.k.a Retinanet). +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 35.4 mAP on COCO17 Val + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 2 + } + } + image_resizer { + fixed_shape_resizer { + height: 640 + width: 640 + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 256 + class_prediction_bias_init: -4.6 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + num_layers_before_predictor: 4 + kernel_size: 3 + } + } + feature_extractor { + type: 'ssd_resnet101_v1_fpn_keras' + fpn { + min_level: 3 + max_level: 7 + } + min_depth: 16 + depth_multiplier: 1.0 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet101.ckpt-1" + fine_tune_checkpoint_type: "classification" + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 25000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_crop_image { + min_object_covered: 0.0 + min_aspect_ratio: 
0.75 + max_aspect_ratio: 3.0 + min_area: 0.75 + max_area: 1.0 + overlap_thresh: 0.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 25000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..9dbc06e3d72bbc8170b450f1b03b5d25a1b4c64f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8.config @@ -0,0 +1,197 @@ +# SSD with Resnet 152 v1 FPN feature extractor, shared box predictor and focal +# loss (a.k.a Retinanet). +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 39.6 mAP on COCO17 Val + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 2 + } + } + image_resizer { + fixed_shape_resizer { + height: 1024 + width: 1024 + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 256 + class_prediction_bias_init: -4.6 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + num_layers_before_predictor: 4 + kernel_size: 3 + } + } + feature_extractor { + type: 'ssd_resnet152_v1_fpn_keras' + fpn { + min_level: 3 + max_level: 7 + } + min_depth: 16 + depth_multiplier: 1.0 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + 
classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet152.ckpt-1" + fine_tune_checkpoint_type: "classification" + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 100000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_crop_image { + min_object_covered: 0.0 + min_aspect_ratio: 0.75 + max_aspect_ratio: 3.0 + min_area: 0.75 + max_area: 1.0 + overlap_thresh: 0.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 100000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet152_v1_fpn_640x640_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet152_v1_fpn_640x640_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..aa99f0a115e6a4fa29cfc6a4d8dbaa331dbe5173 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet152_v1_fpn_640x640_coco17_tpu-8.config @@ -0,0 +1,197 @@ +# SSD with Resnet 152 v1 FPN feature extractor, shared box predictor and focal +# loss (a.k.a Retinanet). 
+# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 35.6 mAP on COCO17 Val + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 2 + } + } + image_resizer { + fixed_shape_resizer { + height: 640 + width: 640 + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 256 + class_prediction_bias_init: -4.6 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + num_layers_before_predictor: 4 + kernel_size: 3 + } + } + feature_extractor { + type: 'ssd_resnet152_v1_fpn_keras' + fpn { + min_level: 3 + max_level: 7 + } + min_depth: 16 + depth_multiplier: 1.0 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet152.ckpt-1" + fine_tune_checkpoint_type: "classification" + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 25000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_crop_image { + min_object_covered: 0.0 + min_aspect_ratio: 0.75 + max_aspect_ratio: 3.0 + min_area: 0.75 + max_area: 1.0 + overlap_thresh: 0.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 25000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: 
false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..e1575a002992fe8ae7748a482de8eb192871a92b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8.config @@ -0,0 +1,197 @@ +# SSD with Resnet 50 v1 FPN feature extractor, shared box predictor and focal +# loss (a.k.a Retinanet). +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 38.3 mAP on COCO17 Val + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 2 + } + } + image_resizer { + fixed_shape_resizer { + height: 1024 + width: 1024 + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 256 + class_prediction_bias_init: -4.6 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + num_layers_before_predictor: 4 + kernel_size: 3 + } + } + feature_extractor { + type: 'ssd_resnet50_v1_fpn_keras' + fpn { + min_level: 3 + max_level: 7 + } + min_depth: 16 + depth_multiplier: 1.0 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet50.ckpt-1" + fine_tune_checkpoint_type: "classification" + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 100000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_crop_image { + min_object_covered: 0.0 + min_aspect_ratio: 0.75 + 
max_aspect_ratio: 3.0 + min_area: 0.75 + max_area: 1.0 + overlap_thresh: 0.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 100000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config new file mode 100644 index 0000000000000000000000000000000000000000..7164144b7305d47c7f5915d0d25979b89260eee7 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config @@ -0,0 +1,197 @@ +# SSD with Resnet 50 v1 FPN feature extractor, shared box predictor and focal +# loss (a.k.a Retinanet). +# See Lin et al, https://arxiv.org/abs/1708.02002 +# Trained on COCO, initialized from Imagenet classification checkpoint +# Train on TPU-8 +# +# Achieves 34.3 mAP on COCO17 Val + +model { + ssd { + inplace_batchnorm_update: true + freeze_batchnorm: false + num_classes: 90 + box_coder { + faster_rcnn_box_coder { + y_scale: 10.0 + x_scale: 10.0 + height_scale: 5.0 + width_scale: 5.0 + } + } + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + similarity_calculator { + iou_similarity { + } + } + encode_background_as_zeros: true + anchor_generator { + multiscale_anchor_generator { + min_level: 3 + max_level: 7 + anchor_scale: 4.0 + aspect_ratios: [1.0, 2.0, 0.5] + scales_per_octave: 2 + } + } + image_resizer { + fixed_shape_resizer { + height: 640 + width: 640 + } + } + box_predictor { + weight_shared_convolutional_box_predictor { + depth: 256 + class_prediction_bias_init: -4.6 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + num_layers_before_predictor: 4 + kernel_size: 3 + } + } + feature_extractor { + type: 'ssd_resnet50_v1_fpn_keras' + fpn { + min_level: 3 + max_level: 7 + } + min_depth: 16 + depth_multiplier: 1.0 + conv_hyperparams { + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + } + override_base_feature_extractor_hyperparams: true + } + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 
1.0 + localization_weight: 1.0 + } + normalize_loss_by_num_matches: true + normalize_loc_loss_by_codesize: true + post_processing { + batch_non_max_suppression { + score_threshold: 1e-8 + iou_threshold: 0.6 + max_detections_per_class: 100 + max_total_detections: 100 + } + score_converter: SIGMOID + } + } +} + +train_config: { + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/resnet50.ckpt-1" + fine_tune_checkpoint_type: "classification" + batch_size: 64 + sync_replicas: true + startup_delay_steps: 0 + replicas_to_aggregate: 8 + use_bfloat16: true + num_steps: 25000 + data_augmentation_options { + random_horizontal_flip { + } + } + data_augmentation_options { + random_crop_image { + min_object_covered: 0.0 + min_aspect_ratio: 0.75 + max_aspect_ratio: 3.0 + min_area: 0.75 + max_area: 1.0 + overlap_thresh: 0.0 + } + } + optimizer { + momentum_optimizer: { + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 25000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__init__.py @@ -0,0 +1 @@ + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20854c3b3c67afa5b96c6f828feef4f60a47e938 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66867758df2f028f36c677546dd97f4871217da9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/anchor_generator.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/anchor_generator.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d9ef469703952a49e04c9a51acc3a1418fb7b2a Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/anchor_generator.cpython-36.pyc differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/balanced_positive_negative_sampler.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/balanced_positive_negative_sampler.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70874f387dd10b29bb95f809a27c8d4ee4b26abb Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/balanced_positive_negative_sampler.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/batcher.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/batcher.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f132b62876cc91b9c24eb6b3895266a3eab27b7 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/batcher.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_coder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_coder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89ebf81f264b0e42ccacecfed8e0e8a6c0bbaf60 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_coder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_list.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_list.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8933b1d5f5a1587c47b48801adb406275c37e72e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_list.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_list_ops.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_list_ops.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de80d3d289a8a3eee31376656a43d95313099d72 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_list_ops.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_predictor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_predictor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5603ac54bee5a61f574bba63c5d6c7b6763962da Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/box_predictor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/data_decoder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/data_decoder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a167fe6477cc6a7bc83ce073f5697302f5673fe8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/data_decoder.cpython-36.pyc differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/densepose_ops.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/densepose_ops.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7d1af00201413908ca4c7b99e9829bb246880f9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/densepose_ops.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/freezable_batch_norm.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/freezable_batch_norm.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dac4eaf460bd314e7a23b0a88ef5f09eda31cd4 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/freezable_batch_norm.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/keypoint_ops.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/keypoint_ops.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7351571610cf33e085d01040ba3be00c7f27f8be Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/keypoint_ops.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/losses.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/losses.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afdadb0fce427785f94fb013affa6c86d9394fa2 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/losses.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/matcher.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/matcher.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd778ce5302b3747291976ca3e18f3fe445b0942 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/matcher.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/minibatch_sampler.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/minibatch_sampler.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03ceae7ff21b9e36746e2c51a8a2b6bac558fae9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/minibatch_sampler.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/model.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/model.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b0e09405d0e799d022464517ba6011445b94695 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/model.cpython-36.pyc differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/post_processing.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/post_processing.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39bef890122689e7a6ac71eeff8e120849db7952 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/post_processing.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/prefetcher.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/prefetcher.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0c15de4663a0000b90487c1956bf6b929c068f8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/prefetcher.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/preprocessor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/preprocessor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3b5b73262a41591482c8847b0f3e8ee173370c8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/preprocessor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/preprocessor_cache.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/preprocessor_cache.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24f79a5efd0ad2b55f0e16e36f631d31d5f52ed6 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/preprocessor_cache.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/region_similarity_calculator.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/region_similarity_calculator.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..577a34254eb7d28f9df8f8cf3d8395ad89c3915a Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/region_similarity_calculator.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/standard_fields.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/standard_fields.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71f119f0c1e4265944d978796d1894d9a18c7c91 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/standard_fields.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/target_assigner.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/target_assigner.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6266007f7559d2e5e5027964b710b0010b8fe6d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/__pycache__/target_assigner.cpython-36.pyc differ 
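Illustrative usage note (not part of the committed files): the ssd_resnet50/101/152_v1_fpn_*_coco17_tpu-8.config files added above are stock TF2 Object Detection API RetinaNet pipelines. They still carry PATH_TO_BE_CONFIGURED placeholders, num_classes: 90 and a TPU-8 batch_size of 64, so they act as templates rather than trainable configs. Below is a minimal sketch, assuming the bundled object_detection package is importable, of how such a template is typically loaded and overridden before fine-tuning; every path, class count and batch size in it is a hypothetical placeholder, not something introduced by this change.

# sketch_override_pipeline.py -- illustrative only; all paths/values below are hypothetical
from object_detection.utils import config_util

# Load one of the committed template configs (path relative to this repository).
PIPELINE = ('workspace/virtuallab/src/robot_classifier/scripts/object_detection/'
            'configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config')
configs = config_util.get_configs_from_pipeline_file(PIPELINE)

# Swap the COCO placeholders for project-specific values (all hypothetical).
configs['model'].ssd.num_classes = 2                                   # project class count
configs['train_config'].batch_size = 8                                 # GPU-sized batch instead of TPU-8
configs['train_config'].fine_tune_checkpoint = 'pretrained/ckpt-0'
configs['train_config'].fine_tune_checkpoint_type = 'detection'
configs['train_input_config'].label_map_path = 'annotations/label_map.pbtxt'
configs['train_input_config'].tf_record_input_reader.input_path[:] = ['annotations/train.record']
configs['eval_input_config'].label_map_path = 'annotations/label_map.pbtxt'
configs['eval_input_config'].tf_record_input_reader.input_path[:] = ['annotations/test.record']

# Serialize the merged proto back to a pipeline.config that model_main_tf2.py can consume.
pipeline_proto = config_util.create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_proto, 'models/ssd_resnet50_lab')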
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/anchor_generator.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..69e29d84db8817c79f00f4fdf4ee4aa14b9828a1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/anchor_generator.py @@ -0,0 +1,171 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base anchor generator. + +The job of the anchor generator is to create (or load) a collection +of bounding boxes to be used as anchors. + +Generated anchors are assumed to match some convolutional grid or list of grid +shapes. For example, we might want to generate anchors matching an 8x8 +feature map and a 4x4 feature map. If we place 3 anchors per grid location +on the first feature map and 6 anchors per grid location on the second feature +map, then 3*8*8 + 6*4*4 = 288 anchors are generated in total. + +To support fully convolutional settings, feature map shapes are passed +dynamically at generation time. The number of anchors to place at each location +is static --- implementations of AnchorGenerator must always be able return +the number of anchors that it uses per location for each feature map. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import ABCMeta +from abc import abstractmethod + +import six +from six.moves import zip +import tensorflow.compat.v1 as tf + + +class AnchorGenerator(six.with_metaclass(ABCMeta, object)): + """Abstract base class for anchor generators.""" + + @abstractmethod + def name_scope(self): + """Name scope. + + Must be defined by implementations. + + Returns: + a string representing the name scope of the anchor generation operation. + """ + pass + + @property + def check_num_anchors(self): + """Whether to dynamically check the number of anchors generated. + + Can be overridden by implementations that would like to disable this + behavior. + + Returns: + a boolean controlling whether the Generate function should dynamically + check the number of anchors generated against the mathematically + expected number of anchors. + """ + return True + + @abstractmethod + def num_anchors_per_location(self): + """Returns the number of anchors per spatial location. + + Returns: + a list of integers, one for each expected feature map to be passed to + the `generate` function. + """ + pass + + def generate(self, feature_map_shape_list, **params): + """Generates a collection of bounding boxes to be used as anchors. + + TODO(rathodv): remove **params from argument list and make stride and + offsets (for multiple_grid_anchor_generator) constructor arguments. 
+ + Args: + feature_map_shape_list: list of (height, width) pairs in the format + [(height_0, width_0), (height_1, width_1), ...] that the generated + anchors must align with. Pairs can be provided as 1-dimensional + integer tensors of length 2 or simply as tuples of integers. + **params: parameters for anchor generation op + + Returns: + boxes_list: a list of BoxLists each holding anchor boxes corresponding to + the input feature map shapes. + + Raises: + ValueError: if the number of feature map shapes does not match the length + of NumAnchorsPerLocation. + """ + if self.check_num_anchors and ( + len(feature_map_shape_list) != len(self.num_anchors_per_location())): + raise ValueError('Number of feature maps is expected to equal the length ' + 'of `num_anchors_per_location`.') + with tf.name_scope(self.name_scope()): + anchors_list = self._generate(feature_map_shape_list, **params) + if self.check_num_anchors: + with tf.control_dependencies([ + self._assert_correct_number_of_anchors( + anchors_list, feature_map_shape_list)]): + for item in anchors_list: + item.set(tf.identity(item.get())) + return anchors_list + + @abstractmethod + def _generate(self, feature_map_shape_list, **params): + """To be overridden by implementations. + + Args: + feature_map_shape_list: list of (height, width) pairs in the format + [(height_0, width_0), (height_1, width_1), ...] that the generated + anchors must align with. + **params: parameters for anchor generation op + + Returns: + boxes_list: a list of BoxList, each holding a collection of N anchor + boxes. + """ + pass + + def anchor_index_to_feature_map_index(self, boxlist_list): + """Returns a 1-D array of feature map indices for each anchor. + + Args: + boxlist_list: a list of Boxlist, each holding a collection of N anchor + boxes. This list is produced in self.generate(). + + Returns: + A [num_anchors] integer array, where each element indicates which feature + map index the anchor belongs to. + """ + feature_map_indices_list = [] + for i, boxes in enumerate(boxlist_list): + feature_map_indices_list.append( + i * tf.ones([boxes.num_boxes()], dtype=tf.int32)) + return tf.concat(feature_map_indices_list, axis=0) + + def _assert_correct_number_of_anchors(self, anchors_list, + feature_map_shape_list): + """Assert that correct number of anchors was generated. + + Args: + anchors_list: A list of box_list.BoxList object holding anchors generated. + feature_map_shape_list: list of (height, width) pairs in the format + [(height_0, width_0), (height_1, width_1), ...] that the generated + anchors must align with. + Returns: + Op that raises InvalidArgumentError if the number of anchors does not + match the number of expected anchors. 
+ """ + expected_num_anchors = 0 + actual_num_anchors = 0 + for num_anchors_per_location, feature_map_shape, anchors in zip( + self.num_anchors_per_location(), feature_map_shape_list, anchors_list): + expected_num_anchors += (num_anchors_per_location + * feature_map_shape[0] + * feature_map_shape[1]) + actual_num_anchors += anchors.num_boxes() + return tf.assert_equal(expected_num_anchors, actual_num_anchors) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/anchor_generator.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/anchor_generator.pyc new file mode 100644 index 0000000000000000000000000000000000000000..927cd2d75218551cab7cec1a17de0566af4b9843 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/anchor_generator.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/balanced_positive_negative_sampler.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/balanced_positive_negative_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..6e09537d20e449d587d0ac027e1348df294bfb4b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/balanced_positive_negative_sampler.py @@ -0,0 +1,262 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Class to subsample minibatches by balancing positives and negatives. + +Subsamples minibatches based on a pre-specified positive fraction in range +[0,1]. The class presumes there are many more negatives than positive examples: +if the desired batch_size cannot be achieved with the pre-specified positive +fraction, it fills the rest with negative examples. If this is not sufficient +for obtaining the desired batch_size, it returns fewer examples. + +The main function to call is Subsample(self, indicator, labels). For convenience +one can also call SubsampleWeights(self, weights, labels) which is defined in +the minibatch_sampler base class. + +When is_static is True, it implements a method that guarantees static shapes. +It also ensures the length of output of the subsample is always batch_size, even +when number of examples set to True in indicator is less than batch_size. +""" + +import tensorflow.compat.v1 as tf + +from object_detection.core import minibatch_sampler + + +class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler): + """Subsamples minibatches to a desired balance of positives and negatives.""" + + def __init__(self, positive_fraction=0.5, is_static=False): + """Constructs a minibatch sampler. + + Args: + positive_fraction: desired fraction of positive examples (scalar in [0,1]) + in the batch. + is_static: If True, uses an implementation with static shape guarantees. 
+ + Raises: + ValueError: if positive_fraction < 0, or positive_fraction > 1 + """ + if positive_fraction < 0 or positive_fraction > 1: + raise ValueError('positive_fraction should be in range [0,1]. ' + 'Received: %s.' % positive_fraction) + self._positive_fraction = positive_fraction + self._is_static = is_static + + def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size): + """Counts the number of positives and negatives numbers to be sampled. + + Args: + sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains + the signed indices of the examples where the sign is based on the label + value. The examples that cannot be sampled are set to 0. It samples + atmost sample_size*positive_fraction positive examples and remaining + from negative examples. + sample_size: Size of subsamples. + + Returns: + A tuple containing the number of positive and negative labels in the + subsample. + """ + input_length = tf.shape(sorted_indices_tensor)[0] + valid_positive_index = tf.greater(sorted_indices_tensor, + tf.zeros(input_length, tf.int32)) + num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32)) + max_num_positive_samples = tf.constant( + int(sample_size * self._positive_fraction), tf.int32) + num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos) + num_negative_samples = tf.constant(sample_size, + tf.int32) - num_positive_samples + + return num_positive_samples, num_negative_samples + + def _get_values_from_start_and_end(self, input_tensor, num_start_samples, + num_end_samples, total_num_samples): + """slices num_start_samples and last num_end_samples from input_tensor. + + Args: + input_tensor: An int32 tensor of shape [N] to be sliced. + num_start_samples: Number of examples to be sliced from the beginning + of the input tensor. + num_end_samples: Number of examples to be sliced from the end of the + input tensor. + total_num_samples: Sum of is num_start_samples and num_end_samples. This + should be a scalar. + + Returns: + A tensor containing the first num_start_samples and last num_end_samples + from input_tensor. + + """ + input_length = tf.shape(input_tensor)[0] + start_positions = tf.less(tf.range(input_length), num_start_samples) + end_positions = tf.greater_equal( + tf.range(input_length), input_length - num_end_samples) + selected_positions = tf.logical_or(start_positions, end_positions) + selected_positions = tf.cast(selected_positions, tf.float32) + indexed_positions = tf.multiply(tf.cumsum(selected_positions), + selected_positions) + one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1, + total_num_samples, + dtype=tf.float32) + return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32), + one_hot_selector, axes=[0, 0]), tf.int32) + + def _static_subsample(self, indicator, batch_size, labels): + """Returns subsampled minibatch. + + Args: + indicator: boolean tensor of shape [N] whose True entries can be sampled. + N should be a complie time constant. + batch_size: desired batch size. This scalar cannot be None. + labels: boolean tensor of shape [N] denoting positive(=True) and negative + (=False) examples. N should be a complie time constant. + + Returns: + sampled_idx_indicator: boolean tensor of shape [N], True for entries which + are sampled. It ensures the length of output of the subsample is always + batch_size, even when number of examples set to True in indicator is + less than batch_size. + + Raises: + ValueError: if labels and indicator are not 1D boolean tensors. 
+ """ + # Check if indicator and labels have a static size. + if not indicator.shape.is_fully_defined(): + raise ValueError('indicator must be static in shape when is_static is' + 'True') + if not labels.shape.is_fully_defined(): + raise ValueError('labels must be static in shape when is_static is' + 'True') + if not isinstance(batch_size, int): + raise ValueError('batch_size has to be an integer when is_static is' + 'True.') + + input_length = tf.shape(indicator)[0] + + # Set the number of examples set True in indicator to be at least + # batch_size. + num_true_sampled = tf.reduce_sum(tf.cast(indicator, tf.float32)) + additional_false_sample = tf.less_equal( + tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)), + batch_size - num_true_sampled) + indicator = tf.logical_or(indicator, additional_false_sample) + + # Shuffle indicator and label. Need to store the permutation to restore the + # order post sampling. + permutation = tf.random_shuffle(tf.range(input_length)) + indicator = tf.gather(indicator, permutation, axis=0) + labels = tf.gather(labels, permutation, axis=0) + + # index (starting from 1) when indicator is True, 0 when False + indicator_idx = tf.where( + indicator, tf.range(1, input_length + 1), + tf.zeros(input_length, tf.int32)) + + # Replace -1 for negative, +1 for positive labels + signed_label = tf.where( + labels, tf.ones(input_length, tf.int32), + tf.scalar_mul(-1, tf.ones(input_length, tf.int32))) + # negative of index for negative label, positive index for positive label, + # 0 when indicator is False. + signed_indicator_idx = tf.multiply(indicator_idx, signed_label) + sorted_signed_indicator_idx = tf.nn.top_k( + signed_indicator_idx, input_length, sorted=True).values + + [num_positive_samples, + num_negative_samples] = self._get_num_pos_neg_samples( + sorted_signed_indicator_idx, batch_size) + + sampled_idx = self._get_values_from_start_and_end( + sorted_signed_indicator_idx, num_positive_samples, + num_negative_samples, batch_size) + + # Shift the indices to start from 0 and remove any samples that are set as + # False. + sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32) + sampled_idx = tf.multiply( + tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32), + sampled_idx) + + sampled_idx_indicator = tf.cast(tf.reduce_sum( + tf.one_hot(sampled_idx, depth=input_length), + axis=0), tf.bool) + + # project back the order based on stored permutations + idx_indicator = tf.scatter_nd( + tf.expand_dims(permutation, -1), sampled_idx_indicator, + shape=(input_length,)) + return idx_indicator + + def subsample(self, indicator, batch_size, labels, scope=None): + """Returns subsampled minibatch. + + Args: + indicator: boolean tensor of shape [N] whose True entries can be sampled. + batch_size: desired batch size. If None, keeps all positive samples and + randomly selects negative samples so that the positive sample fraction + matches self._positive_fraction. It cannot be None is is_static is True. + labels: boolean tensor of shape [N] denoting positive(=True) and negative + (=False) examples. + scope: name scope. + + Returns: + sampled_idx_indicator: boolean tensor of shape [N], True for entries which + are sampled. + + Raises: + ValueError: if labels and indicator are not 1D boolean tensors. 
+ """ + if len(indicator.get_shape().as_list()) != 1: + raise ValueError('indicator must be 1 dimensional, got a tensor of ' + 'shape %s' % indicator.get_shape()) + if len(labels.get_shape().as_list()) != 1: + raise ValueError('labels must be 1 dimensional, got a tensor of ' + 'shape %s' % labels.get_shape()) + if labels.dtype != tf.bool: + raise ValueError('labels should be of type bool. Received: %s' % + labels.dtype) + if indicator.dtype != tf.bool: + raise ValueError('indicator should be of type bool. Received: %s' % + indicator.dtype) + with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'): + if self._is_static: + return self._static_subsample(indicator, batch_size, labels) + + else: + # Only sample from indicated samples + negative_idx = tf.logical_not(labels) + positive_idx = tf.logical_and(labels, indicator) + negative_idx = tf.logical_and(negative_idx, indicator) + + # Sample positive and negative samples separately + if batch_size is None: + max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32)) + else: + max_num_pos = int(self._positive_fraction * batch_size) + sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos) + num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32)) + if batch_size is None: + negative_positive_ratio = ( + 1 - self._positive_fraction) / self._positive_fraction + max_num_neg = tf.cast( + negative_positive_ratio * + tf.cast(num_sampled_pos, dtype=tf.float32), + dtype=tf.int32) + else: + max_num_neg = batch_size - num_sampled_pos + sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg) + + return tf.logical_or(sampled_pos_idx, sampled_neg_idx) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/balanced_positive_negative_sampler.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/balanced_positive_negative_sampler.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03838230390e344dd6623e107b2a3a627d521150 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/balanced_positive_negative_sampler.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/balanced_positive_negative_sampler_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/balanced_positive_negative_sampler_test.py new file mode 100644 index 0000000000000000000000000000000000000000..10b8ca740448c776b3ed8c642ba5722776175e5e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/balanced_positive_negative_sampler_test.py @@ -0,0 +1,212 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.balanced_positive_negative_sampler.""" + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import balanced_positive_negative_sampler +from object_detection.utils import test_case + + +class BalancedPositiveNegativeSamplerTest(test_case.TestCase): + + def test_subsample_all_examples(self): + if self.has_tpu(): return + numpy_labels = np.random.permutation(300) + indicator = np.array(np.ones(300) == 1, np.bool) + numpy_labels = (numpy_labels - 200) > 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_cpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 64) + self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 32) + self.assertEqual(sum(np.logical_and( + np.logical_not(numpy_labels), is_sampled)), 32) + + def test_subsample_all_examples_static(self): + if not self.has_tpu(): return + numpy_labels = np.random.permutation(300) + indicator = np.array(np.ones(300) == 1, np.bool) + numpy_labels = (numpy_labels - 200) > 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( + is_static=True)) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_tpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 64) + self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 32) + self.assertEqual(sum(np.logical_and( + np.logical_not(numpy_labels), is_sampled)), 32) + + def test_subsample_selection(self): + if self.has_tpu(): return + # Test random sampling when only some examples can be sampled: + # 100 samples, 20 positives, 10 positives cannot be sampled. + numpy_labels = np.arange(100) + numpy_indicator = numpy_labels < 90 + indicator = np.array(numpy_indicator, np.bool) + numpy_labels = (numpy_labels - 80) >= 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_cpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 64) + self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) + self.assertEqual(sum(np.logical_and( + np.logical_not(numpy_labels), is_sampled)), 54) + self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator)) + + def test_subsample_selection_static(self): + if not self.has_tpu(): return + # Test random sampling when only some examples can be sampled: + # 100 samples, 20 positives, 10 positives cannot be sampled. 
+ numpy_labels = np.arange(100) + numpy_indicator = numpy_labels < 90 + indicator = np.array(numpy_indicator, np.bool) + numpy_labels = (numpy_labels - 80) >= 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( + is_static=True)) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_tpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 64) + self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) + self.assertEqual(sum(np.logical_and( + np.logical_not(numpy_labels), is_sampled)), 54) + self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator)) + + def test_subsample_selection_larger_batch_size(self): + if self.has_tpu(): return + # Test random sampling when total number of examples that can be sampled are + # less than batch size: + # 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64. + # It should still return 64 samples, with 4 of them that couldn't have been + # sampled. + numpy_labels = np.arange(100) + numpy_indicator = numpy_labels < 60 + indicator = np.array(numpy_indicator, np.bool) + numpy_labels = (numpy_labels - 50) >= 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_cpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 60) + self.assertGreaterEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) + self.assertGreaterEqual( + sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)), 50) + self.assertEqual(sum(np.logical_and(is_sampled, numpy_indicator)), 60) + + def test_subsample_selection_larger_batch_size_static(self): + if not self.has_tpu(): return + # Test random sampling when total number of examples that can be sampled are + # less than batch size: + # 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64. + # It should still return 64 samples, with 4 of them that couldn't have been + # sampled. + numpy_labels = np.arange(100) + numpy_indicator = numpy_labels < 60 + indicator = np.array(numpy_indicator, np.bool) + numpy_labels = (numpy_labels - 50) >= 0 + + labels = np.array(numpy_labels, np.bool) + + def graph_fn(indicator, labels): + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( + is_static=True)) + return sampler.subsample(indicator, 64, labels) + + is_sampled = self.execute_tpu(graph_fn, [indicator, labels]) + self.assertEqual(sum(is_sampled), 64) + self.assertGreaterEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) + self.assertGreaterEqual( + sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)), 50) + self.assertEqual(sum(np.logical_and(is_sampled, numpy_indicator)), 60) + + def test_subsample_selection_no_batch_size(self): + if self.has_tpu(): return + # Test random sampling when only some examples can be sampled: + # 1000 samples, 6 positives (5 can be sampled). + numpy_labels = np.arange(1000) + numpy_indicator = numpy_labels < 999 + numpy_labels = (numpy_labels - 994) >= 0 + + def graph_fn(indicator, labels): + sampler = (balanced_positive_negative_sampler. 
+ BalancedPositiveNegativeSampler(0.01)) + is_sampled = sampler.subsample(indicator, None, labels) + return is_sampled + is_sampled_out = self.execute_cpu(graph_fn, [numpy_indicator, numpy_labels]) + self.assertEqual(sum(is_sampled_out), 500) + self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled_out)), 5) + self.assertEqual(sum(np.logical_and( + np.logical_not(numpy_labels), is_sampled_out)), 495) + self.assertAllEqual(is_sampled_out, np.logical_and(is_sampled_out, + numpy_indicator)) + + def test_subsample_selection_no_batch_size_static(self): + labels = tf.constant([[True, False, False]]) + indicator = tf.constant([True, False, True]) + sampler = ( + balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) + with self.assertRaises(ValueError): + sampler.subsample(indicator, None, labels) + + def test_raises_error_with_incorrect_label_shape(self): + labels = tf.constant([[True, False, False]]) + indicator = tf.constant([True, False, True]) + sampler = (balanced_positive_negative_sampler. + BalancedPositiveNegativeSampler()) + with self.assertRaises(ValueError): + sampler.subsample(indicator, 64, labels) + + def test_raises_error_with_incorrect_indicator_shape(self): + labels = tf.constant([True, False, False]) + indicator = tf.constant([[True, False, True]]) + sampler = (balanced_positive_negative_sampler. + BalancedPositiveNegativeSampler()) + with self.assertRaises(ValueError): + sampler.subsample(indicator, 64, labels) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batch_multiclass_nms_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batch_multiclass_nms_test.py new file mode 100644 index 0000000000000000000000000000000000000000..06f17103b2b6bd7df5d449a270f0bddfd3514249 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batch_multiclass_nms_test.py @@ -0,0 +1,686 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for google3.third_party.tensorflow_models.object_detection.core.batch_multiclass_nms.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from absl.testing import parameterized +import numpy as np +from six.moves import range +import tensorflow.compat.v1 as tf +from object_detection.core import post_processing +from object_detection.utils import test_case + + +class BatchMulticlassNonMaxSuppressionTest(test_case.TestCase, + parameterized.TestCase): + + def test_batch_multiclass_nms_with_batch_size_1(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]], + [[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = [[[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 999, 2, 1004], + [0, 100, 1, 101]]] + exp_nms_scores = [[.95, .9, .85, .3]] + exp_nms_classes = [[0, 0, 1, 0]] + def graph_fn(boxes, scores): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, + max_total_size=max_output_size) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertEqual(num_detections, [4]) + + def test_batch_iou_with_negative_data(self): + def graph_fn(): + boxes = tf.constant([[[0, -0.01, 0.1, 1.1], [0, 0.2, 0.2, 5.0], + [0, -0.01, 0.1, 1.], [-1, -1, -1, -1]]], tf.float32) + iou = post_processing.batch_iou(boxes, boxes) + return iou + iou = self.execute_cpu(graph_fn, []) + expected_iou = [[[0.99999994, 0.0917431, 0.9099099, -1.], + [0.0917431, 1., 0.08154944, -1.], + [0.9099099, 0.08154944, 1., -1.], [-1., -1., -1., -1.]]] + self.assertAllClose(iou, expected_iou) + + @parameterized.parameters(False, True) + def test_batch_multiclass_nms_with_batch_size_2(self, use_dynamic_map_fn): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 999, 2, 1004], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.95, .9, 0, 0], + 
[.85, .5, .3, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [1, 0, 0, 0]]) + def graph_fn(boxes, scores): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, + max_total_size=max_output_size, + use_dynamic_map_fn=use_dynamic_map_fn) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + # Check static shapes + self.assertAllEqual(nmsed_boxes.shape.as_list(), + exp_nms_corners.shape) + self.assertAllEqual(nmsed_scores.shape.as_list(), + exp_nms_scores.shape) + self.assertAllEqual(nmsed_classes.shape.as_list(), + exp_nms_classes.shape) + self.assertEqual(num_detections.shape.as_list(), [2]) + return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(num_detections, [2, 3]) + + def test_batch_multiclass_nms_with_per_batch_clip_window(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + clip_window = np.array([0., 0., 200., 200.], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.95, .9, 0, 0], + [.5, .3, 0, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [0, 0, 0, 0]]) + def graph_fn(boxes, scores, clip_window): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + clip_window=clip_window) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + # Check static shapes + self.assertAllEqual(nmsed_boxes.shape.as_list(), + exp_nms_corners.shape) + self.assertAllEqual(nmsed_scores.shape.as_list(), + exp_nms_scores.shape) + self.assertAllEqual(nmsed_classes.shape.as_list(), + exp_nms_classes.shape) + self.assertEqual(num_detections.shape.as_list(), [2]) + return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, clip_window]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(num_detections, [2, 2]) + + def test_batch_multiclass_nms_with_per_image_clip_window(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + 
[[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + clip_window = np.array([[0., 0., 5., 5.], + [0., 0., 200., 200.]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.9, 0., 0., 0.], + [.5, .3, 0, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [0, 0, 0, 0]]) + + def graph_fn(boxes, scores, clip_window): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + clip_window=clip_window) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + # Check static shapes + self.assertAllEqual(nmsed_boxes.shape.as_list(), + exp_nms_corners.shape) + self.assertAllEqual(nmsed_scores.shape.as_list(), + exp_nms_scores.shape) + self.assertAllEqual(nmsed_classes.shape.as_list(), + exp_nms_classes.shape) + self.assertEqual(num_detections.shape.as_list(), [2]) + return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, clip_window]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(num_detections, [1, 2]) + + def test_batch_multiclass_nms_with_masks(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + masks = np.array([[[[[0, 1], [2, 3]], [[1, 2], [3, 4]]], + [[[2, 3], [4, 5]], [[3, 4], [5, 6]]], + [[[4, 5], [6, 7]], [[5, 6], [7, 8]]], + [[[6, 7], [8, 9]], [[7, 8], [9, 10]]]], + [[[[8, 9], [10, 11]], [[9, 10], [11, 12]]], + [[[10, 11], [12, 13]], [[11, 12], [13, 14]]], + [[[12, 13], [14, 15]], [[13, 14], [15, 16]]], + [[[14, 15], [16, 17]], [[15, 16], [17, 18]]]]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 999, 2, 1004], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.95, .9, 0, 0], + [.85, .5, .3, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [1, 0, 0, 0]]) + exp_nms_masks = np.array([[[[6, 7], [8, 9]], + [[0, 1], [2, 3]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[13, 14], [15, 16]], + [[8, 9], [10, 11]], + [[10, 11], [12, 13]], + [[0, 0], [0, 0]]]]) + + def graph_fn(boxes, scores, masks): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = 
post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + masks=masks) + self.assertIsNone(nmsed_additional_fields) + # Check static shapes + self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape) + self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape) + self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape) + self.assertAllEqual(nmsed_masks.shape.as_list(), exp_nms_masks.shape) + self.assertEqual(num_detections.shape.as_list(), [2]) + return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, masks]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(num_detections, [2, 3]) + self.assertAllClose(nmsed_masks, exp_nms_masks) + + def test_batch_multiclass_nms_with_additional_fields(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + keypoints = np.array( + [[[[6, 7], [8, 9]], + [[0, 1], [2, 3]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[13, 14], [15, 16]], + [[8, 9], [10, 11]], + [[10, 11], [12, 13]], + [[0, 0], [0, 0]]]], + np.float32) + size = np.array( + [[[[6], [8]], [[0], [2]], [[0], [0]], [[0], [0]]], + [[[13], [15]], [[8], [10]], [[10], [12]], [[0], [0]]]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 999, 2, 1004], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.95, .9, 0, 0], + [.85, .5, .3, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [1, 0, 0, 0]]) + exp_nms_additional_fields = { + 'keypoints': np.array([[[[0, 0], [0, 0]], + [[6, 7], [8, 9]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[10, 11], [12, 13]], + [[13, 14], [15, 16]], + [[8, 9], [10, 11]], + [[0, 0], [0, 0]]]]) + } + exp_nms_additional_fields['size'] = np.array([[[[0], [0]], [[6], [8]], + [[0], [0]], [[0], [0]]], + [[[10], [12]], [[13], [15]], + [[8], [10]], [[0], [0]]]]) + + def graph_fn(boxes, scores, keypoints, size): + additional_fields = {'keypoints': keypoints, 'size': size} + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + additional_fields=additional_fields) + self.assertIsNone(nmsed_masks) + # Check static shapes + self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape) + self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape) + self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape) + self.assertEqual(len(nmsed_additional_fields), + len(exp_nms_additional_fields)) + for key in 
exp_nms_additional_fields: + self.assertAllEqual(nmsed_additional_fields[key].shape.as_list(), + exp_nms_additional_fields[key].shape) + self.assertEqual(num_detections.shape.as_list(), [2]) + return (nmsed_boxes, nmsed_scores, nmsed_classes, + nmsed_additional_fields['keypoints'], + nmsed_additional_fields['size'], + num_detections) + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_keypoints, nmsed_size, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, keypoints, + size]) + + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(nmsed_keypoints, + exp_nms_additional_fields['keypoints']) + self.assertAllClose(nmsed_size, + exp_nms_additional_fields['size']) + self.assertAllClose(num_detections, [2, 3]) + + def test_batch_multiclass_nms_with_masks_and_num_valid_boxes(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + masks = np.array([[[[[0, 1], [2, 3]], [[1, 2], [3, 4]]], + [[[2, 3], [4, 5]], [[3, 4], [5, 6]]], + [[[4, 5], [6, 7]], [[5, 6], [7, 8]]], + [[[6, 7], [8, 9]], [[7, 8], [9, 10]]]], + [[[[8, 9], [10, 11]], [[9, 10], [11, 12]]], + [[[10, 11], [12, 13]], [[11, 12], [13, 14]]], + [[[12, 13], [14, 15]], [[13, 14], [15, 16]]], + [[[14, 15], [16, 17]], [[15, 16], [17, 18]]]]], + np.float32) + num_valid_boxes = np.array([1, 1], np.int32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = [[[0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 10.1, 1, 11.1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_nms_scores = [[.9, 0, 0, 0], + [.5, 0, 0, 0]] + exp_nms_classes = [[0, 0, 0, 0], + [0, 0, 0, 0]] + exp_nms_masks = [[[[0, 1], [2, 3]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[8, 9], [10, 11]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]]] + + def graph_fn(boxes, scores, masks, num_valid_boxes): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + masks=masks, num_valid_boxes=num_valid_boxes) + self.assertIsNone(nmsed_additional_fields) + return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, masks, + num_valid_boxes]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(num_detections, [1, 1]) + self.assertAllClose(nmsed_masks, exp_nms_masks) + + def test_batch_multiclass_nms_with_additional_fields_and_num_valid_boxes( + self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], 
[0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], np.float32) + keypoints = np.array( + [[[[6, 7], [8, 9]], + [[0, 1], [2, 3]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[13, 14], [15, 16]], + [[8, 9], [10, 11]], + [[10, 11], [12, 13]], + [[0, 0], [0, 0]]]], + np.float32) + size = np.array( + [[[[7], [9]], [[1], [3]], [[0], [0]], [[0], [0]]], + [[[14], [16]], [[9], [11]], [[11], [13]], [[0], [0]]]], np.float32) + + num_valid_boxes = np.array([1, 1], np.int32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = [[[0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 10.1, 1, 11.1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_nms_scores = [[.9, 0, 0, 0], + [.5, 0, 0, 0]] + exp_nms_classes = [[0, 0, 0, 0], + [0, 0, 0, 0]] + exp_nms_additional_fields = { + 'keypoints': np.array([[[[6, 7], [8, 9]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]], + [[[13, 14], [15, 16]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]]]) + } + + exp_nms_additional_fields['size'] = np.array([[[[7], [9]], [[0], [0]], + [[0], [0]], [[0], [0]]], + [[[14], [16]], [[0], [0]], + [[0], [0]], [[0], [0]]]]) + def graph_fn(boxes, scores, keypoints, size, num_valid_boxes): + additional_fields = {'keypoints': keypoints, 'size': size} + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + num_valid_boxes=num_valid_boxes, + additional_fields=additional_fields) + self.assertIsNone(nmsed_masks) + return (nmsed_boxes, nmsed_scores, nmsed_classes, + nmsed_additional_fields['keypoints'], + nmsed_additional_fields['size'], num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_keypoints, nmsed_size, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores, keypoints, + size, num_valid_boxes]) + + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertAllClose(nmsed_keypoints, + exp_nms_additional_fields['keypoints']) + self.assertAllClose(nmsed_size, + exp_nms_additional_fields['size']) + self.assertAllClose(num_detections, [1, 1]) + + def test_combined_nms_with_batch_size_2(self): + """Test use_combined_nms.""" + boxes = np.array([[[[0, 0, 0.1, 0.1], [0, 0, 0.1, 0.1]], + [[0, 0.01, 1, 0.11], [0, 0.6, 0.1, 0.7]], + [[0, -0.01, 0.1, 0.09], [0, -0.1, 0.1, 0.09]], + [[0, 0.11, 0.1, 0.2], [0, 0.11, 0.1, 0.2]]], + [[[0, 0, 0.2, 0.2], [0, 0, 0.2, 0.2]], + [[0, 0.02, 0.2, 0.22], [0, 0.02, 0.2, 0.22]], + [[0, -0.02, 0.2, 0.19], [0, -0.02, 0.2, 0.19]], + [[0, 0.21, 0.2, 0.3], [0, 0.21, 0.2, 0.3]]]], + np.float32) + scores = np.array([[[.1, 0.9], [.75, 0.8], + [.6, 0.3], [0.95, 0.1]], + [[.1, 0.9], [.75, 0.8], + [.6, .3], [.95, .1]]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 3 + + exp_nms_corners = np.array([[[0, 0.11, 0.1, 0.2], + [0, 0, 0.1, 0.1], + [0, 0.6, 0.1, 0.7]], + [[0, 0.21, 0.2, 0.3], + [0, 0, 0.2, 0.2], + [0, 0.02, 0.2, 0.22]]]) + exp_nms_scores = np.array([[.95, .9, 0.8], + [.95, .9, .75]]) + exp_nms_classes = np.array([[0, 1, 1], + 
[0, 1, 0]]) + + def graph_fn(boxes, scores): + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, max_total_size=max_output_size, + use_static_shapes=True, + use_combined_nms=True) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, [boxes, scores]) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertListEqual(num_detections.tolist(), [3, 3]) + + def test_batch_multiclass_nms_with_use_static_shapes(self): + boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], + [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], + [[0, 10, 1, 11], [0, 10, 1, 11]]], + [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], + np.float32) + scores = np.array([[[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0]], + [[.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]]], + np.float32) + clip_window = np.array([[0., 0., 5., 5.], + [0., 0., 200., 200.]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[[0, 0, 1, 1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 10.1, 1, 11.1], + [0, 100, 1, 101], + [0, 0, 0, 0], + [0, 0, 0, 0]]]) + exp_nms_scores = np.array([[.9, 0., 0., 0.], + [.5, .3, 0, 0]]) + exp_nms_classes = np.array([[0, 0, 0, 0], + [0, 0, 0, 0]]) + + def graph_fn(boxes, scores, clip_window): + (nmsed_boxes, nmsed_scores, nmsed_classes, _, _, num_detections + ) = post_processing.batch_multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, + max_size_per_class=max_output_size, clip_window=clip_window, + use_static_shapes=True) + return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections + + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute(graph_fn, [boxes, scores, clip_window]) + for i in range(len(num_detections)): + self.assertAllClose(nmsed_boxes[i, 0:num_detections[i]], + exp_nms_corners[i, 0:num_detections[i]]) + self.assertAllClose(nmsed_scores[i, 0:num_detections[i]], + exp_nms_scores[i, 0:num_detections[i]]) + self.assertAllClose(nmsed_classes[i, 0:num_detections[i]], + exp_nms_classes[i, 0:num_detections[i]]) + self.assertAllClose(num_detections, [1, 2]) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batcher.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batcher.py new file mode 100644 index 0000000000000000000000000000000000000000..26832e30efa43a15436070e8676b1d020712a794 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batcher.py @@ -0,0 +1,141 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Provides functions to batch a dictionary of input tensors.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.core import prefetcher + +rt_shape_str = '_runtime_shapes' + + +class BatchQueue(object): + """BatchQueue class. + + This class creates a batch queue to asynchronously enqueue tensors_dict. + It also adds a FIFO prefetcher so that the batches are readily available + for the consumers. Dequeue ops for a BatchQueue object can be created via + the Dequeue method which evaluates to a batch of tensor_dict. + + Example input pipeline with batching: + ------------------------------------ + key, string_tensor = slim.parallel_reader.parallel_read(...) + tensor_dict = decoder.decode(string_tensor) + tensor_dict = preprocessor.preprocess(tensor_dict, ...) + batch_queue = batcher.BatchQueue(tensor_dict, + batch_size=32, + batch_queue_capacity=2000, + num_batch_queue_threads=8, + prefetch_queue_capacity=20) + tensor_dict = batch_queue.dequeue() + outputs = Model(tensor_dict) + ... + ----------------------------------- + + Notes: + ----- + This class batches tensors of unequal sizes by zero padding and unpadding + them after generating a batch. This can be computationally expensive when + batching tensors (such as images) that are of vastly different sizes. So it is + recommended that the shapes of such tensors be fully defined in tensor_dict + while other lightweight tensors such as bounding box corners and class labels + can be of varying sizes. Use either crop or resize operations to fully define + the shape of an image in tensor_dict. + + It is also recommended to perform any preprocessing operations on tensors + before passing to BatchQueue and subsequently calling the Dequeue method. + + Another caveat is that this class does not read the last batch if it is not + full. The current implementation makes it hard to support that use case. So, + for evaluation, when it is critical to run all the examples through your + network use the input pipeline example mentioned in core/prefetcher.py. + """ + + def __init__(self, tensor_dict, batch_size, batch_queue_capacity, + num_batch_queue_threads, prefetch_queue_capacity): + """Constructs a batch queue holding tensor_dict. + + Args: + tensor_dict: dictionary of tensors to batch. + batch_size: batch size. + batch_queue_capacity: max capacity of the queue from which the tensors are + batched. + num_batch_queue_threads: number of threads to use for batching. + prefetch_queue_capacity: max capacity of the queue used to prefetch + assembled batches. + """ + # Remember static shapes to set shapes of batched tensors. + static_shapes = collections.OrderedDict( + {key: tensor.get_shape() for key, tensor in tensor_dict.items()}) + # Remember runtime shapes to unpad tensors after batching. 
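+    # tf.train.batch(..., dynamic_pad=True) zero-pads every tensor up to the
+    # largest size present in the batch. The pre-batch shape of each tensor is
+    # therefore enqueued alongside the data under the same key with a
+    # '_runtime_shapes' suffix, so dequeue() can slice the padding back off and
+    # restore the recorded static shape.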
+ runtime_shapes = collections.OrderedDict( + {(key + rt_shape_str): tf.shape(tensor) + for key, tensor in tensor_dict.items()}) + + all_tensors = tensor_dict + all_tensors.update(runtime_shapes) + batched_tensors = tf.train.batch( + all_tensors, + capacity=batch_queue_capacity, + batch_size=batch_size, + dynamic_pad=True, + num_threads=num_batch_queue_threads) + + self._queue = prefetcher.prefetch(batched_tensors, + prefetch_queue_capacity) + self._static_shapes = static_shapes + self._batch_size = batch_size + + def dequeue(self): + """Dequeues a batch of tensor_dict from the BatchQueue. + + TODO: use allow_smaller_final_batch to allow running over the whole eval set + + Returns: + A list of tensor_dicts of the requested batch_size. + """ + batched_tensors = self._queue.dequeue() + # Separate input tensors from tensors containing their runtime shapes. + tensors = {} + shapes = {} + for key, batched_tensor in batched_tensors.items(): + unbatched_tensor_list = tf.unstack(batched_tensor) + for i, unbatched_tensor in enumerate(unbatched_tensor_list): + if rt_shape_str in key: + shapes[(key[:-len(rt_shape_str)], i)] = unbatched_tensor + else: + tensors[(key, i)] = unbatched_tensor + + # Undo that padding using shapes and create a list of size `batch_size` that + # contains tensor dictionaries. + tensor_dict_list = [] + batch_size = self._batch_size + for batch_id in range(batch_size): + tensor_dict = {} + for key in self._static_shapes: + tensor_dict[key] = tf.slice(tensors[(key, batch_id)], + tf.zeros_like(shapes[(key, batch_id)]), + shapes[(key, batch_id)]) + tensor_dict[key].set_shape(self._static_shapes[key]) + tensor_dict_list.append(tensor_dict) + + return tensor_dict_list diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batcher.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batcher.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31c60a8a73fc4dcb89ccc27fc915715e59edfbbc Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batcher.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batcher_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batcher_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1688b87cdf08bc29ddb2413776757066047c80da --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/batcher_tf1_test.py @@ -0,0 +1,165 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.batcher.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import numpy as np +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.core import batcher +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class BatcherTest(tf.test.TestCase): + + def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(self): + with self.test_session() as sess: + batch_size = 3 + num_batches = 2 + examples = tf.Variable(tf.constant(2, dtype=tf.int32)) + counter = examples.count_up_to(num_batches * batch_size + 2) + boxes = tf.tile( + tf.reshape(tf.range(4), [1, 4]), tf.stack([counter, tf.constant(1)])) + batch_queue = batcher.BatchQueue( + tensor_dict={'boxes': boxes}, + batch_size=batch_size, + batch_queue_capacity=100, + num_batch_queue_threads=1, + prefetch_queue_capacity=100) + batch = batch_queue.dequeue() + + for tensor_dict in batch: + for tensor in tensor_dict.values(): + self.assertAllEqual([None, 4], tensor.get_shape().as_list()) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + i = 2 + for _ in range(num_batches): + batch_np = sess.run(batch) + for tensor_dict in batch_np: + for tensor in tensor_dict.values(): + self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1))) + i += 1 + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(batch) + + def test_batch_and_unpad_2d_tensors_of_different_sizes_in_all_dimensions( + self): + with self.test_session() as sess: + batch_size = 3 + num_batches = 2 + examples = tf.Variable(tf.constant(2, dtype=tf.int32)) + counter = examples.count_up_to(num_batches * batch_size + 2) + image = tf.reshape( + tf.range(counter * counter), tf.stack([counter, counter])) + batch_queue = batcher.BatchQueue( + tensor_dict={'image': image}, + batch_size=batch_size, + batch_queue_capacity=100, + num_batch_queue_threads=1, + prefetch_queue_capacity=100) + batch = batch_queue.dequeue() + + for tensor_dict in batch: + for tensor in tensor_dict.values(): + self.assertAllEqual([None, None], tensor.get_shape().as_list()) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + i = 2 + for _ in range(num_batches): + batch_np = sess.run(batch) + for tensor_dict in batch_np: + for tensor in tensor_dict.values(): + self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i))) + i += 1 + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(batch) + + def test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions(self): + with self.test_session() as sess: + batch_size = 3 + num_batches = 2 + examples = tf.Variable(tf.constant(1, dtype=tf.int32)) + counter = examples.count_up_to(num_batches * batch_size + 1) + image = tf.reshape(tf.range(1, 13), [4, 3]) * counter + batch_queue = batcher.BatchQueue( + tensor_dict={'image': image}, + batch_size=batch_size, + batch_queue_capacity=100, + num_batch_queue_threads=1, + prefetch_queue_capacity=100) + batch = batch_queue.dequeue() + + for tensor_dict in batch: + for tensor in tensor_dict.values(): + self.assertAllEqual([4, 3], tensor.get_shape().as_list()) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + i = 1 + for _ in range(num_batches): + batch_np = sess.run(batch) + for tensor_dict in batch_np: + 
for tensor in tensor_dict.values(): + self.assertAllEqual(tensor, np.arange(1, 13).reshape((4, 3)) * i) + i += 1 + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(batch) + + def test_batcher_when_batch_size_is_one(self): + with self.test_session() as sess: + batch_size = 1 + num_batches = 2 + examples = tf.Variable(tf.constant(2, dtype=tf.int32)) + counter = examples.count_up_to(num_batches * batch_size + 2) + image = tf.reshape( + tf.range(counter * counter), tf.stack([counter, counter])) + batch_queue = batcher.BatchQueue( + tensor_dict={'image': image}, + batch_size=batch_size, + batch_queue_capacity=100, + num_batch_queue_threads=1, + prefetch_queue_capacity=100) + batch = batch_queue.dequeue() + + for tensor_dict in batch: + for tensor in tensor_dict.values(): + self.assertAllEqual([None, None], tensor.get_shape().as_list()) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + i = 2 + for _ in range(num_batches): + batch_np = sess.run(batch) + for tensor_dict in batch_np: + for tensor in tensor_dict.values(): + self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i))) + i += 1 + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(batch) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_coder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..c6e54a44033f4f17dac0976132e1449ea3fedc3d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_coder.py @@ -0,0 +1,158 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base box coder. + +Box coders convert between coordinate frames, namely image-centric +(with (0,0) on the top left of image) and anchor-centric (with (0,0) being +defined by a specific anchor). + +Users of a BoxCoder can call two methods: + encode: which encodes a box with respect to a given anchor + (or rather, a tensor of boxes wrt a corresponding tensor of anchors) and + decode: which inverts this encoding with a decode operation. +In both cases, the arguments are assumed to be in 1-1 correspondence already; +it is not the job of a BoxCoder to perform matching. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import ABCMeta +from abc import abstractmethod +from abc import abstractproperty + +import six +import tensorflow.compat.v1 as tf + +from object_detection.utils import shape_utils + + +# Box coder types. 
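+# (These constants give string names to the concrete coder implementations
+#  shipped with the API, e.g. faster_rcnn_box_coder and mean_stddev_box_coder,
+#  presumably so a coder type can be referred to by name elsewhere in the
+#  codebase.)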
+FASTER_RCNN = 'faster_rcnn' +KEYPOINT = 'keypoint' +MEAN_STDDEV = 'mean_stddev' +SQUARE = 'square' + + +class BoxCoder(six.with_metaclass(ABCMeta, object)): + """Abstract base class for box coder.""" + + @abstractproperty + def code_size(self): + """Return the size of each code. + + This number is a constant and should agree with the output of the `encode` + op (e.g. if rel_codes is the output of self.encode(...), then it should have + shape [N, code_size()]). This abstractproperty should be overridden by + implementations. + + Returns: + an integer constant + """ + pass + + def encode(self, boxes, anchors): + """Encode a box list relative to an anchor collection. + + Args: + boxes: BoxList holding N boxes to be encoded + anchors: BoxList of N anchors + + Returns: + a tensor representing N relative-encoded boxes + """ + with tf.name_scope('Encode'): + return self._encode(boxes, anchors) + + def decode(self, rel_codes, anchors): + """Decode boxes that are encoded relative to an anchor collection. + + Args: + rel_codes: a tensor representing N relative-encoded boxes + anchors: BoxList of anchors + + Returns: + boxlist: BoxList holding N boxes encoded in the ordinary way (i.e., + with corners y_min, x_min, y_max, x_max) + """ + with tf.name_scope('Decode'): + return self._decode(rel_codes, anchors) + + @abstractmethod + def _encode(self, boxes, anchors): + """Method to be overriden by implementations. + + Args: + boxes: BoxList holding N boxes to be encoded + anchors: BoxList of N anchors + + Returns: + a tensor representing N relative-encoded boxes + """ + pass + + @abstractmethod + def _decode(self, rel_codes, anchors): + """Method to be overriden by implementations. + + Args: + rel_codes: a tensor representing N relative-encoded boxes + anchors: BoxList of anchors + + Returns: + boxlist: BoxList holding N boxes encoded in the ordinary way (i.e., + with corners y_min, x_min, y_max, x_max) + """ + pass + + +def batch_decode(encoded_boxes, box_coder, anchors): + """Decode a batch of encoded boxes. + + This op takes a batch of encoded bounding boxes and transforms + them to a batch of bounding boxes specified by their corners in + the order of [y_min, x_min, y_max, x_max]. + + Args: + encoded_boxes: a float32 tensor of shape [batch_size, num_anchors, + code_size] representing the location of the objects. + box_coder: a BoxCoder object. + anchors: a BoxList of anchors used to encode `encoded_boxes`. + + Returns: + decoded_boxes: a float32 tensor of shape [batch_size, num_anchors, + coder_size] representing the corners of the objects in the order + of [y_min, x_min, y_max, x_max]. + + Raises: + ValueError: if batch sizes of the inputs are inconsistent, or if + the number of anchors inferred from encoded_boxes and anchors are + inconsistent. + """ + encoded_boxes.get_shape().assert_has_rank(3) + if (shape_utils.get_dim_as_int(encoded_boxes.get_shape()[1]) + != anchors.num_boxes_static()): + raise ValueError('The number of anchors inferred from encoded_boxes' + ' and anchors are inconsistent: shape[1] of encoded_boxes' + ' %s should be equal to the number of anchors: %s.' 
% + (shape_utils.get_dim_as_int(encoded_boxes.get_shape()[1]), + anchors.num_boxes_static())) + + decoded_boxes = tf.stack([ + box_coder.decode(boxes, anchors).get() + for boxes in tf.unstack(encoded_boxes) + ]) + return decoded_boxes diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_coder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_coder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfc7b25a4c2d9062fbcc9c79be5919f75b2bd156 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_coder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_coder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_coder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..52765a9d06c990c483aaf87dcba3ecfe604d7adc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_coder_test.py @@ -0,0 +1,62 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.box_coder.""" +import tensorflow.compat.v1 as tf + +from object_detection.core import box_coder +from object_detection.core import box_list +from object_detection.utils import test_case + + +class MockBoxCoder(box_coder.BoxCoder): + """Test BoxCoder that encodes/decodes using the multiply-by-two function.""" + + def code_size(self): + return 4 + + def _encode(self, boxes, anchors): + return 2.0 * boxes.get() + + def _decode(self, rel_codes, anchors): + return box_list.BoxList(rel_codes / 2.0) + + +class BoxCoderTest(test_case.TestCase): + + def test_batch_decode(self): + + expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]], + [[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]] + + def graph_fn(): + mock_anchor_corners = tf.constant( + [[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32) + mock_anchors = box_list.BoxList(mock_anchor_corners) + mock_box_coder = MockBoxCoder() + + encoded_boxes_list = [mock_box_coder.encode( + box_list.BoxList(tf.constant(boxes)), mock_anchors) + for boxes in expected_boxes] + encoded_boxes = tf.stack(encoded_boxes_list) + decoded_boxes = box_coder.batch_decode( + encoded_boxes, mock_box_coder, mock_anchors) + return decoded_boxes + decoded_boxes_result = self.execute(graph_fn, []) + self.assertAllClose(expected_boxes, decoded_boxes_result) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list.py new file mode 100644 index 0000000000000000000000000000000000000000..7b6b97e995f483918aa2416c5f3b2e5c8c94a0c3 --- /dev/null +++ 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list.py @@ -0,0 +1,210 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bounding Box List definition. + +BoxList represents a list of bounding boxes as tensorflow +tensors, where each bounding box is represented as a row of 4 numbers, +[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes +within a given list correspond to a single image. See also +box_list_ops.py for common box related operations (such as area, iou, etc). + +Optionally, users can add additional related fields (such as weights). +We assume the following things to be true about fields: +* they correspond to boxes in the box_list along the 0th dimension +* they have inferrable rank at graph construction time +* all dimensions except for possibly the 0th can be inferred + (i.e., not None) at graph construction time. + +Some other notes: + * Following tensorflow conventions, we use height, width ordering, + and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering + * Tensors are always provided as (flat) [N, 4] tensors. +""" + +import tensorflow.compat.v1 as tf + +from object_detection.utils import shape_utils + + +class BoxList(object): + """Box collection.""" + + def __init__(self, boxes): + """Constructs box collection. + + Args: + boxes: a tensor of shape [N, 4] representing box corners + + Raises: + ValueError: if invalid dimensions for bbox data or if bbox data is not in + float32 format. + """ + if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4: + raise ValueError('Invalid dimensions for box data: {}'.format( + boxes.shape)) + if boxes.dtype != tf.float32: + raise ValueError('Invalid tensor type: should be tf.float32') + self.data = {'boxes': boxes} + + def num_boxes(self): + """Returns number of boxes held in collection. + + Returns: + a tensor representing the number of boxes held in the collection. + """ + return tf.shape(self.data['boxes'])[0] + + def num_boxes_static(self): + """Returns number of boxes held in collection. + + This number is inferred at graph construction time rather than run-time. + + Returns: + Number of boxes held in collection (integer) or None if this is not + inferrable at graph construction time. + """ + return shape_utils.get_dim_as_int(self.data['boxes'].get_shape()[0]) + + def get_all_fields(self): + """Returns all fields.""" + return self.data.keys() + + def get_extra_fields(self): + """Returns all non-box fields (i.e., everything not named 'boxes').""" + return [k for k in self.data.keys() if k != 'boxes'] + + def add_field(self, field, field_data): + """Add field to box list. + + This method can be used to add related box data such as + weights/labels, etc. 
+ + Args: + field: a string key to access the data via `get` + field_data: a tensor containing the data to store in the BoxList + """ + self.data[field] = field_data + + def has_field(self, field): + return field in self.data + + def get(self): + """Convenience function for accessing box coordinates. + + Returns: + a tensor with shape [N, 4] representing box coordinates. + """ + return self.get_field('boxes') + + def set(self, boxes): + """Convenience function for setting box coordinates. + + Args: + boxes: a tensor of shape [N, 4] representing box corners + + Raises: + ValueError: if invalid dimensions for bbox data + """ + if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4: + raise ValueError('Invalid dimensions for box data.') + self.data['boxes'] = boxes + + def get_field(self, field): + """Accesses a box collection and associated fields. + + This function returns specified field with object; if no field is specified, + it returns the box coordinates. + + Args: + field: this optional string parameter can be used to specify + a related field to be accessed. + + Returns: + a tensor representing the box collection or an associated field. + + Raises: + ValueError: if invalid field + """ + if not self.has_field(field): + raise ValueError('field ' + str(field) + ' does not exist') + return self.data[field] + + def set_field(self, field, value): + """Sets the value of a field. + + Updates the field of a box_list with a given value. + + Args: + field: (string) name of the field to set value. + value: the value to assign to the field. + + Raises: + ValueError: if the box_list does not have specified field. + """ + if not self.has_field(field): + raise ValueError('field %s does not exist' % field) + self.data[field] = value + + def get_center_coordinates_and_sizes(self, scope=None): + """Computes the center coordinates, height and width of the boxes. + + Args: + scope: name scope of the function. + + Returns: + a list of 4 1-D tensors [ycenter, xcenter, height, width]. + """ + with tf.name_scope(scope, 'get_center_coordinates_and_sizes'): + box_corners = self.get() + ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners)) + width = xmax - xmin + height = ymax - ymin + ycenter = ymin + height / 2. + xcenter = xmin + width / 2. + return [ycenter, xcenter, height, width] + + def transpose_coordinates(self, scope=None): + """Transpose the coordinate representation in a boxlist. + + Args: + scope: name scope of the function. + """ + with tf.name_scope(scope, 'transpose_coordinates'): + y_min, x_min, y_max, x_max = tf.split( + value=self.get(), num_or_size_splits=4, axis=1) + self.set(tf.concat([x_min, y_min, x_max, y_max], 1)) + + def as_tensor_dict(self, fields=None): + """Retrieves specified fields as a dictionary of tensors. + + Args: + fields: (optional) list of fields to return in the dictionary. + If None (default), all fields are returned. + + Returns: + tensor_dict: A dictionary of tensors specified by fields. + + Raises: + ValueError: if specified field is not contained in boxlist. 
+ """ + tensor_dict = {} + if fields is None: + fields = self.get_all_fields() + for field in fields: + if not self.has_field(field): + raise ValueError('boxlist must contain all specified fields') + tensor_dict[field] = self.get_field(field) + return tensor_dict diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list.pyc new file mode 100644 index 0000000000000000000000000000000000000000..523660681bd0259dbb6ad36a5ae75bdc97fad992 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_ops.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..cb457b728449cc637de8ab28f5f60711954d3c27 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_ops.py @@ -0,0 +1,1213 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bounding Box List operations. + +Example box operations that are supported: + * areas: compute bounding box areas + * iou: pairwise intersection-over-union scores + * sq_dist: pairwise distances between bounding boxes + +Whenever box_list_ops functions output a BoxList, the fields of the incoming +BoxList are retained unless documented otherwise. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +class SortOrder(object): + """Enum class for sort order. + + Attributes: + ascend: ascend order. + descend: descend order. + """ + ascend = 1 + descend = 2 + + +def area(boxlist, scope=None): + """Computes area of boxes. + + Args: + boxlist: BoxList holding N boxes + scope: name scope. + + Returns: + a tensor with shape [N] representing box areas. + """ + with tf.name_scope(scope, 'Area'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + return tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) + + +def height_width(boxlist, scope=None): + """Computes height and width of boxes in boxlist. + + Args: + boxlist: BoxList holding N boxes + scope: name scope. + + Returns: + Height: A tensor with shape [N] representing box heights. + Width: A tensor with shape [N] representing box widths. 
+ """ + with tf.name_scope(scope, 'HeightWidth'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1]) + + +def scale(boxlist, y_scale, x_scale, scope=None): + """scale box coordinates in x and y dimensions. + + Args: + boxlist: BoxList holding N boxes + y_scale: (float) scalar tensor + x_scale: (float) scalar tensor + scope: name scope. + + Returns: + boxlist: BoxList holding N boxes + """ + with tf.name_scope(scope, 'Scale'): + y_scale = tf.cast(y_scale, tf.float32) + x_scale = tf.cast(x_scale, tf.float32) + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + y_min = y_scale * y_min + y_max = y_scale * y_max + x_min = x_scale * x_min + x_max = x_scale * x_max + scaled_boxlist = box_list.BoxList( + tf.concat([y_min, x_min, y_max, x_max], 1)) + return _copy_extra_fields(scaled_boxlist, boxlist) + + +def scale_height_width(boxlist, y_scale, x_scale, scope=None): + """Scale the height and width of boxes, leaving centers unchanged. + + Args: + boxlist: BoxList holding N boxes + y_scale: (float) scalar tensor + x_scale: (float) scalar tensor + scope: name scope. + + Returns: + boxlist: BoxList holding N boxes + """ + with tf.name_scope(scope, 'ScaleHeightWidth'): + y_scale = tf.cast(y_scale, tf.float32) + x_scale = tf.cast(x_scale, tf.float32) + yc, xc, height_orig, width_orig = boxlist.get_center_coordinates_and_sizes() + y_min = yc - 0.5 * y_scale * height_orig + y_max = yc + 0.5 * y_scale * height_orig + x_min = xc - 0.5 * x_scale * width_orig + x_max = xc + 0.5 * x_scale * width_orig + scaled_boxlist = box_list.BoxList( + tf.stack([y_min, x_min, y_max, x_max], 1)) + return _copy_extra_fields(scaled_boxlist, boxlist) + + +def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None): + """Clip bounding boxes to a window. + + This op clips any input bounding boxes (represented by bounding box + corners) to a window, optionally filtering out boxes that do not + overlap at all with the window. + + Args: + boxlist: BoxList holding M_in boxes + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window to which the op should clip boxes. + filter_nonoverlapping: whether to filter out boxes that do not overlap at + all with the window. + scope: name scope. + + Returns: + a BoxList holding M_out boxes where M_out <= M_in + """ + with tf.name_scope(scope, 'ClipToWindow'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + win_y_min = window[0] + win_x_min = window[1] + win_y_max = window[2] + win_x_max = window[3] + y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min) + y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min) + x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min) + x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min) + clipped = box_list.BoxList( + tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped], + 1)) + clipped = _copy_extra_fields(clipped, boxlist) + if filter_nonoverlapping: + areas = area(clipped) + nonzero_area_indices = tf.cast( + tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32) + clipped = gather(clipped, nonzero_area_indices) + return clipped + + +def prune_outside_window(boxlist, window, scope=None): + """Prunes bounding boxes that fall outside a given window. 
+ + This function prunes bounding boxes that even partially fall outside the given + window. See also clip_to_window which only prunes bounding boxes that fall + completely outside the window, and clips any bounding boxes that partially + overflow. + + Args: + boxlist: a BoxList holding M_in boxes. + window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] + of the window + scope: name scope. + + Returns: + pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in + valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes + in the input tensor. + """ + with tf.name_scope(scope, 'PruneOutsideWindow'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + coordinate_violations = tf.concat([ + tf.less(y_min, win_y_min), tf.less(x_min, win_x_min), + tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max) + ], 1) + valid_indices = tf.reshape( + tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1]) + return gather(boxlist, valid_indices), valid_indices + + +def prune_completely_outside_window(boxlist, window, scope=None): + """Prunes bounding boxes that fall completely outside of the given window. + + The function clip_to_window prunes bounding boxes that fall + completely outside the window, but also clips any bounding boxes that + partially overflow. This function does not clip partially overflowing boxes. + + Args: + boxlist: a BoxList holding M_in boxes. + window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] + of the window + scope: name scope. + + Returns: + pruned_boxlist: a new BoxList with all bounding boxes partially or fully in + the window. + valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes + in the input tensor. + """ + with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'): + y_min, x_min, y_max, x_max = tf.split( + value=boxlist.get(), num_or_size_splits=4, axis=1) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + coordinate_violations = tf.concat([ + tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max), + tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min) + ], 1) + valid_indices = tf.reshape( + tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1]) + return gather(boxlist, valid_indices), valid_indices + + +def intersection(boxlist1, boxlist2, scope=None): + """Compute pairwise intersection areas between boxes. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. 
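The two pruning ops differ only in the coordinate-violation test: prune_outside_window rejects a box as soon as any corner leaves the window, while prune_completely_outside_window rejects it only when it has no overlap at all. A numpy sketch of both predicates on three example boxes:

import numpy as np

window = np.array([0.0, 0.0, 9.0, 14.0])
boxes = np.array([[5.0, 5.0, 6.0, 6.0],        # fully inside
                  [-1.0, -2.0, 4.0, 5.0],      # partially outside
                  [-10.0, -10.0, -9.0, -9.0]]) # completely outside
ymin, xmin, ymax, xmax = boxes.T
win_ymin, win_xmin, win_ymax, win_xmax = window

# prune_outside_window: any corner outside the window disqualifies the box.
partly_out = ((ymin < win_ymin) | (xmin < win_xmin) |
              (ymax > win_ymax) | (xmax > win_xmax))
keep_strict = ~partly_out                      # [True, False, False]

# prune_completely_outside_window: only boxes with no overlap are dropped.
fully_out = ((ymin >= win_ymax) | (xmin >= win_xmax) |
             (ymax <= win_ymin) | (xmax <= win_xmin))
keep_loose = ~fully_out                        # [True, True, False]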
+ + Returns: + a tensor with shape [N, M] representing pairwise intersections + """ + with tf.name_scope(scope, 'Intersection'): + y_min1, x_min1, y_max1, x_max1 = tf.split( + value=boxlist1.get(), num_or_size_splits=4, axis=1) + y_min2, x_min2, y_max2, x_max2 = tf.split( + value=boxlist2.get(), num_or_size_splits=4, axis=1) + all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2)) + all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2)) + intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) + all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2)) + all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2)) + intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) + return intersect_heights * intersect_widths + + +def matched_intersection(boxlist1, boxlist2, scope=None): + """Compute intersection areas between corresponding boxes in two boxlists. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding N boxes + scope: name scope. + + Returns: + a tensor with shape [N] representing pairwise intersections + """ + with tf.name_scope(scope, 'MatchedIntersection'): + y_min1, x_min1, y_max1, x_max1 = tf.split( + value=boxlist1.get(), num_or_size_splits=4, axis=1) + y_min2, x_min2, y_max2, x_max2 = tf.split( + value=boxlist2.get(), num_or_size_splits=4, axis=1) + min_ymax = tf.minimum(y_max1, y_max2) + max_ymin = tf.maximum(y_min1, y_min2) + intersect_heights = tf.maximum(0.0, min_ymax - max_ymin) + min_xmax = tf.minimum(x_max1, x_max2) + max_xmin = tf.maximum(x_min1, x_min2) + intersect_widths = tf.maximum(0.0, min_xmax - max_xmin) + return tf.reshape(intersect_heights * intersect_widths, [-1]) + + +def iou(boxlist1, boxlist2, scope=None): + """Computes pairwise intersection-over-union between box collections. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise iou scores. + """ + with tf.name_scope(scope, 'IOU'): + intersections = intersection(boxlist1, boxlist2) + areas1 = area(boxlist1) + areas2 = area(boxlist2) + unions = ( + tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections) + return tf.where( + tf.equal(intersections, 0.0), + tf.zeros_like(intersections), tf.truediv(intersections, unions)) + + +def l1(boxlist1, boxlist2, scope=None): + """Computes l1 loss (pairwise) between two boxlists. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing the pairwise L1 loss. + """ + with tf.name_scope(scope, 'PairwiseL1'): + ycenter1, xcenter1, h1, w1 = boxlist1.get_center_coordinates_and_sizes() + ycenter2, xcenter2, h2, w2 = boxlist2.get_center_coordinates_and_sizes() + ycenters = tf.abs(tf.expand_dims(ycenter2, axis=0) - tf.expand_dims( + tf.transpose(ycenter1), axis=1)) + xcenters = tf.abs(tf.expand_dims(xcenter2, axis=0) - tf.expand_dims( + tf.transpose(xcenter1), axis=1)) + heights = tf.abs(tf.expand_dims(h2, axis=0) - tf.expand_dims( + tf.transpose(h1), axis=1)) + widths = tf.abs(tf.expand_dims(w2, axis=0) - tf.expand_dims( + tf.transpose(w1), axis=1)) + return ycenters + xcenters + heights + widths + + +def giou(boxlist1, boxlist2, scope=None): + """Computes pairwise generalized IOU between two boxlists. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. 
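A worked pairwise intersection/IoU computation in numpy, using one box from each of the test fixtures below, shows the formula intersection / (area1 + area2 - intersection) in action (illustrative sketch only):

import numpy as np

boxes1 = np.array([[4.0, 3.0, 7.0, 5.0]])      # area 3 * 2 = 6
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0]])      # area 3 * 4 = 12

# Pairwise intersection: overlap of the y-ranges times overlap of the x-ranges.
ih = np.maximum(0.0, np.minimum(boxes1[:, None, 2], boxes2[None, :, 2]) -
                     np.maximum(boxes1[:, None, 0], boxes2[None, :, 0]))
iw = np.maximum(0.0, np.minimum(boxes1[:, None, 3], boxes2[None, :, 3]) -
                     np.maximum(boxes1[:, None, 1], boxes2[None, :, 1]))
inter = ih * iw                                # [[2.]]
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
iou = inter / (area1[:, None] + area2[None, :] - inter)   # [[2 / 16]] = 0.125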
+ + Returns: + a tensor with shape [N, M] representing the pairwise GIoU loss. + """ + with tf.name_scope(scope, 'PairwiseGIoU'): + n = boxlist1.num_boxes() + m = boxlist2.num_boxes() + boxes1 = tf.repeat(boxlist1.get(), repeats=m, axis=0) + boxes2 = tf.tile(boxlist2.get(), multiples=[n, 1]) + return tf.reshape(ops.giou(boxes1, boxes2), [n, m]) + + +def matched_iou(boxlist1, boxlist2, scope=None): + """Compute intersection-over-union between corresponding boxes in boxlists. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding N boxes + scope: name scope. + + Returns: + a tensor with shape [N] representing pairwise iou scores. + """ + with tf.name_scope(scope, 'MatchedIOU'): + intersections = matched_intersection(boxlist1, boxlist2) + areas1 = area(boxlist1) + areas2 = area(boxlist2) + unions = areas1 + areas2 - intersections + return tf.where( + tf.equal(intersections, 0.0), + tf.zeros_like(intersections), tf.truediv(intersections, unions)) + + +def ioa(boxlist1, boxlist2, scope=None): + """Computes pairwise intersection-over-area between box collections. + + intersection-over-area (IOA) between two boxes box1 and box2 is defined as + their intersection area over box2's area. Note that ioa is not symmetric, + that is, ioa(box1, box2) != ioa(box2, box1). + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise ioa scores. + """ + with tf.name_scope(scope, 'IOA'): + intersections = intersection(boxlist1, boxlist2) + areas = tf.expand_dims(area(boxlist2), 0) + return tf.truediv(intersections, areas) + + +def prune_non_overlapping_boxes( + boxlist1, boxlist2, min_overlap=0.0, scope=None): + """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2. + + For each box in boxlist1, we want its IOA to be more than minoverlap with + at least one of the boxes in boxlist2. If it does not, we remove it. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + min_overlap: Minimum required overlap between boxes, to count them as + overlapping. + scope: name scope. + + Returns: + new_boxlist1: A pruned boxlist with size [N', 4]. + keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the + first input BoxList `boxlist1`. + """ + with tf.name_scope(scope, 'PruneNonOverlappingBoxes'): + ioa_ = ioa(boxlist2, boxlist1) # [M, N] tensor + ioa_ = tf.reduce_max(ioa_, reduction_indices=[0]) # [N] tensor + keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap)) + keep_inds = tf.squeeze(tf.where(keep_bool), axis=[1]) + new_boxlist1 = gather(boxlist1, keep_inds) + return new_boxlist1, keep_inds + + +def prune_small_boxes(boxlist, min_side, scope=None): + """Prunes small boxes in the boxlist which have a side smaller than min_side. + + Args: + boxlist: BoxList holding N boxes. + min_side: Minimum width AND height of box to survive pruning. + scope: name scope. + + Returns: + A pruned boxlist. + """ + with tf.name_scope(scope, 'PruneSmallBoxes'): + height, width = height_width(boxlist) + is_valid = tf.logical_and(tf.greater_equal(width, min_side), + tf.greater_equal(height, min_side)) + return gather(boxlist, tf.reshape(tf.where(is_valid), [-1])) + + +def change_coordinate_frame(boxlist, window, scope=None): + """Change coordinate frame of the boxlist to be relative to window's frame. 
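Because IOA divides the intersection by only the second box's area, it is not symmetric, which is exactly what prune_non_overlapping_boxes relies on when it keeps a box only if its best IOA against the other list reaches min_overlap. A small numpy check with the same pair of boxes as above:

import numpy as np

box1 = np.array([4.0, 3.0, 7.0, 5.0])    # area 6
box2 = np.array([3.0, 4.0, 6.0, 8.0])    # area 12
ih = max(0.0, min(box1[2], box2[2]) - max(box1[0], box2[0]))
iw = max(0.0, min(box1[3], box2[3]) - max(box1[1], box2[1]))
inter = ih * iw                           # 2.0
ioa_12 = inter / 12.0                     # ioa(box1, box2) ~= 0.167
ioa_21 = inter / 6.0                      # ioa(box2, box1) ~= 0.333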
+ + Given a window of the form [ymin, xmin, ymax, xmax], + changes bounding box coordinates from boxlist to be relative to this window + (e.g., the min corner maps to (0,0) and the max corner maps to (1,1)). + + An example use case is data augmentation: where we are given groundtruth + boxes (boxlist) and would like to randomly crop the image to some + window (window). In this case we need to change the coordinate frame of + each groundtruth box to be relative to this new window. + + Args: + boxlist: A BoxList object holding N boxes. + window: A rank 1 tensor [4]. + scope: name scope. + + Returns: + Returns a BoxList object with N boxes. + """ + with tf.name_scope(scope, 'ChangeCoordinateFrame'): + win_height = window[2] - window[0] + win_width = window[3] - window[1] + boxlist_new = scale(box_list.BoxList( + boxlist.get() - [window[0], window[1], window[0], window[1]]), + 1.0 / win_height, 1.0 / win_width) + boxlist_new = _copy_extra_fields(boxlist_new, boxlist) + return boxlist_new + + +def sq_dist(boxlist1, boxlist2, scope=None): + """Computes the pairwise squared distances between box corners. + + This op treats each box as if it were a point in a 4d Euclidean space and + computes pairwise squared distances. + + Mathematically, we are given two matrices of box coordinates X and Y, + where X(i,:) is the i'th row of X, containing the 4 numbers defining the + corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to + boxlist2. We compute + Z(i,j) = ||X(i,:) - Y(j,:)||^2 + = ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:), + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing pairwise distances + """ + with tf.name_scope(scope, 'SqDist'): + sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keep_dims=True) + sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keep_dims=True) + innerprod = tf.matmul(boxlist1.get(), boxlist2.get(), + transpose_a=False, transpose_b=True) + return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod + + +def boolean_mask(boxlist, indicator, fields=None, scope=None, + use_static_shapes=False, indicator_sum=None): + """Select boxes from BoxList according to indicator and return new BoxList. + + `boolean_mask` returns the subset of boxes that are marked as "True" by the + indicator tensor. By default, `boolean_mask` returns boxes corresponding to + the input index list, as well as all additional fields stored in the boxlist + (indexing into the first dimension). However one can optionally only draw + from a subset of fields. + + Args: + boxlist: BoxList holding N boxes + indicator: a rank-1 boolean tensor + fields: (optional) list of fields to also gather from. If None (default), + all fields are gathered from. Pass an empty fields list to only gather + the box coordinates. + scope: name scope. + use_static_shapes: Whether to use an implementation with static shape + gurantees. + indicator_sum: An integer containing the sum of `indicator` vector. Only + required if `use_static_shape` is True. + + Returns: + subboxlist: a BoxList corresponding to the subset of the input BoxList + specified by indicator + Raises: + ValueError: if `indicator` is not a rank-1 boolean tensor. 
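The expansion used by sq_dist, ||X(i,:) - Y(j,:)||^2 = ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' Y(j,:), can be verified numerically; a numpy sketch using the corner values from the pairwise-distance unit test below:

import numpy as np

x = np.array([[0.0, 0.0, 0.0, 0.0],
              [1.0, 1.0, 0.0, 2.0]])      # N = 2 boxes treated as 4-d points
y = np.array([[3.0, 4.0, 1.0, 0.0],
              [-4.0, 0.0, 0.0, 3.0]])     # M = 2 boxes

direct = ((x[:, None, :] - y[None, :, :]) ** 2).sum(-1)
expanded = (np.sum(x ** 2, 1, keepdims=True)
            + np.sum(y ** 2, 1, keepdims=True).T
            - 2.0 * x.dot(y.T))
assert np.allclose(direct, expanded)      # both give [[26, 18], ...] pattern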
+ """ + with tf.name_scope(scope, 'BooleanMask'): + if indicator.shape.ndims != 1: + raise ValueError('indicator should have rank 1') + if indicator.dtype != tf.bool: + raise ValueError('indicator should be a boolean tensor') + if use_static_shapes: + if not (indicator_sum and isinstance(indicator_sum, int)): + raise ValueError('`indicator_sum` must be a of type int') + selected_positions = tf.cast(indicator, dtype=tf.float32) + indexed_positions = tf.cast( + tf.multiply( + tf.cumsum(selected_positions), selected_positions), + dtype=tf.int32) + one_hot_selector = tf.one_hot( + indexed_positions - 1, indicator_sum, dtype=tf.float32) + sampled_indices = tf.cast( + tf.tensordot( + tf.cast(tf.range(tf.shape(indicator)[0]), dtype=tf.float32), + one_hot_selector, + axes=[0, 0]), + dtype=tf.int32) + return gather(boxlist, sampled_indices, use_static_shapes=True) + else: + subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator)) + if fields is None: + fields = boxlist.get_extra_fields() + for field in fields: + if not boxlist.has_field(field): + raise ValueError('boxlist must contain all specified fields') + subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator) + subboxlist.add_field(field, subfieldlist) + return subboxlist + + +def gather(boxlist, indices, fields=None, scope=None, use_static_shapes=False): + """Gather boxes from BoxList according to indices and return new BoxList. + + By default, `gather` returns boxes corresponding to the input index list, as + well as all additional fields stored in the boxlist (indexing into the + first dimension). However one can optionally only gather from a + subset of fields. + + Args: + boxlist: BoxList holding N boxes + indices: a rank-1 tensor of type int32 / int64 + fields: (optional) list of fields to also gather from. If None (default), + all fields are gathered from. Pass an empty fields list to only gather + the box coordinates. + scope: name scope. + use_static_shapes: Whether to use an implementation with static shape + gurantees. + + Returns: + subboxlist: a BoxList corresponding to the subset of the input BoxList + specified by indices + Raises: + ValueError: if specified field is not contained in boxlist or if the + indices are not of type int32 + """ + with tf.name_scope(scope, 'Gather'): + if len(indices.shape.as_list()) != 1: + raise ValueError('indices should have rank 1') + if indices.dtype != tf.int32 and indices.dtype != tf.int64: + raise ValueError('indices should be an int32 / int64 tensor') + gather_op = tf.gather + if use_static_shapes: + gather_op = ops.matmul_gather_on_zeroth_axis + subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices)) + if fields is None: + fields = boxlist.get_extra_fields() + fields += ['boxes'] + for field in fields: + if not boxlist.has_field(field): + raise ValueError('boxlist must contain all specified fields') + subfieldlist = gather_op(boxlist.get_field(field), indices) + subboxlist.add_field(field, subfieldlist) + return subboxlist + + +def concatenate(boxlists, fields=None, scope=None): + """Concatenate list of BoxLists. + + This op concatenates a list of input BoxLists into a larger BoxList. It also + handles concatenation of BoxList fields as long as the field tensor shapes + are equal except for the first dimension. + + Args: + boxlists: list of BoxList objects + fields: optional list of fields to also concatenate. By default, all + fields from the first BoxList in the list are included in the + concatenation. + scope: name scope. 
+ + Returns: + a BoxList with number of boxes equal to + sum([boxlist.num_boxes() for boxlist in BoxList]) + Raises: + ValueError: if boxlists is invalid (i.e., is not a list, is empty, or + contains non BoxList objects), or if requested fields are not contained in + all boxlists + """ + with tf.name_scope(scope, 'Concatenate'): + if not isinstance(boxlists, list): + raise ValueError('boxlists should be a list') + if not boxlists: + raise ValueError('boxlists should have nonzero length') + for boxlist in boxlists: + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('all elements of boxlists should be BoxList objects') + concatenated = box_list.BoxList( + tf.concat([boxlist.get() for boxlist in boxlists], 0)) + if fields is None: + fields = boxlists[0].get_extra_fields() + for field in fields: + first_field_shape = boxlists[0].get_field(field).get_shape().as_list() + first_field_shape[0] = -1 + if None in first_field_shape: + raise ValueError('field %s must have fully defined shape except for the' + ' 0th dimension.' % field) + for boxlist in boxlists: + if not boxlist.has_field(field): + raise ValueError('boxlist must contain all requested fields') + field_shape = boxlist.get_field(field).get_shape().as_list() + field_shape[0] = -1 + if field_shape != first_field_shape: + raise ValueError('field %s must have same shape for all boxlists ' + 'except for the 0th dimension.' % field) + concatenated_field = tf.concat( + [boxlist.get_field(field) for boxlist in boxlists], 0) + concatenated.add_field(field, concatenated_field) + return concatenated + + +def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None): + """Sort boxes and associated fields according to a scalar field. + + A common use case is reordering the boxes according to descending scores. + + Args: + boxlist: BoxList holding N boxes. + field: A BoxList field for sorting and reordering the BoxList. + order: (Optional) descend or ascend. Default is descend. + scope: name scope. + + Returns: + sorted_boxlist: A sorted BoxList with the field in the specified order. + + Raises: + ValueError: if specified field does not exist + ValueError: if the order is not either descend or ascend + """ + with tf.name_scope(scope, 'SortByField'): + if order != SortOrder.descend and order != SortOrder.ascend: + raise ValueError('Invalid sort order') + + field_to_sort = boxlist.get_field(field) + if len(field_to_sort.shape.as_list()) != 1: + raise ValueError('Field should have rank 1') + + num_boxes = boxlist.num_boxes() + num_entries = tf.size(field_to_sort) + length_assert = tf.Assert( + tf.equal(num_boxes, num_entries), + ['Incorrect field size: actual vs expected.', num_entries, num_boxes]) + + with tf.control_dependencies([length_assert]): + _, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True) + + if order == SortOrder.ascend: + sorted_indices = tf.reverse_v2(sorted_indices, [0]) + + return gather(boxlist, sorted_indices) + + +def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None): + """Overlay bounding box list on image. + + Currently this visualization plots a 1 pixel thick red bounding box on top + of the image. Note that tf.image.draw_bounding_boxes essentially is + 1 indexed. + + Args: + image: an image tensor with shape [height, width, 3] + boxlist: a BoxList + normalized: (boolean) specify whether corners are to be interpreted + as absolute coordinates in image space or normalized with respect to the + image size. + scope: name scope. 
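sort_by_field with the default descending order amounts to ranking boxes by a rank-1 field (typically scores) and reordering the boxes and every other field by that ranking; a minimal numpy sketch with made-up values:

import numpy as np

boxes = np.array([[0.0, 0.0, 1.0, 1.0],
                  [0.0, 0.0, 2.0, 2.0],
                  [0.0, 0.0, 3.0, 3.0]])
scores = np.array([0.2, 0.9, 0.5])
order = np.argsort(-scores)          # descending score rank: [1, 2, 0]
sorted_boxes = boxes[order]
sorted_scores = scores[order]        # [0.9, 0.5, 0.2]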
+ + Returns: + image_and_boxes: an image tensor with shape [height, width, 3] + """ + with tf.name_scope(scope, 'VisualizeBoxesInImage'): + if not normalized: + height, width, _ = tf.unstack(tf.shape(image)) + boxlist = scale(boxlist, + 1.0 / tf.cast(height, tf.float32), + 1.0 / tf.cast(width, tf.float32)) + corners = tf.expand_dims(boxlist.get(), 0) + image = tf.expand_dims(image, 0) + return tf.squeeze(tf.image.draw_bounding_boxes(image, corners), [0]) + + +def filter_field_value_equals(boxlist, field, value, scope=None): + """Filter to keep only boxes with field entries equal to the given value. + + Args: + boxlist: BoxList holding N boxes. + field: field name for filtering. + value: scalar value. + scope: name scope. + + Returns: + a BoxList holding M boxes where M <= N + + Raises: + ValueError: if boxlist not a BoxList object or if it does not have + the specified field. + """ + with tf.name_scope(scope, 'FilterFieldValueEquals'): + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field(field): + raise ValueError('boxlist must contain the specified field') + filter_field = boxlist.get_field(field) + gather_index = tf.reshape(tf.where(tf.equal(filter_field, value)), [-1]) + return gather(boxlist, gather_index) + + +def filter_greater_than(boxlist, thresh, scope=None): + """Filter to keep only boxes with score exceeding a given threshold. + + This op keeps the collection of boxes whose corresponding scores are + greater than the input threshold. + + TODO(jonathanhuang): Change function name to filter_scores_greater_than + + Args: + boxlist: BoxList holding N boxes. Must contain a 'scores' field + representing detection scores. + thresh: scalar threshold + scope: name scope. + + Returns: + a BoxList holding M boxes where M <= N + + Raises: + ValueError: if boxlist not a BoxList object or if it does not + have a scores field + """ + with tf.name_scope(scope, 'FilterGreaterThan'): + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field('scores'): + raise ValueError('input boxlist must have \'scores\' field') + scores = boxlist.get_field('scores') + if len(scores.shape.as_list()) > 2: + raise ValueError('Scores should have rank 1 or 2') + if len(scores.shape.as_list()) == 2 and scores.shape.as_list()[1] != 1: + raise ValueError('Scores should have rank 1 or have shape ' + 'consistent with [None, 1]') + high_score_indices = tf.cast(tf.reshape( + tf.where(tf.greater(scores, thresh)), + [-1]), tf.int32) + return gather(boxlist, high_score_indices) + + +def non_max_suppression(boxlist, thresh, max_output_size, scope=None): + """Non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. Note that this only works for a single class --- + to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression. + + Args: + boxlist: BoxList holding N boxes. Must contain a 'scores' field + representing detection scores. + thresh: scalar threshold + max_output_size: maximum number of retained boxes + scope: name scope. 
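The non_max_suppression wrapper defers the actual work to tf.image.non_max_suppression; the greedy single-class procedure it describes can be sketched in a few lines of numpy (illustrative only, with boxes in [ymin, xmin, ymax, xmax] order):

import numpy as np

def greedy_nms(boxes, scores, iou_thresh, max_output_size):
    """Minimal single-class greedy NMS sketch."""
    order = np.argsort(-scores)
    keep = []
    while order.size and len(keep) < max_output_size:
        i = order[0]
        keep.append(i)
        rest = order[1:]
        ih = np.maximum(0.0, np.minimum(boxes[i, 2], boxes[rest, 2]) -
                             np.maximum(boxes[i, 0], boxes[rest, 0]))
        iw = np.maximum(0.0, np.minimum(boxes[i, 3], boxes[rest, 3]) -
                             np.maximum(boxes[i, 1], boxes[rest, 1]))
        inter = ih * iw
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        area_r = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
        iou = inter / (area_i + area_r - inter)
        order = rest[iou <= iou_thresh]   # drop boxes overlapping the kept one
    return np.array(keep, dtype=np.int64)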
+ + Returns: + a BoxList holding M boxes where M <= max_output_size + Raises: + ValueError: if thresh is not in [0, 1] + """ + with tf.name_scope(scope, 'NonMaxSuppression'): + if not 0 <= thresh <= 1.0: + raise ValueError('thresh must be between 0 and 1') + if not isinstance(boxlist, box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field('scores'): + raise ValueError('input boxlist must have \'scores\' field') + selected_indices = tf.image.non_max_suppression( + boxlist.get(), boxlist.get_field('scores'), + max_output_size, iou_threshold=thresh) + return gather(boxlist, selected_indices) + + +def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from): + """Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to. + + Args: + boxlist_to_copy_to: BoxList to which extra fields are copied. + boxlist_to_copy_from: BoxList from which fields are copied. + + Returns: + boxlist_to_copy_to with extra fields. + """ + for field in boxlist_to_copy_from.get_extra_fields(): + boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field)) + return boxlist_to_copy_to + + +def to_normalized_coordinates(boxlist, height, width, + check_range=True, scope=None): + """Converts absolute box coordinates to normalized coordinates in [0, 1]. + + Usually one uses the dynamic shape of the image or conv-layer tensor: + boxlist = box_list_ops.to_normalized_coordinates(boxlist, + tf.shape(images)[1], + tf.shape(images)[2]), + + This function raises an assertion failed error at graph execution time when + the maximum coordinate is smaller than 1.01 (which means that coordinates are + already normalized). The value 1.01 is to deal with small rounding errors. + + Args: + boxlist: BoxList with coordinates in terms of pixel-locations. + height: Maximum value for height of absolute box coordinates. + width: Maximum value for width of absolute box coordinates. + check_range: If True, checks if the coordinates are normalized or not. + scope: name scope. + + Returns: + boxlist with normalized coordinates in [0, 1]. + """ + with tf.name_scope(scope, 'ToNormalizedCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + if check_range: + max_val = tf.reduce_max(boxlist.get()) + max_assert = tf.Assert(tf.greater(max_val, 1.01), + ['max value is lower than 1.01: ', max_val]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(boxlist, 1 / height, 1 / width) + + +def to_absolute_coordinates(boxlist, + height, + width, + check_range=True, + maximum_normalized_coordinate=1.1, + scope=None): + """Converts normalized box coordinates to absolute pixel coordinates. + + This function raises an assertion failed error when the maximum box coordinate + value is larger than maximum_normalized_coordinate (in which case coordinates + are already absolute). + + Args: + boxlist: BoxList with coordinates in range [0, 1]. + height: Maximum value for height of absolute box coordinates. + width: Maximum value for width of absolute box coordinates. + check_range: If True, checks if the coordinates are normalized or not. + maximum_normalized_coordinate: Maximum coordinate value to be considered + as normalized, default to 1.1. + scope: name scope. + + Returns: + boxlist with absolute coordinates in terms of the image size. + + """ + with tf.name_scope(scope, 'ToAbsoluteCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + # Ensure range of input boxes is correct. 
+ if check_range: + box_maximum = tf.reduce_max(boxlist.get()) + max_assert = tf.Assert( + tf.greater_equal(maximum_normalized_coordinate, box_maximum), + ['maximum box coordinate value is larger ' + 'than %f: ' % maximum_normalized_coordinate, box_maximum]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(boxlist, height, width) + + +def refine_boxes_multi_class(pool_boxes, + num_classes, + nms_iou_thresh, + nms_max_detections, + voting_iou_thresh=0.5): + """Refines a pool of boxes using non max suppression and box voting. + + Box refinement is done independently for each class. + + Args: + pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must + have a rank 1 'scores' field and a rank 1 'classes' field. + num_classes: (int scalar) Number of classes. + nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS). + nms_max_detections: (int scalar) maximum output size for NMS. + voting_iou_thresh: (float scalar) iou threshold for box voting. + + Returns: + BoxList of refined boxes. + + Raises: + ValueError: if + a) nms_iou_thresh or voting_iou_thresh is not in [0, 1]. + b) pool_boxes is not a BoxList. + c) pool_boxes does not have a scores and classes field. + """ + if not 0.0 <= nms_iou_thresh <= 1.0: + raise ValueError('nms_iou_thresh must be between 0 and 1') + if not 0.0 <= voting_iou_thresh <= 1.0: + raise ValueError('voting_iou_thresh must be between 0 and 1') + if not isinstance(pool_boxes, box_list.BoxList): + raise ValueError('pool_boxes must be a BoxList') + if not pool_boxes.has_field('scores'): + raise ValueError('pool_boxes must have a \'scores\' field') + if not pool_boxes.has_field('classes'): + raise ValueError('pool_boxes must have a \'classes\' field') + + refined_boxes = [] + for i in range(num_classes): + boxes_class = filter_field_value_equals(pool_boxes, 'classes', i) + refined_boxes_class = refine_boxes(boxes_class, nms_iou_thresh, + nms_max_detections, voting_iou_thresh) + refined_boxes.append(refined_boxes_class) + return sort_by_field(concatenate(refined_boxes), 'scores') + + +def refine_boxes(pool_boxes, + nms_iou_thresh, + nms_max_detections, + voting_iou_thresh=0.5): + """Refines a pool of boxes using non max suppression and box voting. + + Args: + pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must + have a rank 1 'scores' field. + nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS). + nms_max_detections: (int scalar) maximum output size for NMS. + voting_iou_thresh: (float scalar) iou threshold for box voting. + + Returns: + BoxList of refined boxes. + + Raises: + ValueError: if + a) nms_iou_thresh or voting_iou_thresh is not in [0, 1]. + b) pool_boxes is not a BoxList. + c) pool_boxes does not have a scores field. + """ + if not 0.0 <= nms_iou_thresh <= 1.0: + raise ValueError('nms_iou_thresh must be between 0 and 1') + if not 0.0 <= voting_iou_thresh <= 1.0: + raise ValueError('voting_iou_thresh must be between 0 and 1') + if not isinstance(pool_boxes, box_list.BoxList): + raise ValueError('pool_boxes must be a BoxList') + if not pool_boxes.has_field('scores'): + raise ValueError('pool_boxes must have a \'scores\' field') + + nms_boxes = non_max_suppression( + pool_boxes, nms_iou_thresh, nms_max_detections) + return box_voting(nms_boxes, pool_boxes, voting_iou_thresh) + + +def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5): + """Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015. 
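The normalized/absolute conversions completed just above are inverses of each other: dividing by the image size normalizes, multiplying restores pixel coordinates. A numpy round-trip check with made-up values:

import numpy as np

height, width = 100.0, 200.0
absolute = np.array([[10.0, 20.0, 90.0, 180.0]])       # pixel coordinates
normalized = absolute / np.array([height, width, height, width])
restored = normalized * np.array([height, width, height, width])
assert np.allclose(restored, absolute)                 # normalized: [0.1, 0.1, 0.9, 0.9]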
+ + Performs box voting as described in 'Object detection via a multi-region & + semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For + each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes + with iou overlap >= iou_thresh. The location of B is set to the weighted + average location of boxes in S (scores are used for weighting). And the score + of B is set to the average score of boxes in S. + + Args: + selected_boxes: BoxList containing a subset of boxes in pool_boxes. These + boxes are usually selected from pool_boxes using non max suppression. + pool_boxes: BoxList containing a set of (possibly redundant) boxes. + iou_thresh: (float scalar) iou threshold for matching boxes in + selected_boxes and pool_boxes. + + Returns: + BoxList containing averaged locations and scores for each box in + selected_boxes. + + Raises: + ValueError: if + a) selected_boxes or pool_boxes is not a BoxList. + b) if iou_thresh is not in [0, 1]. + c) pool_boxes does not have a scores field. + """ + if not 0.0 <= iou_thresh <= 1.0: + raise ValueError('iou_thresh must be between 0 and 1') + if not isinstance(selected_boxes, box_list.BoxList): + raise ValueError('selected_boxes must be a BoxList') + if not isinstance(pool_boxes, box_list.BoxList): + raise ValueError('pool_boxes must be a BoxList') + if not pool_boxes.has_field('scores'): + raise ValueError('pool_boxes must have a \'scores\' field') + + iou_ = iou(selected_boxes, pool_boxes) + match_indicator = tf.cast(tf.greater(iou_, iou_thresh), dtype=tf.float32) + num_matches = tf.reduce_sum(match_indicator, 1) + # TODO(kbanoop): Handle the case where some boxes in selected_boxes do not + # match to any boxes in pool_boxes. For such boxes without any matches, we + # should return the original boxes without voting. + match_assert = tf.Assert( + tf.reduce_all(tf.greater(num_matches, 0)), + ['Each box in selected_boxes must match with at least one box ' + 'in pool_boxes.']) + + scores = tf.expand_dims(pool_boxes.get_field('scores'), 1) + scores_assert = tf.Assert( + tf.reduce_all(tf.greater_equal(scores, 0)), + ['Scores must be non negative.']) + + with tf.control_dependencies([scores_assert, match_assert]): + sum_scores = tf.matmul(match_indicator, scores) + averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches + + box_locations = tf.matmul(match_indicator, + pool_boxes.get() * scores) / sum_scores + averaged_boxes = box_list.BoxList(box_locations) + _copy_extra_fields(averaged_boxes, selected_boxes) + averaged_boxes.add_field('scores', averaged_scores) + return averaged_boxes + + +def pad_or_clip_box_list(boxlist, num_boxes, scope=None): + """Pads or clips all fields of a BoxList. + + Args: + boxlist: A BoxList with arbitrary of number of boxes. + num_boxes: First num_boxes in boxlist are kept. + The fields are zero-padded if num_boxes is bigger than the + actual number of boxes. + scope: name scope. + + Returns: + BoxList with all fields padded or clipped. + """ + with tf.name_scope(scope, 'PadOrClipBoxList'): + subboxlist = box_list.BoxList(shape_utils.pad_or_clip_tensor( + boxlist.get(), num_boxes)) + for field in boxlist.get_extra_fields(): + subfield = shape_utils.pad_or_clip_tensor( + boxlist.get_field(field), num_boxes) + subboxlist.add_field(field, subfield) + return subboxlist + + +def select_random_box(boxlist, + default_box=None, + seed=None, + scope=None): + """Selects a random bounding box from a `BoxList`. + + Args: + boxlist: A BoxList. + default_box: A [1, 4] float32 tensor. 
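A worked example of the averaging that box_voting describes, for one selected box matched (IoU >= iou_thresh) to two pool boxes, written as a standalone numpy sketch with made-up values:

import numpy as np

pool_boxes = np.array([[0.0, 0.0, 10.0, 10.0],
                       [1.0, 1.0, 11.0, 11.0]])
pool_scores = np.array([0.8, 0.2])

voted_score = pool_scores.mean()                               # 0.5
voted_box = (pool_boxes * pool_scores[:, None]).sum(0) / pool_scores.sum()
# -> [0.2, 0.2, 10.2, 10.2]: a score-weighted average of the matched corners.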
If no boxes are present in `boxlist`, + this default box will be returned. If None, will use a default box of + [[-1., -1., -1., -1.]]. + seed: Random seed. + scope: Name scope. + + Returns: + bbox: A [1, 4] tensor with a random bounding box. + valid: A bool tensor indicating whether a valid bounding box is returned + (True) or whether the default box is returned (False). + """ + with tf.name_scope(scope, 'SelectRandomBox'): + bboxes = boxlist.get() + combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes) + number_of_boxes = combined_shape[0] + default_box = default_box or tf.constant([[-1., -1., -1., -1.]]) + + def select_box(): + random_index = tf.random_uniform([], + maxval=number_of_boxes, + dtype=tf.int32, + seed=seed) + return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True) + + return tf.cond( + tf.greater_equal(number_of_boxes, 1), + true_fn=select_box, + false_fn=lambda: (default_box, tf.constant(False))) + + +def get_minimal_coverage_box(boxlist, + default_box=None, + scope=None): + """Creates a single bounding box which covers all boxes in the boxlist. + + Args: + boxlist: A Boxlist. + default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`, + this default box will be returned. If None, will use a default box of + [[0., 0., 1., 1.]]. + scope: Name scope. + + Returns: + A [1, 4] float32 tensor with a bounding box that tightly covers all the + boxes in the box list. If the boxlist does not contain any boxes, the + default box is returned. + """ + with tf.name_scope(scope, 'CreateCoverageBox'): + num_boxes = boxlist.num_boxes() + + def coverage_box(bboxes): + y_min, x_min, y_max, x_max = tf.split( + value=bboxes, num_or_size_splits=4, axis=1) + y_min_coverage = tf.reduce_min(y_min, axis=0) + x_min_coverage = tf.reduce_min(x_min, axis=0) + y_max_coverage = tf.reduce_max(y_max, axis=0) + x_max_coverage = tf.reduce_max(x_max, axis=0) + return tf.stack( + [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage], + axis=1) + + default_box = default_box or tf.constant([[0., 0., 1., 1.]]) + return tf.cond( + tf.greater_equal(num_boxes, 1), + true_fn=lambda: coverage_box(boxlist.get()), + false_fn=lambda: default_box) + + +def sample_boxes_by_jittering(boxlist, + num_boxes_to_sample, + stddev=0.1, + scope=None): + """Samples num_boxes_to_sample boxes by jittering around boxlist boxes. + + It is possible that this function might generate boxes with size 0. The larger + the stddev, this is more probable. For a small stddev of 0.1 this probability + is very small. + + Args: + boxlist: A boxlist containing N boxes in normalized coordinates. + num_boxes_to_sample: A positive integer containing the number of boxes to + sample. + stddev: Standard deviation. This is used to draw random offsets for the + box corners from a normal distribution. The offset is multiplied by the + box size so will be larger in terms of pixels for larger boxes. + scope: Name scope. + + Returns: + sampled_boxlist: A boxlist containing num_boxes_to_sample boxes in + normalized coordinates. 
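For the coverage box computed above, the result is just the elementwise minimum of the min-corners and maximum of the max-corners; a numpy sketch with made-up normalized boxes:

import numpy as np

boxes = np.array([[0.1, 0.2, 0.4, 0.5],
                  [0.3, 0.0, 0.9, 0.6]])
coverage = np.concatenate([boxes[:, :2].min(0), boxes[:, 2:].max(0)])
# -> [0.1, 0.0, 0.9, 0.6], the tightest box containing every input box.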
+ """ + with tf.name_scope(scope, 'SampleBoxesByJittering'): + num_boxes = boxlist.num_boxes() + box_indices = tf.random_uniform( + [num_boxes_to_sample], + minval=0, + maxval=num_boxes, + dtype=tf.int32) + sampled_boxes = tf.gather(boxlist.get(), box_indices) + sampled_boxes_height = sampled_boxes[:, 2] - sampled_boxes[:, 0] + sampled_boxes_width = sampled_boxes[:, 3] - sampled_boxes[:, 1] + rand_miny_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + rand_minx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + rand_maxy_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + rand_maxx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) + miny = rand_miny_gaussian * sampled_boxes_height + sampled_boxes[:, 0] + minx = rand_minx_gaussian * sampled_boxes_width + sampled_boxes[:, 1] + maxy = rand_maxy_gaussian * sampled_boxes_height + sampled_boxes[:, 2] + maxx = rand_maxx_gaussian * sampled_boxes_width + sampled_boxes[:, 3] + maxy = tf.maximum(miny, maxy) + maxx = tf.maximum(minx, maxx) + sampled_boxes = tf.stack([miny, minx, maxy, maxx], axis=1) + sampled_boxes = tf.maximum(tf.minimum(sampled_boxes, 1.0), 0.0) + return box_list.BoxList(sampled_boxes) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_ops.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_ops.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a759da79ce151fd1ce80eb902938274cd45e9bf5 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_ops.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_ops_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_ops_test.py new file mode 100644 index 0000000000000000000000000000000000000000..767c1899727b9e0280c096dc8e4a440c535d5839 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_ops_test.py @@ -0,0 +1,1104 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.box_list_ops.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.utils import test_case + + +class BoxListOpsTest(test_case.TestCase): + """Tests for common bounding box operations.""" + + def test_area(self): + def graph_fn(): + corners = tf.constant([[0.0, 0.0, 10.0, 20.0], [1.0, 2.0, 3.0, 4.0]]) + boxes = box_list.BoxList(corners) + areas = box_list_ops.area(boxes) + return areas + areas_out = self.execute(graph_fn, []) + exp_output = [200.0, 4.0] + self.assertAllClose(areas_out, exp_output) + + def test_height_width(self): + def graph_fn(): + corners = tf.constant([[0.0, 0.0, 10.0, 20.0], [1.0, 2.0, 3.0, 4.0]]) + boxes = box_list.BoxList(corners) + return box_list_ops.height_width(boxes) + heights_out, widths_out = self.execute(graph_fn, []) + exp_output_heights = [10., 2.] + exp_output_widths = [20., 2.] + self.assertAllClose(heights_out, exp_output_heights) + self.assertAllClose(widths_out, exp_output_widths) + + def test_scale(self): + def graph_fn(): + corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]], + dtype=tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2]])) + + y_scale = tf.constant(1.0/100) + x_scale = tf.constant(1.0/200) + scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale) + return scaled_boxes.get(), scaled_boxes.get_field('extra_data') + scaled_corners_out, extra_data_out = self.execute(graph_fn, []) + exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]] + self.assertAllClose(scaled_corners_out, exp_output) + self.assertAllEqual(extra_data_out, [[1], [2]]) + + def test_scale_height_width(self): + def graph_fn(): + corners = tf.constant([[-10, -20, 10, 20], [0, 100, 100, 200]], + dtype=tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2]])) + + y_scale = tf.constant(2.) 
+ x_scale = tf.constant(0.5) + scaled_boxes = box_list_ops.scale_height_width(boxes, y_scale, x_scale) + return scaled_boxes.get(), scaled_boxes.get_field('extra_data') + exp_output = [ + [-20., -10, 20., 10], + [-50., 125, 150., 175.]] + scaled_corners_out, extra_data_out = self.execute(graph_fn, []) + self.assertAllClose(scaled_corners_out, exp_output) + self.assertAllEqual(extra_data_out, [[1], [2]]) + + def test_clip_to_window_filter_boxes_which_fall_outside_the_window( + self): + def graph_fn(): + window = tf.constant([0, 0, 9, 14], tf.float32) + corners = tf.constant([[5.0, 5.0, 6.0, 6.0], + [-1.0, -2.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0], + [-100.0, -100.0, 300.0, 600.0], + [-10.0, -10.0, -9.0, -9.0]]) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]])) + pruned = box_list_ops.clip_to_window( + boxes, window, filter_nonoverlapping=True) + return pruned.get(), pruned.get_field('extra_data') + exp_output = [[5.0, 5.0, 6.0, 6.0], [0.0, 0.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], [0.0, 0.0, 9.0, 14.0], + [0.0, 0.0, 9.0, 14.0]] + pruned_output, extra_data_out = self.execute_cpu(graph_fn, []) + self.assertAllClose(pruned_output, exp_output) + self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [5]]) + + def test_clip_to_window_without_filtering_boxes_which_fall_outside_the_window( + self): + def graph_fn(): + window = tf.constant([0, 0, 9, 14], tf.float32) + corners = tf.constant([[5.0, 5.0, 6.0, 6.0], + [-1.0, -2.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0], + [-100.0, -100.0, 300.0, 600.0], + [-10.0, -10.0, -9.0, -9.0]]) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]])) + pruned = box_list_ops.clip_to_window( + boxes, window, filter_nonoverlapping=False) + return pruned.get(), pruned.get_field('extra_data') + pruned_output, extra_data_out = self.execute(graph_fn, []) + exp_output = [[5.0, 5.0, 6.0, 6.0], [0.0, 0.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], [0.0, 0.0, 9.0, 14.0], + [0.0, 0.0, 9.0, 14.0], [0.0, 0.0, 0.0, 0.0]] + self.assertAllClose(pruned_output, exp_output) + self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [5], [6]]) + + def test_prune_outside_window_filters_boxes_which_fall_outside_the_window( + self): + def graph_fn(): + window = tf.constant([0, 0, 9, 14], tf.float32) + corners = tf.constant([[5.0, 5.0, 6.0, 6.0], + [-1.0, -2.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0], + [-10.0, -10.0, -9.0, -9.0], + [-100.0, -100.0, 300.0, 600.0]]) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]])) + pruned, keep_indices = box_list_ops.prune_outside_window(boxes, window) + return pruned.get(), pruned.get_field('extra_data'), keep_indices + pruned_output, extra_data_out, keep_indices_out = self.execute_cpu(graph_fn, + []) + exp_output = [[5.0, 5.0, 6.0, 6.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0]] + self.assertAllClose(pruned_output, exp_output) + self.assertAllEqual(keep_indices_out, [0, 2, 3]) + self.assertAllEqual(extra_data_out, [[1], [3], [4]]) + + def test_prune_completely_outside_window(self): + def graph_fn(): + window = tf.constant([0, 0, 9, 14], tf.float32) + corners = tf.constant([[5.0, 5.0, 6.0, 6.0], + [-1.0, -2.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0], + [-10.0, -10.0, -9.0, -9.0], + [-100.0, -100.0, 300.0, 600.0]]) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', 
tf.constant([[1], [2], [3], [4], [5], [6]])) + pruned, keep_indices = box_list_ops.prune_completely_outside_window( + boxes, window) + return pruned.get(), pruned.get_field('extra_data'), keep_indices + pruned_output, extra_data_out, keep_indices_out = self.execute(graph_fn, []) + exp_output = [[5.0, 5.0, 6.0, 6.0], + [-1.0, -2.0, 4.0, 5.0], + [2.0, 3.0, 5.0, 9.0], + [0.0, 0.0, 9.0, 14.0], + [-100.0, -100.0, 300.0, 600.0]] + self.assertAllClose(pruned_output, exp_output) + self.assertAllEqual(keep_indices_out, [0, 1, 2, 3, 5]) + self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [6]]) + + def test_prune_completely_outside_window_with_empty_boxlist(self): + def graph_fn(): + window = tf.constant([0, 0, 9, 14], tf.float32) + corners = tf.zeros(shape=[0, 4], dtype=tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('extra_data', tf.zeros(shape=[0], dtype=tf.int32)) + pruned, keep_indices = box_list_ops.prune_completely_outside_window( + boxes, window) + pruned_boxes = pruned.get() + extra = pruned.get_field('extra_data') + return pruned_boxes, extra, keep_indices + + pruned_boxes_out, extra_out, keep_indices_out = self.execute(graph_fn, []) + exp_pruned_boxes = np.zeros(shape=[0, 4], dtype=np.float32) + exp_extra = np.zeros(shape=[0], dtype=np.int32) + self.assertAllClose(exp_pruned_boxes, pruned_boxes_out) + self.assertAllEqual([], keep_indices_out) + self.assertAllEqual(exp_extra, extra_out) + + def test_intersection(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + intersect = box_list_ops.intersection(boxes1, boxes2) + return intersect + exp_output = [[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]] + intersect_out = self.execute(graph_fn, []) + self.assertAllClose(intersect_out, exp_output) + + def test_matched_intersection(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + intersect = box_list_ops.matched_intersection(boxes1, boxes2) + return intersect + exp_output = [2.0, 0.0] + intersect_out = self.execute(graph_fn, []) + self.assertAllClose(intersect_out, exp_output) + + def test_iou(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + iou = box_list_ops.iou(boxes1, boxes2) + return iou + exp_output = [[2.0 / 16.0, 0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]] + iou_output = self.execute(graph_fn, []) + self.assertAllClose(iou_output, exp_output) + + def test_l1(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + l1 = box_list_ops.l1(boxes1, boxes2) + return l1 + exp_output = [[5.0, 22.5, 45.5], [8.5, 19.0, 40.0]] + l1_output = self.execute(graph_fn, []) + self.assertAllClose(l1_output, exp_output) + + def test_giou(self): + def graph_fn(): + 
corners1 = tf.constant([[5.0, 7.0, 7.0, 9.0]]) + corners2 = tf.constant([[5.0, 7.0, 7.0, 9.0], [5.0, 11.0, 7.0, 13.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + giou = box_list_ops.giou(boxes1, boxes2) + return giou + exp_output = [[1.0, -1.0 / 3.0]] + giou_output = self.execute(graph_fn, []) + self.assertAllClose(giou_output, exp_output) + + def test_matched_iou(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + iou = box_list_ops.matched_iou(boxes1, boxes2) + return iou + exp_output = [2.0 / 16.0, 0] + iou_output = self.execute(graph_fn, []) + self.assertAllClose(iou_output, exp_output) + + def test_iouworks_on_empty_inputs(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + boxes_empty = box_list.BoxList(tf.zeros((0, 4))) + iou_empty_1 = box_list_ops.iou(boxes1, boxes_empty) + iou_empty_2 = box_list_ops.iou(boxes_empty, boxes2) + iou_empty_3 = box_list_ops.iou(boxes_empty, boxes_empty) + return iou_empty_1, iou_empty_2, iou_empty_3 + iou_output_1, iou_output_2, iou_output_3 = self.execute(graph_fn, []) + self.assertAllEqual(iou_output_1.shape, (2, 0)) + self.assertAllEqual(iou_output_2.shape, (0, 3)) + self.assertAllEqual(iou_output_3.shape, (0, 0)) + + def test_ioa(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + ioa_1 = box_list_ops.ioa(boxes1, boxes2) + ioa_2 = box_list_ops.ioa(boxes2, boxes1) + return ioa_1, ioa_2 + exp_output_1 = [[2.0 / 12.0, 0, 6.0 / 400.0], + [1.0 / 12.0, 0.0, 5.0 / 400.0]] + exp_output_2 = [[2.0 / 6.0, 1.0 / 5.0], + [0, 0], + [6.0 / 6.0, 5.0 / 5.0]] + ioa_output_1, ioa_output_2 = self.execute(graph_fn, []) + self.assertAllClose(ioa_output_1, exp_output_1) + self.assertAllClose(ioa_output_2, exp_output_2) + + def test_prune_non_overlapping_boxes(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + minoverlap = 0.5 + + exp_output_1 = boxes1 + exp_output_2 = box_list.BoxList(tf.constant(0.0, shape=[0, 4])) + output_1, keep_indices_1 = box_list_ops.prune_non_overlapping_boxes( + boxes1, boxes2, min_overlap=minoverlap) + output_2, keep_indices_2 = box_list_ops.prune_non_overlapping_boxes( + boxes2, boxes1, min_overlap=minoverlap) + return (output_1.get(), keep_indices_1, output_2.get(), keep_indices_2, + exp_output_1.get(), exp_output_2.get()) + + (output_1_, keep_indices_1_, output_2_, keep_indices_2_, exp_output_1_, + exp_output_2_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(output_1_, exp_output_1_) + self.assertAllClose(output_2_, exp_output_2_) + self.assertAllEqual(keep_indices_1_, [0, 1]) + self.assertAllEqual(keep_indices_2_, []) + + def test_prune_small_boxes(self): + def graph_fn(): + boxes = tf.constant([[4.0, 3.0, 7.0, 5.0], + [5.0, 6.0, 10.0, 7.0], 
+ [3.0, 4.0, 6.0, 8.0], + [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes = box_list.BoxList(boxes) + pruned_boxes = box_list_ops.prune_small_boxes(boxes, 3) + return pruned_boxes.get() + exp_boxes = [[3.0, 4.0, 6.0, 8.0], + [0.0, 0.0, 20.0, 20.0]] + pruned_boxes = self.execute(graph_fn, []) + self.assertAllEqual(pruned_boxes, exp_boxes) + + def test_prune_small_boxes_prunes_boxes_with_negative_side(self): + def graph_fn(): + boxes = tf.constant([[4.0, 3.0, 7.0, 5.0], + [5.0, 6.0, 10.0, 7.0], + [3.0, 4.0, 6.0, 8.0], + [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0], + [2.0, 3.0, 1.5, 7.0], # negative height + [2.0, 3.0, 5.0, 1.7]]) # negative width + boxes = box_list.BoxList(boxes) + pruned_boxes = box_list_ops.prune_small_boxes(boxes, 3) + return pruned_boxes.get() + exp_boxes = [[3.0, 4.0, 6.0, 8.0], + [0.0, 0.0, 20.0, 20.0]] + pruned_boxes = self.execute_cpu(graph_fn, []) + self.assertAllEqual(pruned_boxes, exp_boxes) + + def test_change_coordinate_frame(self): + def graph_fn(): + corners = tf.constant([[0.25, 0.5, 0.75, 0.75], [0.5, 0.0, 1.0, 1.0]]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + boxes = box_list.BoxList(corners) + + expected_corners = tf.constant([[0, 0.5, 1.0, 1.0], + [0.5, -0.5, 1.5, 1.5]]) + expected_boxes = box_list.BoxList(expected_corners) + output = box_list_ops.change_coordinate_frame(boxes, window) + return output.get(), expected_boxes.get() + output_, expected_boxes_ = self.execute(graph_fn, []) + self.assertAllClose(output_, expected_boxes_) + + def test_ioaworks_on_empty_inputs(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + boxes_empty = box_list.BoxList(tf.zeros((0, 4))) + ioa_empty_1 = box_list_ops.ioa(boxes1, boxes_empty) + ioa_empty_2 = box_list_ops.ioa(boxes_empty, boxes2) + ioa_empty_3 = box_list_ops.ioa(boxes_empty, boxes_empty) + return ioa_empty_1, ioa_empty_2, ioa_empty_3 + ioa_output_1, ioa_output_2, ioa_output_3 = self.execute(graph_fn, []) + self.assertAllEqual(ioa_output_1.shape, (2, 0)) + self.assertAllEqual(ioa_output_2.shape, (0, 3)) + self.assertAllEqual(ioa_output_3.shape, (0, 0)) + + def test_pairwise_distances(self): + def graph_fn(): + corners1 = tf.constant([[0.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 0.0, 2.0]]) + corners2 = tf.constant([[3.0, 4.0, 1.0, 0.0], + [-4.0, 0.0, 0.0, 3.0], + [0.0, 0.0, 0.0, 0.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + dist_matrix = box_list_ops.sq_dist(boxes1, boxes2) + return dist_matrix + exp_output = [[26, 25, 0], [18, 27, 6]] + dist_output = self.execute(graph_fn, []) + self.assertAllClose(dist_output, exp_output) + + def test_boolean_mask(self): + def graph_fn(): + corners = tf.constant( + [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) + indicator = tf.constant([True, False, True, False, True], tf.bool) + boxes = box_list.BoxList(corners) + subset = box_list_ops.boolean_mask(boxes, indicator) + return subset.get() + expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]] + subset_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(subset_output, expected_subset) + + def test_static_boolean_mask_with_field(self): + + def graph_fn(corners, weights, indicator): + boxes = box_list.BoxList(corners) + boxes.add_field('weights', weights) + subset = box_list_ops.boolean_mask( + boxes, + indicator, ['weights'], + 
use_static_shapes=True, + indicator_sum=3) + return (subset.get_field('boxes'), subset.get_field('weights')) + + corners = np.array( + [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]], + dtype=np.float32) + indicator = np.array([True, False, True, False, True], dtype=np.bool) + weights = np.array([[.1], [.3], [.5], [.7], [.9]], dtype=np.float32) + result_boxes, result_weights = self.execute_cpu( + graph_fn, [corners, weights, indicator]) + expected_boxes = [4 * [0.0], 4 * [2.0], 4 * [4.0]] + expected_weights = [[.1], [.5], [.9]] + + self.assertAllClose(result_boxes, expected_boxes) + self.assertAllClose(result_weights, expected_weights) + + def test_gather(self): + def graph_fn(): + corners = tf.constant( + [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) + indices = tf.constant([0, 2, 4], tf.int32) + boxes = box_list.BoxList(corners) + subset = box_list_ops.gather(boxes, indices) + return subset.get() + expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]] + subset_output = self.execute(graph_fn, []) + self.assertAllClose(subset_output, expected_subset) + + def test_static_gather_with_field(self): + + def graph_fn(corners, weights, indices): + boxes = box_list.BoxList(corners) + boxes.add_field('weights', weights) + subset = box_list_ops.gather( + boxes, indices, ['weights'], use_static_shapes=True) + return (subset.get_field('boxes'), subset.get_field('weights')) + + corners = np.array([4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], + 4 * [4.0]], dtype=np.float32) + weights = np.array([[.1], [.3], [.5], [.7], [.9]], dtype=np.float32) + indices = np.array([0, 2, 4], dtype=np.int32) + + result_boxes, result_weights = self.execute(graph_fn, + [corners, weights, indices]) + expected_boxes = [4 * [0.0], 4 * [2.0], 4 * [4.0]] + expected_weights = [[.1], [.5], [.9]] + self.assertAllClose(result_boxes, expected_boxes) + self.assertAllClose(result_weights, expected_weights) + + def test_gather_with_invalid_field(self): + corners = tf.constant([4 * [0.0], 4 * [1.0]]) + indices = tf.constant([0, 1], tf.int32) + weights = tf.constant([[.1], [.3]], tf.float32) + + boxes = box_list.BoxList(corners) + boxes.add_field('weights', weights) + with self.assertRaises(ValueError): + box_list_ops.gather(boxes, indices, ['foo', 'bar']) + + def test_gather_with_invalid_inputs(self): + corners = tf.constant( + [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) + indices_float32 = tf.constant([0, 2, 4], tf.float32) + boxes = box_list.BoxList(corners) + with self.assertRaises(ValueError): + _ = box_list_ops.gather(boxes, indices_float32) + indices_2d = tf.constant([[0, 2, 4]], tf.int32) + boxes = box_list.BoxList(corners) + with self.assertRaises(ValueError): + _ = box_list_ops.gather(boxes, indices_2d) + + def test_gather_with_dynamic_indexing(self): + def graph_fn(): + corners = tf.constant( + [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) + weights = tf.constant([.5, .3, .7, .1, .9], tf.float32) + indices = tf.reshape(tf.where(tf.greater(weights, 0.4)), [-1]) + boxes = box_list.BoxList(corners) + boxes.add_field('weights', weights) + subset = box_list_ops.gather(boxes, indices, ['weights']) + return subset.get(), subset.get_field('weights') + expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]] + expected_weights = [.5, .7, .9] + subset_output, weights_output = self.execute(graph_fn, []) + self.assertAllClose(subset_output, expected_subset) + self.assertAllClose(weights_output, expected_weights) + + def test_sort_by_field_ascending_order(self): + exp_corners = [[0, 0, 1, 1], [0, 0.1, 1, 
1.1], [0, -0.1, 1, 0.9], + [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] + exp_scores = [.95, .9, .75, .6, .5, .3] + exp_weights = [.2, .45, .6, .75, .8, .92] + + def graph_fn(): + shuffle = [2, 4, 0, 5, 1, 3] + corners = tf.constant([exp_corners[i] for i in shuffle], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant( + [exp_scores[i] for i in shuffle], tf.float32)) + boxes.add_field('weights', tf.constant( + [exp_weights[i] for i in shuffle], tf.float32)) + sort_by_weight = box_list_ops.sort_by_field( + boxes, + 'weights', + order=box_list_ops.SortOrder.ascend) + return [sort_by_weight.get(), sort_by_weight.get_field('scores'), + sort_by_weight.get_field('weights')] + corners_out, scores_out, weights_out = self.execute(graph_fn, []) + self.assertAllClose(corners_out, exp_corners) + self.assertAllClose(scores_out, exp_scores) + self.assertAllClose(weights_out, exp_weights) + + def test_sort_by_field_descending_order(self): + exp_corners = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], + [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] + exp_scores = [.95, .9, .75, .6, .5, .3] + exp_weights = [.2, .45, .6, .75, .8, .92] + + def graph_fn(): + shuffle = [2, 4, 0, 5, 1, 3] + corners = tf.constant([exp_corners[i] for i in shuffle], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant( + [exp_scores[i] for i in shuffle], tf.float32)) + boxes.add_field('weights', tf.constant( + [exp_weights[i] for i in shuffle], tf.float32)) + sort_by_score = box_list_ops.sort_by_field(boxes, 'scores') + return (sort_by_score.get(), sort_by_score.get_field('scores'), + sort_by_score.get_field('weights')) + + corners_out, scores_out, weights_out = self.execute(graph_fn, []) + self.assertAllClose(corners_out, exp_corners) + self.assertAllClose(scores_out, exp_scores) + self.assertAllClose(weights_out, exp_weights) + + def test_sort_by_field_invalid_inputs(self): + corners = tf.constant([4 * [0.0], 4 * [0.5], 4 * [1.0], 4 * [2.0], 4 * + [3.0], 4 * [4.0]]) + misc = tf.constant([[.95, .9], [.5, .3]], tf.float32) + weights = tf.constant([[.1, .2]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('misc', misc) + boxes.add_field('weights', weights) + + with self.assertRaises(ValueError): + box_list_ops.sort_by_field(boxes, 'area') + + with self.assertRaises(ValueError): + box_list_ops.sort_by_field(boxes, 'misc') + + with self.assertRaises(ValueError): + box_list_ops.sort_by_field(boxes, 'weights') + + def test_visualize_boxes_in_image(self): + def graph_fn(): + image = tf.zeros((6, 4, 3)) + corners = tf.constant([[0, 0, 5, 3], + [0, 0, 3, 2]], tf.float32) + boxes = box_list.BoxList(corners) + image_and_boxes = box_list_ops.visualize_boxes_in_image(image, boxes) + image_and_boxes_bw = tf.cast( + tf.greater(tf.reduce_sum(image_and_boxes, 2), 0.0), dtype=tf.float32) + return image_and_boxes_bw + exp_result = [[1, 1, 1, 0], + [1, 1, 1, 0], + [1, 1, 1, 0], + [1, 0, 1, 0], + [1, 1, 1, 0], + [0, 0, 0, 0]] + output = self.execute_cpu(graph_fn, []) + self.assertAllEqual(output.astype(int), exp_result) + + def test_filter_field_value_equals(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1], + [0, -0.1, 1, 0.9], + [0, 10, 1, 11], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('classes', tf.constant([1, 2, 1, 2, 2, 1])) + filtered_boxes1 = box_list_ops.filter_field_value_equals( + boxes, 'classes', 1) + filtered_boxes2 = 
box_list_ops.filter_field_value_equals( + boxes, 'classes', 2) + return filtered_boxes1.get(), filtered_boxes2.get() + exp_output1 = [[0, 0, 1, 1], [0, -0.1, 1, 0.9], [0, 100, 1, 101]] + exp_output2 = [[0, 0.1, 1, 1.1], [0, 10, 1, 11], [0, 10.1, 1, 11.1]] + filtered_output1, filtered_output2 = self.execute_cpu(graph_fn, []) + self.assertAllClose(filtered_output1, exp_output1) + self.assertAllClose(filtered_output2, exp_output2) + + def test_filter_greater_than(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1], + [0, -0.1, 1, 0.9], + [0, 10, 1, 11], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant([.1, .75, .9, .5, .5, .8])) + thresh = .6 + filtered_boxes = box_list_ops.filter_greater_than(boxes, thresh) + return filtered_boxes.get() + exp_output = [[0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 100, 1, 101]] + filtered_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(filtered_output, exp_output) + + def test_clip_box_list(self): + def graph_fn(): + boxlist = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], + [0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32)) + boxlist.add_field('classes', tf.constant([0, 0, 1, 1])) + boxlist.add_field('scores', tf.constant([0.75, 0.65, 0.3, 0.2])) + num_boxes = 2 + clipped_boxlist = box_list_ops.pad_or_clip_box_list(boxlist, num_boxes) + return (clipped_boxlist.get(), clipped_boxlist.get_field('classes'), + clipped_boxlist.get_field('scores')) + + expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]] + expected_classes = [0, 0] + expected_scores = [0.75, 0.65] + boxes_out, classes_out, scores_out = self.execute(graph_fn, []) + + self.assertAllClose(expected_boxes, boxes_out) + self.assertAllEqual(expected_classes, classes_out) + self.assertAllClose(expected_scores, scores_out) + + def test_pad_box_list(self): + def graph_fn(): + boxlist = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32)) + boxlist.add_field('classes', tf.constant([0, 1])) + boxlist.add_field('scores', tf.constant([0.75, 0.2])) + num_boxes = 4 + padded_boxlist = box_list_ops.pad_or_clip_box_list(boxlist, num_boxes) + return (padded_boxlist.get(), padded_boxlist.get_field('classes'), + padded_boxlist.get_field('scores')) + expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], + [0, 0, 0, 0], [0, 0, 0, 0]] + expected_classes = [0, 1, 0, 0] + expected_scores = [0.75, 0.2, 0, 0] + boxes_out, classes_out, scores_out = self.execute(graph_fn, []) + self.assertAllClose(expected_boxes, boxes_out) + self.assertAllEqual(expected_classes, classes_out) + self.assertAllClose(expected_scores, scores_out) + + def test_select_random_box(self): + boxes = [[0., 0., 1., 1.], + [0., 1., 2., 3.], + [0., 2., 3., 4.]] + def graph_fn(): + corners = tf.constant(boxes, dtype=tf.float32) + boxlist = box_list.BoxList(corners) + random_bbox, valid = box_list_ops.select_random_box(boxlist) + return random_bbox, valid + random_bbox_out, valid_out = self.execute(graph_fn, []) + norm_small = any( + [np.linalg.norm(random_bbox_out - box) < 1e-6 for box in boxes]) + self.assertTrue(norm_small) + self.assertTrue(valid_out) + + def test_select_random_box_with_empty_boxlist(self): + def graph_fn(): + corners = tf.constant([], shape=[0, 4], dtype=tf.float32) + boxlist = box_list.BoxList(corners) + random_bbox, valid = box_list_ops.select_random_box(boxlist) + return random_bbox, valid + random_bbox_out, valid_out = 
self.execute_cpu(graph_fn, []) + expected_bbox_out = np.array([[-1., -1., -1., -1.]], dtype=np.float32) + self.assertAllEqual(expected_bbox_out, random_bbox_out) + self.assertFalse(valid_out) + + def test_get_minimal_coverage_box(self): + def graph_fn(): + boxes = [[0., 0., 1., 1.], + [-1., 1., 2., 3.], + [0., 2., 3., 4.]] + corners = tf.constant(boxes, dtype=tf.float32) + boxlist = box_list.BoxList(corners) + coverage_box = box_list_ops.get_minimal_coverage_box(boxlist) + return coverage_box + coverage_box_out = self.execute(graph_fn, []) + expected_coverage_box = [[-1., 0., 3., 4.]] + self.assertAllClose(expected_coverage_box, coverage_box_out) + + def test_get_minimal_coverage_box_with_empty_boxlist(self): + def graph_fn(): + corners = tf.constant([], shape=[0, 4], dtype=tf.float32) + boxlist = box_list.BoxList(corners) + coverage_box = box_list_ops.get_minimal_coverage_box(boxlist) + return coverage_box + coverage_box_out = self.execute(graph_fn, []) + self.assertAllClose([[0.0, 0.0, 1.0, 1.0]], coverage_box_out) + + +class ConcatenateTest(test_case.TestCase): + + def test_invalid_input_box_list_list(self): + with self.assertRaises(ValueError): + box_list_ops.concatenate(None) + with self.assertRaises(ValueError): + box_list_ops.concatenate([]) + with self.assertRaises(ValueError): + corners = tf.constant([[0, 0, 0, 0]], tf.float32) + boxlist = box_list.BoxList(corners) + box_list_ops.concatenate([boxlist, 2]) + + def test_concatenate_with_missing_fields(self): + corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32) + scores1 = tf.constant([1.0, 2.1]) + corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8]], tf.float32) + boxlist1 = box_list.BoxList(corners1) + boxlist1.add_field('scores', scores1) + boxlist2 = box_list.BoxList(corners2) + with self.assertRaises(ValueError): + box_list_ops.concatenate([boxlist1, boxlist2]) + + def test_concatenate_with_incompatible_field_shapes(self): + corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32) + scores1 = tf.constant([1.0, 2.1]) + corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8]], tf.float32) + scores2 = tf.constant([[1.0, 1.0], [2.1, 3.2]]) + boxlist1 = box_list.BoxList(corners1) + boxlist1.add_field('scores', scores1) + boxlist2 = box_list.BoxList(corners2) + boxlist2.add_field('scores', scores2) + with self.assertRaises(ValueError): + box_list_ops.concatenate([boxlist1, boxlist2]) + + def test_concatenate_is_correct(self): + def graph_fn(): + corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32) + scores1 = tf.constant([1.0, 2.1]) + corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8], [1, 0, 5, 10]], + tf.float32) + scores2 = tf.constant([1.0, 2.1, 5.6]) + boxlist1 = box_list.BoxList(corners1) + boxlist1.add_field('scores', scores1) + boxlist2 = box_list.BoxList(corners2) + boxlist2.add_field('scores', scores2) + result = box_list_ops.concatenate([boxlist1, boxlist2]) + return result.get(), result.get_field('scores') + exp_corners = [[0, 0, 0, 0], + [1, 2, 3, 4], + [0, 3, 1, 6], + [2, 4, 3, 8], + [1, 0, 5, 10]] + exp_scores = [1.0, 2.1, 1.0, 2.1, 5.6] + corners_output, scores_output = self.execute(graph_fn, []) + self.assertAllClose(corners_output, exp_corners) + self.assertAllClose(scores_output, exp_scores) + + +class NonMaxSuppressionTest(test_case.TestCase): + + def test_select_from_three_clusters(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1], + [0, -0.1, 1, 0.9], + [0, 10, 1, 11], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101]], tf.float32) + boxes = 
box_list.BoxList(corners) + boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3])) + iou_thresh = .5 + max_output_size = 3 + nms = box_list_ops.non_max_suppression( + boxes, iou_thresh, max_output_size) + return nms.get() + exp_nms = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 100, 1, 101]] + nms_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_output, exp_nms) + + def test_select_at_most_two_boxes_from_three_clusters(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1], + [0, -0.1, 1, 0.9], + [0, 10, 1, 11], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3])) + iou_thresh = .5 + max_output_size = 2 + nms = box_list_ops.non_max_suppression( + boxes, iou_thresh, max_output_size) + return nms.get() + exp_nms = [[0, 10, 1, 11], + [0, 0, 1, 1]] + nms_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_output, exp_nms) + + def test_select_at_most_thirty_boxes_from_three_clusters(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1], + [0, -0.1, 1, 0.9], + [0, 10, 1, 11], + [0, 10.1, 1, 11.1], + [0, 100, 1, 101]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3])) + iou_thresh = .5 + max_output_size = 30 + nms = box_list_ops.non_max_suppression( + boxes, iou_thresh, max_output_size) + return nms.get() + exp_nms = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 100, 1, 101]] + nms_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_output, exp_nms) + + def test_select_single_box(self): + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant([.9])) + iou_thresh = .5 + max_output_size = 3 + nms = box_list_ops.non_max_suppression( + boxes, iou_thresh, max_output_size) + return nms.get() + exp_nms = [[0, 0, 1, 1]] + nms_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_output, exp_nms) + + def test_select_from_ten_identical_boxes(self): + def graph_fn(): + corners = tf.constant(10 * [[0, 0, 1, 1]], tf.float32) + boxes = box_list.BoxList(corners) + boxes.add_field('scores', tf.constant(10 * [.9])) + iou_thresh = .5 + max_output_size = 3 + nms = box_list_ops.non_max_suppression( + boxes, iou_thresh, max_output_size) + return nms.get() + exp_nms = [[0, 0, 1, 1]] + nms_output = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_output, exp_nms) + + def test_copy_extra_fields(self): + tensor1 = np.array([[1], [4]]) + tensor2 = np.array([[1, 1], [2, 2]]) + def graph_fn(): + corners = tf.constant([[0, 0, 1, 1], + [0, 0.1, 1, 1.1]], tf.float32) + boxes = box_list.BoxList(corners) + + boxes.add_field('tensor1', tf.constant(tensor1)) + boxes.add_field('tensor2', tf.constant(tensor2)) + new_boxes = box_list.BoxList(tf.constant([[0, 0, 10, 10], + [1, 3, 5, 5]], tf.float32)) + new_boxes = box_list_ops._copy_extra_fields(new_boxes, boxes) + return new_boxes.get_field('tensor1'), new_boxes.get_field('tensor2') + tensor1_out, tensor2_out = self.execute_cpu(graph_fn, []) + self.assertAllClose(tensor1, tensor1_out) + self.assertAllClose(tensor2, tensor2_out) + + +class CoordinatesConversionTest(test_case.TestCase): + + def test_to_normalized_coordinates(self): + def graph_fn(): + coordinates = tf.constant([[0, 0, 100, 100], + [25, 25, 75, 75]], tf.float32) + img = tf.ones((128, 100, 100, 3)) + boxlist = box_list.BoxList(coordinates) + 
normalized_boxlist = box_list_ops.to_normalized_coordinates( + boxlist, tf.shape(img)[1], tf.shape(img)[2]) + return normalized_boxlist.get() + expected_boxes = [[0, 0, 1, 1], + [0.25, 0.25, 0.75, 0.75]] + normalized_boxes = self.execute(graph_fn, []) + self.assertAllClose(normalized_boxes, expected_boxes) + + def test_to_normalized_coordinates_already_normalized(self): + def graph_fn(): + coordinates = tf.constant([[0, 0, 1, 1], + [0.25, 0.25, 0.75, 0.75]], tf.float32) + img = tf.ones((128, 100, 100, 3)) + boxlist = box_list.BoxList(coordinates) + normalized_boxlist = box_list_ops.to_normalized_coordinates( + boxlist, tf.shape(img)[1], tf.shape(img)[2]) + return normalized_boxlist.get() + with self.assertRaisesOpError('assertion failed'): + self.execute_cpu(graph_fn, []) + + def test_to_absolute_coordinates(self): + def graph_fn(): + coordinates = tf.constant([[0, 0, 1, 1], + [0.25, 0.25, 0.75, 0.75]], tf.float32) + img = tf.ones((128, 100, 100, 3)) + boxlist = box_list.BoxList(coordinates) + absolute_boxlist = box_list_ops.to_absolute_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + return absolute_boxlist.get() + expected_boxes = [[0, 0, 100, 100], + [25, 25, 75, 75]] + absolute_boxes = self.execute(graph_fn, []) + self.assertAllClose(absolute_boxes, expected_boxes) + + def test_to_absolute_coordinates_already_abolute(self): + def graph_fn(): + coordinates = tf.constant([[0, 0, 100, 100], + [25, 25, 75, 75]], tf.float32) + img = tf.ones((128, 100, 100, 3)) + boxlist = box_list.BoxList(coordinates) + absolute_boxlist = box_list_ops.to_absolute_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + return absolute_boxlist.get() + with self.assertRaisesOpError('assertion failed'): + self.execute_cpu(graph_fn, []) + + def test_convert_to_normalized_and_back(self): + coordinates = np.random.uniform(size=(100, 4)) + coordinates = np.round(np.sort(coordinates) * 200) + coordinates[:, 2:4] += 1 + coordinates[99, :] = [0, 0, 201, 201] + def graph_fn(): + img = tf.ones((128, 202, 202, 3)) + + boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32)) + boxlist = box_list_ops.to_normalized_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + boxlist = box_list_ops.to_absolute_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + return boxlist.get() + out = self.execute(graph_fn, []) + self.assertAllClose(out, coordinates) + + def test_convert_to_absolute_and_back(self): + coordinates = np.random.uniform(size=(100, 4)) + coordinates = np.sort(coordinates) + coordinates[99, :] = [0, 0, 1, 1] + def graph_fn(): + img = tf.ones((128, 202, 202, 3)) + boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32)) + boxlist = box_list_ops.to_absolute_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + boxlist = box_list_ops.to_normalized_coordinates(boxlist, + tf.shape(img)[1], + tf.shape(img)[2]) + return boxlist.get() + out = self.execute(graph_fn, []) + self.assertAllClose(out, coordinates) + + def test_to_absolute_coordinates_maximum_coordinate_check(self): + def graph_fn(): + coordinates = tf.constant([[0, 0, 1.2, 1.2], + [0.25, 0.25, 0.75, 0.75]], tf.float32) + img = tf.ones((128, 100, 100, 3)) + boxlist = box_list.BoxList(coordinates) + absolute_boxlist = box_list_ops.to_absolute_coordinates( + boxlist, + tf.shape(img)[1], + tf.shape(img)[2], + maximum_normalized_coordinate=1.1) + return absolute_boxlist.get() + with self.assertRaisesOpError('assertion failed'): + self.execute_cpu(graph_fn, []) + + +class 
BoxRefinementTest(test_case.TestCase): + + def test_box_voting(self): + def graph_fn(): + candidates = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.6, 0.6, 0.8, 0.8]], tf.float32)) + candidates.add_field('ExtraField', tf.constant([1, 2])) + pool = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], + [0.6, 0.6, 0.8, 0.8]], tf.float32)) + pool.add_field('scores', tf.constant([0.75, 0.25, 0.3])) + averaged_boxes = box_list_ops.box_voting(candidates, pool) + return (averaged_boxes.get(), averaged_boxes.get_field('scores'), + averaged_boxes.get_field('ExtraField')) + + expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8]] + expected_scores = [0.5, 0.3] + boxes_out, scores_out, extra_field_out = self.execute(graph_fn, []) + self.assertAllClose(expected_boxes, boxes_out) + self.assertAllClose(expected_scores, scores_out) + self.assertAllEqual(extra_field_out, [1, 2]) + + def test_box_voting_fails_with_negative_scores(self): + def graph_fn(): + candidates = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32)) + pool = box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32)) + pool.add_field('scores', tf.constant([-0.2])) + averaged_boxes = box_list_ops.box_voting(candidates, pool) + return averaged_boxes.get() + + with self.assertRaisesOpError('Scores must be non negative'): + self.execute_cpu(graph_fn, []) + + def test_box_voting_fails_when_unmatched(self): + def graph_fn(): + candidates = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32)) + pool = box_list.BoxList(tf.constant([[0.6, 0.6, 0.8, 0.8]], tf.float32)) + pool.add_field('scores', tf.constant([0.2])) + averaged_boxes = box_list_ops.box_voting(candidates, pool) + return averaged_boxes.get() + with self.assertRaisesOpError('Each box in selected_boxes must match ' + 'with at least one box in pool_boxes.'): + self.execute_cpu(graph_fn, []) + + def test_refine_boxes(self): + def graph_fn(): + pool = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], + [0.6, 0.6, 0.8, 0.8]], tf.float32)) + pool.add_field('ExtraField', tf.constant([1, 2, 3])) + pool.add_field('scores', tf.constant([0.75, 0.25, 0.3])) + averaged_boxes = box_list_ops.refine_boxes(pool, 0.5, 10) + return (averaged_boxes.get(), averaged_boxes.get_field('scores'), + averaged_boxes.get_field('ExtraField')) + boxes_out, scores_out, extra_field_out = self.execute_cpu(graph_fn, []) + expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8]] + expected_scores = [0.5, 0.3] + self.assertAllClose(expected_boxes, boxes_out) + self.assertAllClose(expected_scores, scores_out) + self.assertAllEqual(extra_field_out, [1, 3]) + + def test_refine_boxes_multi_class(self): + def graph_fn(): + pool = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], + [0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32)) + pool.add_field('classes', tf.constant([0, 0, 1, 1])) + pool.add_field('scores', tf.constant([0.75, 0.25, 0.3, 0.2])) + averaged_boxes = box_list_ops.refine_boxes_multi_class(pool, 3, 0.5, 10) + return (averaged_boxes.get(), averaged_boxes.get_field('scores'), + averaged_boxes.get_field('classes')) + boxes_out, scores_out, extra_field_out = self.execute_cpu(graph_fn, []) + expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8], + [0.2, 0.2, 0.3, 0.3]] + expected_scores = [0.5, 0.3, 0.2] + self.assertAllClose(expected_boxes, boxes_out) + self.assertAllClose(expected_scores, scores_out) + self.assertAllEqual(extra_field_out, [0, 1, 1]) 
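For reference, the expected values in the box voting and refinement tests above come from a score-weighted average of the pool boxes that overlap each candidate; the sketch below is a minimal NumPy illustration of that arithmetic under that assumption, not the box_list_ops implementation itself.

import numpy as np

# Pool boxes that overlap the first candidate in the tests above, with scores.
pool = np.array([[0.1, 0.1, 0.4, 0.4],
                 [0.1, 0.1, 0.5, 0.5]], dtype=np.float32)
scores = np.array([0.75, 0.25], dtype=np.float32)

# Score-weighted average of the matched boxes gives [0.1, 0.1, 0.425, 0.425].
voted_box = (scores[:, None] * pool).sum(axis=0) / scores.sum()
# The refined score is the mean of the matched scores, i.e. 0.5.
voted_score = scores.mean()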
+ + def test_sample_boxes_by_jittering(self): + def graph_fn(): + boxes = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], + [0.1, 0.1, 0.5, 0.5], + [0.6, 0.6, 0.8, 0.8], + [0.2, 0.2, 0.3, 0.3]], tf.float32)) + sampled_boxes = box_list_ops.sample_boxes_by_jittering( + boxlist=boxes, num_boxes_to_sample=10) + iou = box_list_ops.iou(boxes, sampled_boxes) + iou_max = tf.reduce_max(iou, axis=0) + return sampled_boxes.get(), iou_max + np_sampled_boxes, np_iou_max = self.execute(graph_fn, []) + self.assertAllEqual(np_sampled_boxes.shape, [10, 4]) + self.assertAllGreater(np_iou_max, 0.3) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c1389dbf8ae51f82ee28780d59ca599b1eff0d3e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_list_test.py @@ -0,0 +1,121 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.box_list.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list +from object_detection.utils import test_case + + +class BoxListTest(test_case.TestCase): + """Tests for BoxList class.""" + + def test_num_boxes(self): + def graph_fn(): + data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32) + boxes = box_list.BoxList(data) + return boxes.num_boxes() + num_boxes_out = self.execute(graph_fn, []) + self.assertEqual(num_boxes_out, 3) + + def test_get_correct_center_coordinates_and_sizes(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + def graph_fn(boxes): + boxes = box_list.BoxList(boxes) + centers_sizes = boxes.get_center_coordinates_and_sizes() + return centers_sizes + centers_sizes_out = self.execute(graph_fn, [boxes]) + expected_centers_sizes = [[15, 0.35], [12.5, 0.25], [10, 0.3], [5, 0.3]] + self.assertAllClose(centers_sizes_out, expected_centers_sizes) + + def test_create_box_list_with_dynamic_shape(self): + def graph_fn(): + data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32) + indices = tf.reshape(tf.where(tf.greater([1, 0, 1], 0)), [-1]) + data = tf.gather(data, indices) + assert data.get_shape().as_list() == [None, 4] + boxes = box_list.BoxList(data) + return boxes.num_boxes() + num_boxes = self.execute(graph_fn, []) + self.assertEqual(num_boxes, 2) + + def test_transpose_coordinates(self): + boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], + np.float32) + def graph_fn(boxes): + boxes = box_list.BoxList(boxes) + boxes.transpose_coordinates() + return boxes.get() + transpoded_boxes = self.execute(graph_fn, [boxes]) + expected_corners = [[10.0, 10.0, 15.0, 
20.0], [0.1, 0.2, 0.4, 0.5]] + self.assertAllClose(transpoded_boxes, expected_corners) + + def test_box_list_invalid_inputs(self): + data0 = tf.constant([[[0, 0, 1, 1], [3, 4, 5, 5]]], tf.float32) + data1 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.float32) + data2 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.int32) + + with self.assertRaises(ValueError): + _ = box_list.BoxList(data0) + with self.assertRaises(ValueError): + _ = box_list.BoxList(data1) + with self.assertRaises(ValueError): + _ = box_list.BoxList(data2) + + def test_num_boxes_static(self): + box_corners = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]] + boxes = box_list.BoxList(tf.constant(box_corners)) + self.assertEqual(boxes.num_boxes_static(), 2) + self.assertEqual(type(boxes.num_boxes_static()), int) + + def test_as_tensor_dict(self): + boxes = tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], + tf.float32) + boxlist = box_list.BoxList(boxes) + classes = tf.constant([0, 1]) + boxlist.add_field('classes', classes) + scores = tf.constant([0.75, 0.2]) + boxlist.add_field('scores', scores) + tensor_dict = boxlist.as_tensor_dict() + + self.assertDictEqual(tensor_dict, {'scores': scores, 'classes': classes, + 'boxes': boxes}) + + def test_as_tensor_dict_with_features(self): + boxes = tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], + tf.float32) + boxlist = box_list.BoxList(boxes) + classes = tf.constant([0, 1]) + boxlist.add_field('classes', classes) + scores = tf.constant([0.75, 0.2]) + boxlist.add_field('scores', scores) + tensor_dict = boxlist.as_tensor_dict(['scores', 'classes']) + + self.assertDictEqual(tensor_dict, {'scores': scores, 'classes': classes}) + + def test_as_tensor_dict_missing_field(self): + boxlist = box_list.BoxList( + tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32)) + boxlist.add_field('classes', tf.constant([0, 1])) + boxlist.add_field('scores', tf.constant([0.75, 0.2])) + with self.assertRaises(ValueError): + boxlist.as_tensor_dict(['foo', 'bar']) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_predictor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..27d77d299bfb8c44de338dd364258b2840ed3927 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_predictor.py @@ -0,0 +1,227 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Box predictor for object detectors. + +Box predictors are classes that take a high level +image feature map as input and produce two predictions, +(1) a tensor encoding box locations, and +(2) a tensor encoding classes for each box. + +These components are passed directly to loss functions +in our detection models. 
+ +These modules are separated from the main model since the same +few box predictor architectures are shared across many models. +""" +from abc import abstractmethod +import tensorflow.compat.v1 as tf + +BOX_ENCODINGS = 'box_encodings' +CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' +MASK_PREDICTIONS = 'mask_predictions' + + +class BoxPredictor(object): + """BoxPredictor.""" + + def __init__(self, is_training, num_classes): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + """ + self._is_training = is_training + self._num_classes = num_classes + + @property + def is_keras_model(self): + return False + + @property + def num_classes(self): + return self._num_classes + + def predict(self, image_features, num_predictions_per_location, + scope=None, **params): + """Computes encoded object locations and corresponding confidences. + + Takes a list of high level image feature maps as input and produces a list + of box encodings and a list of class scores where each element in the output + lists correspond to the feature maps in the input list. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + num_predictions_per_location: A list of integers representing the number + of box predictions to be made per spatial location for each feature map. + scope: Variable and Op scope name. + **params: Additional keyword arguments for specific implementations of + BoxPredictor. + + Returns: + A dictionary containing at least the following tensors. + box_encodings: A list of float tensors. Each entry in the list + corresponds to a feature map in the input `image_features` list. All + tensors in the list have one of the two following shapes: + a. [batch_size, num_anchors_i, q, code_size] representing the location + of the objects, where q is 1 or the number of classes. + b. [batch_size, num_anchors_i, code_size]. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + + Raises: + ValueError: If length of `image_features` is not equal to length of + `num_predictions_per_location`. + """ + if len(image_features) != len(num_predictions_per_location): + raise ValueError('image_feature and num_predictions_per_location must ' + 'be of same length, found: {} vs {}'. + format(len(image_features), + len(num_predictions_per_location))) + if scope is not None: + with tf.variable_scope(scope): + return self._predict(image_features, num_predictions_per_location, + **params) + return self._predict(image_features, num_predictions_per_location, + **params) + + # TODO(rathodv): num_predictions_per_location could be moved to constructor. + # This is currently only used by ConvolutionalBoxPredictor. + @abstractmethod + def _predict(self, image_features, num_predictions_per_location, **params): + """Implementations must override this method. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. 
+ num_predictions_per_location: A list of integers representing the number + of box predictions to be made per spatial location for each feature map. + **params: Additional keyword arguments for specific implementations of + BoxPredictor. + + Returns: + A dictionary containing at least the following tensors. + box_encodings: A list of float tensors. Each entry in the list + corresponds to a feature map in the input `image_features` list. All + tensors in the list have one of the two following shapes: + a. [batch_size, num_anchors_i, q, code_size] representing the location + of the objects, where q is 1 or the number of classes. + b. [batch_size, num_anchors_i, code_size]. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + """ + pass + + +class KerasBoxPredictor(tf.keras.layers.Layer): + """Keras-based BoxPredictor.""" + + def __init__(self, is_training, num_classes, freeze_batchnorm, + inplace_batchnorm_update, name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + """ + super(KerasBoxPredictor, self).__init__(name=name) + + self._is_training = is_training + self._num_classes = num_classes + self._freeze_batchnorm = freeze_batchnorm + self._inplace_batchnorm_update = inplace_batchnorm_update + + @property + def is_keras_model(self): + return True + + @property + def num_classes(self): + return self._num_classes + + def call(self, image_features, **kwargs): + """Computes encoded object locations and corresponding confidences. + + Takes a list of high level image feature maps as input and produces a list + of box encodings and a list of class scores where each element in the output + lists correspond to the feature maps in the input list. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + **kwargs: Additional keyword arguments for specific implementations of + BoxPredictor. + + Returns: + A dictionary containing at least the following tensors. + box_encodings: A list of float tensors. Each entry in the list + corresponds to a feature map in the input `image_features` list. All + tensors in the list have one of the two following shapes: + a. [batch_size, num_anchors_i, q, code_size] representing the location + of the objects, where q is 1 or the number of classes. + b. [batch_size, num_anchors_i, code_size]. 
+ class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + """ + return self._predict(image_features, **kwargs) + + @abstractmethod + def _predict(self, image_features, **kwargs): + """Implementations must override this method. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + **kwargs: Additional keyword arguments for specific implementations of + BoxPredictor. + + Returns: + A dictionary containing at least the following tensors. + box_encodings: A list of float tensors. Each entry in the list + corresponds to a feature map in the input `image_features` list. All + tensors in the list have one of the two following shapes: + a. [batch_size, num_anchors_i, q, code_size] representing the location + of the objects, where q is 1 or the number of classes. + b. [batch_size, num_anchors_i, code_size]. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + """ + raise NotImplementedError diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_predictor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_predictor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62da1a995ad74bab09860086afa93446eee1385f Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/box_predictor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/class_agnostic_nms_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/class_agnostic_nms_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ed205c51d3651a473facae987aa02451dac4135a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/class_agnostic_nms_test.py @@ -0,0 +1,144 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for google3.third_party.tensorflow_models.object_detection.core.class_agnostic_nms.""" +from absl.testing import parameterized +import tensorflow.compat.v1 as tf +from object_detection.core import post_processing +from object_detection.core import standard_fields as fields +from object_detection.utils import test_case + + +class ClassAgnosticNonMaxSuppressionTest(test_case.TestCase, + parameterized.TestCase): + + def test_class_agnostic_nms_select_with_shared_boxes(self): + def graph_fn(): + boxes = tf.constant( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], tf.float32) + scores = tf.constant([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]) + score_thresh = 0.1 + iou_thresh = .5 + max_classes_per_detection = 1 + max_output_size = 4 + nms, _ = post_processing.class_agnostic_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, max_classes_per_detection, + max_output_size) + return (nms.get(), nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes)) + + exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + + (nms_corners_output, nms_scores_output, + nms_classes_output) = self.execute_cpu(graph_fn, []) + self.assertAllClose(nms_corners_output, exp_nms_corners) + self.assertAllClose(nms_scores_output, exp_nms_scores) + self.assertAllClose(nms_classes_output, exp_nms_classes) + + + def test_class_agnostic_nms_select_with_per_class_boxes(self): + def graph_fn(): + boxes = tf.constant( + [[[4, 5, 9, 10], [0, 0, 1, 1]], + [[0, 0.1, 1, 1.1], [4, 5, 9, 10]], + [[0, -0.1, 1, 0.9], [4, 5, 9, 10]], + [[0, 10, 1, 11], [4, 5, 9, 10]], + [[0, 10.1, 1, 11.1], [4, 5, 9, 10]], + [[0, 100, 1, 101], [4, 5, 9, 10]], + [[4, 5, 9, 10], [0, 1000, 1, 1002]], + [[4, 5, 9, 10], [0, 1000, 1, 1002.1]]], tf.float32) + scores = tf.constant([[.01, 0.9], + [.75, 0.05], + [.6, 0.01], + [.95, 0], + [.5, 0.01], + [.3, 0.01], + [.01, .85], + [.01, .5]]) + score_thresh = 0.1 + iou_thresh = .5 + max_classes_per_detection = 1 + max_output_size = 4 + nms, _ = post_processing.class_agnostic_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, max_classes_per_detection, + max_output_size) + return (nms.get(), nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes)) + (nms_corners_output, nms_scores_output, + nms_classes_output) = self.execute_cpu(graph_fn, []) + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 1, 1, 0] + self.assertAllClose(nms_corners_output, exp_nms_corners) + self.assertAllClose(nms_scores_output, exp_nms_scores) + self.assertAllClose(nms_classes_output, exp_nms_classes) + + # Two cases will be tested here: using / not using static shapes. + # Named the two test cases for easier control during testing, with a flag of + # '--test_filter=ClassAgnosticNonMaxSuppressionTest.test_batch_classagnostic_nms_with_batch_size_1' + # or + # '--test_filter=ClassAgnosticNonMaxSuppressionTest.test_batch_classagnostic_nms_with_batch_size_1_use_static_shapes'. 
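The expected class assignments in these class-agnostic tests follow from scoring each box by its best class before a single joint NMS pass; the sketch below is a minimal NumPy illustration of that score reduction, using the per-class scores from the shared-boxes test above (an illustrative assumption, not the post_processing implementation).

import numpy as np

# Per-class scores for the eight shared boxes in the first test above.
scores = np.array([[.9, .01], [.75, .05], [.6, .01], [.95, 0.],
                   [.5, .01], [.3, .01], [.01, .85], [.01, .5]], dtype=np.float32)

# Each box keeps its best score and the class that produced it.
agnostic_scores = scores.max(axis=1)      # [.9, .75, .6, .95, .5, .3, .85, .5]
agnostic_classes = scores.argmax(axis=1)  # [0, 0, 0, 0, 0, 0, 1, 1]
# A single NMS pass at IoU 0.5 then keeps boxes 3, 0, 6 and 5, matching the
# expected scores [.95, .9, .85, .3] and classes [0, 0, 1, 0] asserted above.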
+ @parameterized.named_parameters(('', False), ('_use_static_shapes', True)) + def test_batch_classagnostic_nms_with_batch_size_1(self, + use_static_shapes=False): + def graph_fn(): + boxes = tf.constant( + [[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]]], tf.float32) + scores = tf.constant([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]]) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + max_classes_per_detection = 1 + use_class_agnostic_nms = True + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, + num_detections) = post_processing.batch_multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class=max_output_size, + max_total_size=max_output_size, + use_class_agnostic_nms=use_class_agnostic_nms, + use_static_shapes=use_static_shapes, + max_classes_per_detection=max_classes_per_detection) + self.assertIsNone(nmsed_masks) + self.assertIsNone(nmsed_additional_fields) + return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) + exp_nms_corners = [[[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], + [0, 100, 1, 101]]] + exp_nms_scores = [[.95, .9, .85, .3]] + exp_nms_classes = [[0, 0, 1, 0]] + (nmsed_boxes, nmsed_scores, nmsed_classes, + num_detections) = self.execute_cpu(graph_fn, []) + self.assertAllClose(nmsed_boxes, exp_nms_corners) + self.assertAllClose(nmsed_scores, exp_nms_scores) + self.assertAllClose(nmsed_classes, exp_nms_classes) + self.assertEqual(num_detections, [4]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/data_decoder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/data_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..87ddf72c1b04eba7a78f8584a38dc9f859cf8dfa --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/data_decoder.py @@ -0,0 +1,44 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Interface for data decoders. + +Data decoders decode the input data and return a dictionary of tensors keyed by +the entries in core.reader.Fields. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from abc import ABCMeta +from abc import abstractmethod +import six + + +class DataDecoder(six.with_metaclass(ABCMeta, object)): + """Interface for data decoders.""" + + @abstractmethod + def decode(self, data): + """Return a single image and associated labels. + + Args: + data: a string tensor holding a serialized protocol buffer corresponding + to data for a single image. + + Returns: + tensor_dict: a dictionary containing tensors. 
Possible keys are defined in + reader.Fields. + """ + pass diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/data_decoder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/data_decoder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83a828560a9adf1299fcdd151b3a05e28c340428 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/data_decoder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/data_parser.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/data_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..889545db78fbc8adffaa2f082e4301cc15d7698f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/data_parser.py @@ -0,0 +1,45 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Interface for data parsers. + +Data parser parses input data and returns a dictionary of numpy arrays +keyed by the entries in standard_fields.py. Since the parser parses records +to numpy arrays (materialized tensors) directly, it is used to read data for +evaluation/visualization; to parse the data during training, DataDecoder should +be used. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from abc import ABCMeta +from abc import abstractmethod +import six + + +class DataToNumpyParser(six.with_metaclass(ABCMeta, object)): + """Abstract interface for data parser that produces numpy arrays.""" + + @abstractmethod + def parse(self, input_data): + """Parses input and returns a numpy array or a dictionary of numpy arrays. + + Args: + input_data: an input data + + Returns: + A numpy array or a dictionary of numpy arrays or None, if input + cannot be parsed. + """ + pass diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/densepose_ops.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/densepose_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..8dd8f39bafa357f242170b9ae2ec6c29cd24ef4f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/densepose_ops.py @@ -0,0 +1,380 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""DensePose operations. + +DensePose part ids are represented as tensors of shape +[num_instances, num_points] and coordinates are represented as tensors of shape +[num_instances, num_points, 4] where each point holds (y, x, v, u). The location +of the DensePose sampled point is (y, x) in normalized coordinates. The surface +coordinate (in the part coordinate frame) is (v, u). Note that dim 1 of both +tensors may contain padding, since the number of sampled points per instance +is not fixed. The value `num_points` represents the maximum number of sampled +points for an instance in the example. +""" +import os + +import numpy as np +import scipy.io +import tensorflow.compat.v1 as tf + +from object_detection.utils import shape_utils + +PART_NAMES = [ + b'torso_back', b'torso_front', b'right_hand', b'left_hand', b'left_foot', + b'right_foot', b'right_upper_leg_back', b'left_upper_leg_back', + b'right_upper_leg_front', b'left_upper_leg_front', b'right_lower_leg_back', + b'left_lower_leg_back', b'right_lower_leg_front', b'left_lower_leg_front', + b'left_upper_arm_back', b'right_upper_arm_back', b'left_upper_arm_front', + b'right_upper_arm_front', b'left_lower_arm_back', b'right_lower_arm_back', + b'left_lower_arm_front', b'right_lower_arm_front', b'right_face', + b'left_face', +] + + +def scale(dp_surface_coords, y_scale, x_scale, scope=None): + """Scales DensePose coordinates in y and x dimensions. + + Args: + dp_surface_coords: a tensor of shape [num_instances, num_points, 4], with + coordinates in (y, x, v, u) format. + y_scale: (float) scalar tensor + x_scale: (float) scalar tensor + scope: name scope. + + Returns: + new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4] + """ + with tf.name_scope(scope, 'DensePoseScale'): + y_scale = tf.cast(y_scale, tf.float32) + x_scale = tf.cast(x_scale, tf.float32) + new_keypoints = dp_surface_coords * [[[y_scale, x_scale, 1, 1]]] + return new_keypoints + + +def clip_to_window(dp_surface_coords, window, scope=None): + """Clips DensePose points to a window. + + This op clips any input DensePose points to a window. + + Args: + dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with + DensePose surface coordinates in (y, x, v, u) format. + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window to which the op should clip the keypoints. + scope: name scope. + + Returns: + new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4]. + """ + with tf.name_scope(scope, 'DensePoseClipToWindow'): + y, x, v, u = tf.split(value=dp_surface_coords, num_or_size_splits=4, axis=2) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + y = tf.maximum(tf.minimum(y, win_y_max), win_y_min) + x = tf.maximum(tf.minimum(x, win_x_max), win_x_min) + new_dp_surface_coords = tf.concat([y, x, v, u], 2) + return new_dp_surface_coords + + +def prune_outside_window(dp_num_points, dp_part_ids, dp_surface_coords, window, + scope=None): + """Prunes DensePose points that fall outside a given window. + + This function replaces points that fall outside the given window with zeros. + See also clip_to_window which clips any DensePose points that fall outside the + given window. + + Note that this operation uses dynamic shapes, and therefore is not currently + suitable for TPU. 
+ + Args: + dp_num_points: a tensor of shape [num_instances] that indicates how many + (non-padded) DensePose points there are per instance. + dp_part_ids: a tensor of shape [num_instances, num_points] with DensePose + part ids. These part_ids are 0-indexed, where the first non-background + part has index 0. + dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with + DensePose surface coordinates in (y, x, v, u) format. + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window outside of which the op should prune the points. + scope: name scope. + + Returns: + new_dp_num_points: a tensor of shape [num_instances] that indicates how many + (non-padded) DensePose points there are per instance after pruning. + new_dp_part_ids: a tensor of shape [num_instances, num_points] with + DensePose part ids. These part_ids are 0-indexed, where the first + non-background part has index 0. + new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with + DensePose surface coordinates after pruning. + """ + with tf.name_scope(scope, 'DensePosePruneOutsideWindow'): + y, x, _, _ = tf.unstack(dp_surface_coords, axis=-1) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + + num_instances, num_points = shape_utils.combined_static_and_dynamic_shape( + dp_part_ids) + dp_num_points_tiled = tf.tile(dp_num_points[:, tf.newaxis], + multiples=[1, num_points]) + range_tiled = tf.tile(tf.range(num_points)[tf.newaxis, :], + multiples=[num_instances, 1]) + valid_initial = range_tiled < dp_num_points_tiled + valid_in_window = tf.logical_and( + tf.logical_and(y >= win_y_min, y <= win_y_max), + tf.logical_and(x >= win_x_min, x <= win_x_max)) + valid_indices = tf.logical_and(valid_initial, valid_in_window) + + new_dp_num_points = tf.math.reduce_sum( + tf.cast(valid_indices, tf.int32), axis=1) + max_num_points = tf.math.reduce_max(new_dp_num_points) + + def gather_and_reshuffle(elems): + dp_part_ids, dp_surface_coords, valid_indices = elems + locs = tf.where(valid_indices)[:, 0] + valid_part_ids = tf.gather(dp_part_ids, locs, axis=0) + valid_part_ids_padded = shape_utils.pad_or_clip_nd( + valid_part_ids, output_shape=[max_num_points]) + valid_surface_coords = tf.gather(dp_surface_coords, locs, axis=0) + valid_surface_coords_padded = shape_utils.pad_or_clip_nd( + valid_surface_coords, output_shape=[max_num_points, 4]) + return [valid_part_ids_padded, valid_surface_coords_padded] + + new_dp_part_ids, new_dp_surface_coords = ( + shape_utils.static_or_dynamic_map_fn( + gather_and_reshuffle, + elems=[dp_part_ids, dp_surface_coords, valid_indices], + dtype=[tf.int32, tf.float32], + back_prop=False)) + return new_dp_num_points, new_dp_part_ids, new_dp_surface_coords + + +def change_coordinate_frame(dp_surface_coords, window, scope=None): + """Changes coordinate frame of the points to be relative to window's frame. + + Given a window of the form [y_min, x_min, y_max, x_max] in normalized + coordinates, changes DensePose coordinates to be relative to this window. + + An example use case is data augmentation: where we are given groundtruth + points and would like to randomly crop the image to some window. In this + case we need to change the coordinate frame of each sampled point to be + relative to this new window. + + Args: + dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with + DensePose surface coordinates in (y, x, v, u) format. 
+ window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window we should change the coordinate frame to. + scope: name scope. + + Returns: + new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4]. + """ + with tf.name_scope(scope, 'DensePoseChangeCoordinateFrame'): + win_height = window[2] - window[0] + win_width = window[3] - window[1] + new_dp_surface_coords = scale( + dp_surface_coords - [window[0], window[1], 0, 0], + 1.0 / win_height, 1.0 / win_width) + return new_dp_surface_coords + + +def to_normalized_coordinates(dp_surface_coords, height, width, + check_range=True, scope=None): + """Converts absolute DensePose coordinates to normalized in range [0, 1]. + + This function raises an assertion failed error at graph execution time when + the maximum coordinate is smaller than 1.01 (which means that coordinates are + already normalized). The value 1.01 is to deal with small rounding errors. + + Args: + dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with + DensePose absolute surface coordinates in (y, x, v, u) format. + height: Height of image. + width: Width of image. + check_range: If True, checks if the coordinates are already normalized. + scope: name scope. + + Returns: + A tensor of shape [num_instances, num_points, 4] with normalized + coordinates. + """ + with tf.name_scope(scope, 'DensePoseToNormalizedCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + if check_range: + max_val = tf.reduce_max(dp_surface_coords[:, :, :2]) + max_assert = tf.Assert(tf.greater(max_val, 1.01), + ['max value is lower than 1.01: ', max_val]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(dp_surface_coords, 1.0 / height, 1.0 / width) + + +def to_absolute_coordinates(dp_surface_coords, height, width, + check_range=True, scope=None): + """Converts normalized DensePose coordinates to absolute pixel coordinates. + + This function raises an assertion failed error when the maximum + coordinate value is larger than 1.01 (in which case coordinates are already + absolute). + + Args: + dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with + DensePose normalized surface coordinates in (y, x, v, u) format. + height: Height of image. + width: Width of image. + check_range: If True, checks if the coordinates are normalized or not. + scope: name scope. + + Returns: + A tensor of shape [num_instances, num_points, 4] with absolute coordinates. 
+ """ + with tf.name_scope(scope, 'DensePoseToAbsoluteCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + if check_range: + max_val = tf.reduce_max(dp_surface_coords[:, :, :2]) + max_assert = tf.Assert(tf.greater_equal(1.01, max_val), + ['maximum coordinate value is larger than 1.01: ', + max_val]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(dp_surface_coords, height, width) + + +class DensePoseHorizontalFlip(object): + """Class responsible for horizontal flipping of parts and surface coords.""" + + def __init__(self): + """Constructor.""" + + path = os.path.dirname(os.path.abspath(__file__)) + uv_symmetry_transforms_path = tf.resource_loader.get_path_to_datafile( + os.path.join(path, '..', 'dataset_tools', 'densepose', + 'UV_symmetry_transforms.mat')) + tf.logging.info('Loading DensePose symmetry transforms file from {}'.format( + uv_symmetry_transforms_path)) + with tf.io.gfile.GFile(uv_symmetry_transforms_path, 'rb') as f: + data = scipy.io.loadmat(f) + + # Create lookup maps which indicate how a VU coordinate changes after a + # horizontal flip. + uv_symmetry_map = {} + for key in ('U_transforms', 'V_transforms'): + uv_symmetry_map_per_part = [] + for i in range(data[key].shape[1]): + # The following tensor has shape [256, 256]. The raw data is stored as + # uint8 values, so convert to float and scale to the range [0., 1.] + data_normalized = data[key][0, i].astype(np.float32) / 255. + map_per_part = tf.constant(data_normalized, dtype=tf.float32) + uv_symmetry_map_per_part.append(map_per_part) + uv_symmetry_map[key] = tf.reshape( + tf.stack(uv_symmetry_map_per_part, axis=0), [-1]) + # The following dictionary contains flattened lookup maps for the U and V + # coordinates separately. The shape of each is [24 * 256 * 256]. + self.uv_symmetries = uv_symmetry_map + + # Create a list of that maps part index to flipped part index (0-indexed). + part_symmetries = [] + for i, part_name in enumerate(PART_NAMES): + if b'left' in part_name: + part_symmetries.append(PART_NAMES.index( + part_name.replace(b'left', b'right'))) + elif b'right' in part_name: + part_symmetries.append(PART_NAMES.index( + part_name.replace(b'right', b'left'))) + else: + part_symmetries.append(i) + self.part_symmetries = part_symmetries + + def flip_parts_and_coords(self, part_ids, vu): + """Flips part ids and coordinates. + + Args: + part_ids: a [num_instances, num_points] int32 tensor with pre-flipped part + ids. These part_ids are 0-indexed, where the first non-background part + has index 0. + vu: a [num_instances, num_points, 2] float32 tensor with pre-flipped vu + normalized coordinates. + + Returns: + new_part_ids: a [num_instances, num_points] int32 tensor with post-flipped + part ids. These part_ids are 0-indexed, where the first non-background + part has index 0. + new_vu: a [num_instances, num_points, 2] float32 tensor with post-flipped + vu coordinates. + """ + num_instances, num_points = shape_utils.combined_static_and_dynamic_shape( + part_ids) + part_ids_flattened = tf.reshape(part_ids, [-1]) + new_part_ids_flattened = tf.gather(self.part_symmetries, part_ids_flattened) + new_part_ids = tf.reshape(new_part_ids_flattened, + [num_instances, num_points]) + + # Convert VU floating point coordinates to values in [256, 256] grid. 
+ vu = tf.math.minimum(tf.math.maximum(vu, 0.0), 1.0) + vu_locs = tf.cast(vu * 256., dtype=tf.int32) + vu_locs_flattened = tf.reshape(vu_locs, [-1, 2]) + v_locs_flattened, u_locs_flattened = tf.unstack(vu_locs_flattened, axis=1) + + # Convert vu_locs into lookup indices (in flattened part symmetries map). + symmetry_lookup_inds = ( + part_ids_flattened * 65536 + 256 * v_locs_flattened + u_locs_flattened) + + # New VU coordinates. + v_new = tf.gather(self.uv_symmetries['V_transforms'], symmetry_lookup_inds) + u_new = tf.gather(self.uv_symmetries['U_transforms'], symmetry_lookup_inds) + new_vu_flattened = tf.stack([v_new, u_new], axis=1) + new_vu = tf.reshape(new_vu_flattened, [num_instances, num_points, 2]) + + return new_part_ids, new_vu + + +def flip_horizontal(dp_part_ids, dp_surface_coords, scope=None): + """Flips the DensePose points horizontally around the flip_point. + + This operation flips dense pose annotations horizontally. Note that part ids + and surface coordinates may or may not change as a result of the flip. + + Args: + dp_part_ids: a tensor of shape [num_instances, num_points] with DensePose + part ids. These part_ids are 0-indexed, where the first non-background + part has index 0. + dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with + DensePose surface coordinates in (y, x, v, u) normalized format. + scope: name scope. + + Returns: + new_dp_part_ids: a tensor of shape [num_instances, num_points] with + DensePose part ids after flipping. + new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with + DensePose surface coordinates after flipping. + """ + with tf.name_scope(scope, 'DensePoseFlipHorizontal'): + # First flip x coordinate. + y, x, vu = tf.split(dp_surface_coords, num_or_size_splits=[1, 1, 2], axis=2) + xflipped = 1.0 - x + + # Flip part ids and surface coordinates. + horizontal_flip = DensePoseHorizontalFlip() + new_dp_part_ids, new_vu = horizontal_flip.flip_parts_and_coords( + dp_part_ids, vu) + new_dp_surface_coords = tf.concat([y, xflipped, new_vu], axis=2) + return new_dp_part_ids, new_dp_surface_coords + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/densepose_ops.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/densepose_ops.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1da52add3caaf98c69f9c5c34b3efb8609755641 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/densepose_ops.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/densepose_ops_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/densepose_ops_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5b814406d04f5854c339b12c66de6956e765251d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/densepose_ops_test.py @@ -0,0 +1,178 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.densepose_ops.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import densepose_ops +from object_detection.utils import test_case + + +class DensePoseOpsTest(test_case.TestCase): + """Tests for common DensePose operations.""" + + def test_scale(self): + def graph_fn(): + dp_surface_coords = tf.constant([ + [[0.0, 0.0, 0.1, 0.2], [100.0, 200.0, 0.3, 0.4]], + [[50.0, 120.0, 0.5, 0.6], [100.0, 140.0, 0.7, 0.8]] + ]) + y_scale = tf.constant(1.0 / 100) + x_scale = tf.constant(1.0 / 200) + + output = densepose_ops.scale(dp_surface_coords, y_scale, x_scale) + return output + output = self.execute(graph_fn, []) + + expected_dp_surface_coords = np.array([ + [[0., 0., 0.1, 0.2], [1.0, 1.0, 0.3, 0.4]], + [[0.5, 0.6, 0.5, 0.6], [1.0, 0.7, 0.7, 0.8]] + ]) + self.assertAllClose(output, expected_dp_surface_coords) + + def test_clip_to_window(self): + def graph_fn(): + dp_surface_coords = tf.constant([ + [[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], + [[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]] + ]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + + output = densepose_ops.clip_to_window(dp_surface_coords, window) + return output + output = self.execute(graph_fn, []) + + expected_dp_surface_coords = np.array([ + [[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], + [[0.5, 0.25, 0.5, 0.6], [0.75, 0.75, 0.7, 0.8]] + ]) + self.assertAllClose(output, expected_dp_surface_coords) + + def test_prune_outside_window(self): + def graph_fn(): + dp_num_points = tf.constant([2, 0, 1]) + dp_part_ids = tf.constant([[1, 1], [0, 0], [16, 0]]) + dp_surface_coords = tf.constant([ + [[0.9, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], + [[0.8, 0.5, 0.6, 0.6], [0.5, 0.5, 0.7, 0.7]] + ]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + + new_dp_num_points, new_dp_part_ids, new_dp_surface_coords = ( + densepose_ops.prune_outside_window(dp_num_points, dp_part_ids, + dp_surface_coords, window)) + return new_dp_num_points, new_dp_part_ids, new_dp_surface_coords + new_dp_num_points, new_dp_part_ids, new_dp_surface_coords = ( + self.execute_cpu(graph_fn, [])) + + expected_dp_num_points = np.array([1, 0, 0]) + expected_dp_part_ids = np.array([[1], [0], [0]]) + expected_dp_surface_coords = np.array([ + [[0.75, 0.75, 0.3, 0.4]], + [[0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0]] + ]) + self.assertAllEqual(new_dp_num_points, expected_dp_num_points) + self.assertAllEqual(new_dp_part_ids, expected_dp_part_ids) + self.assertAllClose(new_dp_surface_coords, expected_dp_surface_coords) + + def test_change_coordinate_frame(self): + def graph_fn(): + dp_surface_coords = tf.constant([ + [[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], + [[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]] + ]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + + output = densepose_ops.change_coordinate_frame(dp_surface_coords, window) + return output + output = self.execute(graph_fn, []) + + expected_dp_surface_coords = np.array([ + [[0, 0.5, 0.1, 0.2], [1.0, 1.0, 0.3, 0.4]], + [[0.5, -0.5, 0.5, 0.6], [1.5, 1.5, 0.7, 0.8]] + ]) + self.assertAllClose(output, expected_dp_surface_coords) + + def test_to_normalized_coordinates(self): + def graph_fn(): + dp_surface_coords = tf.constant([ + [[10., 30., 0.1, 0.2], [30., 45., 0.3, 0.4]], + [[20., 0., 0.5, 0.6], [40., 60., 
0.7, 0.8]] + ]) + output = densepose_ops.to_normalized_coordinates( + dp_surface_coords, 40, 60) + return output + output = self.execute(graph_fn, []) + + expected_dp_surface_coords = np.array([ + [[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], + [[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]] + ]) + self.assertAllClose(output, expected_dp_surface_coords) + + def test_to_absolute_coordinates(self): + def graph_fn(): + dp_surface_coords = tf.constant([ + [[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], + [[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]] + ]) + output = densepose_ops.to_absolute_coordinates( + dp_surface_coords, 40, 60) + return output + output = self.execute(graph_fn, []) + + expected_dp_surface_coords = np.array([ + [[10., 30., 0.1, 0.2], [30., 45., 0.3, 0.4]], + [[20., 0., 0.5, 0.6], [40., 60., 0.7, 0.8]] + ]) + self.assertAllClose(output, expected_dp_surface_coords) + + def test_horizontal_flip(self): + part_ids_np = np.array([[1, 4], [0, 8]], dtype=np.int32) + surf_coords_np = np.array([ + [[0.1, 0.7, 0.2, 0.4], [0.3, 0.8, 0.2, 0.4]], + [[0.0, 0.5, 0.8, 0.7], [0.6, 1.0, 0.7, 0.9]], + ], dtype=np.float32) + def graph_fn(): + part_ids = tf.constant(part_ids_np, dtype=tf.int32) + surf_coords = tf.constant(surf_coords_np, dtype=tf.float32) + flipped_part_ids, flipped_surf_coords = densepose_ops.flip_horizontal( + part_ids, surf_coords) + flipped_twice_part_ids, flipped_twice_surf_coords = ( + densepose_ops.flip_horizontal(flipped_part_ids, flipped_surf_coords)) + return (flipped_part_ids, flipped_surf_coords, + flipped_twice_part_ids, flipped_twice_surf_coords) + (flipped_part_ids, flipped_surf_coords, flipped_twice_part_ids, + flipped_twice_surf_coords) = self.execute(graph_fn, []) + + expected_flipped_part_ids = [[1, 5], # 1->1, 4->5 + [0, 9]] # 0->0, 8->9 + expected_flipped_surf_coords_yx = np.array([ + [[0.1, 1.0-0.7], [0.3, 1.0-0.8]], + [[0.0, 1.0-0.5], [0.6, 1.0-1.0]], + ], dtype=np.float32) + self.assertAllEqual(expected_flipped_part_ids, flipped_part_ids) + self.assertAllClose(expected_flipped_surf_coords_yx, + flipped_surf_coords[:, :, 0:2]) + self.assertAllEqual(part_ids_np, flipped_twice_part_ids) + self.assertAllClose(surf_coords_np, flipped_twice_surf_coords, rtol=1e-2, + atol=1e-2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/freezable_batch_norm.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/freezable_batch_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..7f08fa5df12163e8178f233dbb1d766fe27d8742 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/freezable_batch_norm.py @@ -0,0 +1,68 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""A freezable batch norm layer that uses Keras batch normalization.""" +import tensorflow.compat.v1 as tf + + +class FreezableBatchNorm(tf.keras.layers.BatchNormalization): + """Batch normalization layer (Ioffe and Szegedy, 2014). + + This is a `freezable` batch norm layer that supports setting the `training` + parameter in the __init__ method rather than having to set it either via + the Keras learning phase or via the `call` method parameter. This layer will + forward all other parameters to the default Keras `BatchNormalization` + layer + + This is class is necessary because Object Detection model training sometimes + requires batch normalization layers to be `frozen` and used as if it was + evaluation time, despite still training (and potentially using dropout layers) + + Like the default Keras BatchNormalization layer, this will normalize the + activations of the previous layer at each batch, + i.e. applies a transformation that maintains the mean activation + close to 0 and the activation standard deviation close to 1. + + Arguments: + training: If False, the layer will normalize using the moving average and + std. dev, without updating the learned avg and std. dev. + If None or True, the layer will follow the keras BatchNormalization layer + strategy of checking the Keras learning phase at `call` time to decide + what to do. + **kwargs: The keyword arguments to forward to the keras BatchNormalization + layer constructor. + + Input shape: + Arbitrary. Use the keyword argument `input_shape` + (tuple of integers, does not include the samples axis) + when using this layer as the first layer in a model. + + Output shape: + Same shape as input. + + References: + - [Batch Normalization: Accelerating Deep Network Training by Reducing + Internal Covariate Shift](https://arxiv.org/abs/1502.03167) + """ + + def __init__(self, training=None, **kwargs): + super(FreezableBatchNorm, self).__init__(**kwargs) + self._training = training + + def call(self, inputs, training=None): + # Override the call arg only if the batchnorm is frozen. (Ignore None) + if self._training is False: # pylint: disable=g-bool-id-comparison + training = self._training + return super(FreezableBatchNorm, self).call(inputs, training=training) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/freezable_batch_norm.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/freezable_batch_norm.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05af55562405516eae862363549c968bbc3b8c1d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/freezable_batch_norm.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/freezable_batch_norm_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/freezable_batch_norm_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4cc42ae3ef7da9b3412d2f461d7f9db62420e603 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/freezable_batch_norm_tf2_test.py @@ -0,0 +1,198 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.freezable_batch_norm.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unittest +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + + +from object_detection.core import freezable_batch_norm +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class FreezableBatchNormTest(tf.test.TestCase): + """Tests for FreezableBatchNorm operations.""" + + def _build_model(self, training=None): + model = tf.keras.models.Sequential() + norm = freezable_batch_norm.FreezableBatchNorm(training=training, + input_shape=(10,), + momentum=0.8) + model.add(norm) + return model, norm + + def _copy_weights(self, source_weights, target_weights): + for source, target in zip(source_weights, target_weights): + target.assign(source) + + def _train_freezable_batch_norm(self, training_mean, training_var): + model, _ = self._build_model() + model.compile(loss='mse', optimizer='sgd') + + # centered on training_mean, variance training_var + train_data = np.random.normal( + loc=training_mean, + scale=training_var, + size=(1000, 10)) + model.fit(train_data, train_data, epochs=4, verbose=0) + return model.weights + + def _test_batchnorm_layer( + self, norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, training_mean, training_var): + out_tensor = norm(tf.convert_to_tensor(test_data, dtype=tf.float32), + training=training_arg) + out = out_tensor + out -= norm.beta + out /= norm.gamma + + if not should_be_training: + out *= training_var + out += (training_mean - testing_mean) + out /= testing_var + + np.testing.assert_allclose(out.numpy().mean(), 0.0, atol=1.5e-1) + np.testing.assert_allclose(out.numpy().std(), 1.0, atol=1.5e-1) + + def test_batchnorm_freezing_training_none(self): + training_mean = 5.0 + training_var = 10.0 + + testing_mean = -10.0 + testing_var = 5.0 + + # Initially train the batch norm, and save the weights + trained_weights = self._train_freezable_batch_norm(training_mean, + training_var) + + # Load the batch norm weights, freezing training to True. + # Apply the batch norm layer to testing data and ensure it is normalized + # according to the batch statistics. 
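+    # With `training=True`, the layer normalizes each batch using that batch's
+    # own mean and variance rather than the stored moving averages, so the
+    # test data should come out roughly zero-mean / unit-std as-is. That is
+    # what `_test_batchnorm_layer` asserts when `should_be_training` is True.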
+ model, norm = self._build_model(training=True) + self._copy_weights(trained_weights, model.weights) + + # centered on testing_mean, variance testing_var + test_data = np.random.normal( + loc=testing_mean, + scale=testing_var, + size=(1000, 10)) + + # Test with training=True passed to the call method: + training_arg = True + should_be_training = True + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + # Reset the weights, because they may have been updating by + # running with training=True + self._copy_weights(trained_weights, model.weights) + + # Test with training=False passed to the call method: + training_arg = False + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + # Test the layer in various Keras learning phase scopes: + training_arg = None + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + tf.keras.backend.set_learning_phase(True) + should_be_training = True + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + # Reset the weights, because they may have been updating by + # running with training=True + self._copy_weights(trained_weights, model.weights) + + tf.keras.backend.set_learning_phase(False) + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + def test_batchnorm_freezing_training_false(self): + training_mean = 5.0 + training_var = 10.0 + + testing_mean = -10.0 + testing_var = 5.0 + + # Initially train the batch norm, and save the weights + trained_weights = self._train_freezable_batch_norm(training_mean, + training_var) + + # Load the batch norm back up, freezing training to False. + # Apply the batch norm layer to testing data and ensure it is normalized + # according to the training data's statistics. 
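+    # Constructing FreezableBatchNorm with `training=False` makes it override
+    # the `training` argument at call time, so the moving mean/variance learned
+    # on the training data are always used. `_test_batchnorm_layer` therefore
+    # re-expresses the output in terms of the test statistics before asserting
+    # that it is normalized.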
+ model, norm = self._build_model(training=False) + self._copy_weights(trained_weights, model.weights) + + # centered on testing_mean, variance testing_var + test_data = np.random.normal( + loc=testing_mean, + scale=testing_var, + size=(1000, 10)) + + # Make sure that the layer is never training + # Test with training=True passed to the call method: + training_arg = True + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + # Test with training=False passed to the call method: + training_arg = False + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + # Test the layer in various Keras learning phase scopes: + training_arg = None + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + tf.keras.backend.set_learning_phase(True) + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + tf.keras.backend.set_learning_phase(False) + should_be_training = False + self._test_batchnorm_layer(norm, should_be_training, test_data, + testing_mean, testing_var, training_arg, + training_mean, training_var) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/keypoint_ops.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/keypoint_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..1b0c4ccfed42aae492550331e870173c624f0316 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/keypoint_ops.py @@ -0,0 +1,376 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Keypoint operations. + +Keypoints are represented as tensors of shape [num_instances, num_keypoints, 2], +where the last dimension holds rank 2 tensors of the form [y, x] representing +the coordinates of the keypoint. +""" +import numpy as np +import tensorflow.compat.v1 as tf + + +def scale(keypoints, y_scale, x_scale, scope=None): + """Scales keypoint coordinates in x and y dimensions. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + y_scale: (float) scalar tensor + x_scale: (float) scalar tensor + scope: name scope. 
+ + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'Scale'): + y_scale = tf.cast(y_scale, tf.float32) + x_scale = tf.cast(x_scale, tf.float32) + new_keypoints = keypoints * [[[y_scale, x_scale]]] + return new_keypoints + + +def clip_to_window(keypoints, window, scope=None): + """Clips keypoints to a window. + + This op clips any input keypoints to a window. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window to which the op should clip the keypoints. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'ClipToWindow'): + y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + y = tf.maximum(tf.minimum(y, win_y_max), win_y_min) + x = tf.maximum(tf.minimum(x, win_x_max), win_x_min) + new_keypoints = tf.concat([y, x], 2) + return new_keypoints + + +def prune_outside_window(keypoints, window, scope=None): + """Prunes keypoints that fall outside a given window. + + This function replaces keypoints that fall outside the given window with nan. + See also clip_to_window which clips any keypoints that fall outside the given + window. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window outside of which the op should prune the keypoints. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'PruneOutsideWindow'): + y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2) + win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) + + valid_indices = tf.logical_and( + tf.logical_and(y >= win_y_min, y <= win_y_max), + tf.logical_and(x >= win_x_min, x <= win_x_max)) + + new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y)) + new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x)) + new_keypoints = tf.concat([new_y, new_x], 2) + + return new_keypoints + + +def change_coordinate_frame(keypoints, window, scope=None): + """Changes coordinate frame of the keypoints to be relative to window's frame. + + Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint + coordinates from keypoints of shape [num_instances, num_keypoints, 2] + to be relative to this window. + + An example use case is data augmentation: where we are given groundtruth + keypoints and would like to randomly crop the image to some window. In this + case we need to change the coordinate frame of each groundtruth keypoint to be + relative to this new window. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] + window we should change the coordinate frame to. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'ChangeCoordinateFrame'): + win_height = window[2] - window[0] + win_width = window[3] - window[1] + new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height, + 1.0 / win_width) + return new_keypoints + + +def keypoints_to_enclosing_bounding_boxes(keypoints): + """Creates enclosing bounding boxes from keypoints. 
+ + Args: + keypoints: a [num_instances, num_keypoints, 2] float32 tensor with keypoints + in [y, x] format. + + Returns: + A [num_instances, 4] float32 tensor that tightly covers all the keypoints + for each instance. + """ + ymin = tf.math.reduce_min(keypoints[:, :, 0], axis=1) + xmin = tf.math.reduce_min(keypoints[:, :, 1], axis=1) + ymax = tf.math.reduce_max(keypoints[:, :, 0], axis=1) + xmax = tf.math.reduce_max(keypoints[:, :, 1], axis=1) + return tf.stack([ymin, xmin, ymax, xmax], axis=1) + + +def to_normalized_coordinates(keypoints, height, width, + check_range=True, scope=None): + """Converts absolute keypoint coordinates to normalized coordinates in [0, 1]. + + Usually one uses the dynamic shape of the image or conv-layer tensor: + keypoints = keypoint_ops.to_normalized_coordinates(keypoints, + tf.shape(images)[1], + tf.shape(images)[2]), + + This function raises an assertion failed error at graph execution time when + the maximum coordinate is smaller than 1.01 (which means that coordinates are + already normalized). The value 1.01 is to deal with small rounding errors. + + Args: + keypoints: A tensor of shape [num_instances, num_keypoints, 2]. + height: Maximum value for y coordinate of absolute keypoint coordinates. + width: Maximum value for x coordinate of absolute keypoint coordinates. + check_range: If True, checks if the coordinates are normalized. + scope: name scope. + + Returns: + tensor of shape [num_instances, num_keypoints, 2] with normalized + coordinates in [0, 1]. + """ + with tf.name_scope(scope, 'ToNormalizedCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + if check_range: + max_val = tf.reduce_max(keypoints) + max_assert = tf.Assert(tf.greater(max_val, 1.01), + ['max value is lower than 1.01: ', max_val]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(keypoints, 1.0 / height, 1.0 / width) + + +def to_absolute_coordinates(keypoints, height, width, + check_range=True, scope=None): + """Converts normalized keypoint coordinates to absolute pixel coordinates. + + This function raises an assertion failed error when the maximum keypoint + coordinate value is larger than 1.01 (in which case coordinates are already + absolute). + + Args: + keypoints: A tensor of shape [num_instances, num_keypoints, 2] + height: Maximum value for y coordinate of absolute keypoint coordinates. + width: Maximum value for x coordinate of absolute keypoint coordinates. + check_range: If True, checks if the coordinates are normalized or not. + scope: name scope. + + Returns: + tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates + in terms of the image size. + + """ + with tf.name_scope(scope, 'ToAbsoluteCoordinates'): + height = tf.cast(height, tf.float32) + width = tf.cast(width, tf.float32) + + # Ensure range of input keypoints is correct. + if check_range: + max_val = tf.reduce_max(keypoints) + max_assert = tf.Assert(tf.greater_equal(1.01, max_val), + ['maximum keypoint coordinate value is larger ' + 'than 1.01: ', max_val]) + with tf.control_dependencies([max_assert]): + width = tf.identity(width) + + return scale(keypoints, height, width) + + +def flip_horizontal(keypoints, flip_point, flip_permutation=None, scope=None): + """Flips the keypoints horizontally around the flip_point. + + This operation flips the x coordinate for each keypoint around the flip_point + and also permutes the keypoints in a manner specified by flip_permutation. 
+ + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + flip_point: (float) scalar tensor representing the x coordinate to flip the + keypoints around. + flip_permutation: integer list or rank 1 int32 tensor containing the + keypoint flip permutation. This specifies the mapping from original + keypoint indices to the flipped keypoint indices. This is used primarily + for keypoints that are not reflection invariant. E.g. Suppose there are 3 + keypoints representing ['head', 'right_eye', 'left_eye'], then a logical + choice for flip_permutation might be [0, 2, 1] since we want to swap the + 'left_eye' and 'right_eye' after a horizontal flip. + Default to None or empty list to keep the original order after flip. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'FlipHorizontal'): + keypoints = tf.transpose(keypoints, [1, 0, 2]) + if flip_permutation: + keypoints = tf.gather(keypoints, flip_permutation) + v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2) + u = flip_point * 2.0 - u + new_keypoints = tf.concat([v, u], 2) + new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) + return new_keypoints + + +def flip_vertical(keypoints, flip_point, flip_permutation=None, scope=None): + """Flips the keypoints vertically around the flip_point. + + This operation flips the y coordinate for each keypoint around the flip_point + and also permutes the keypoints in a manner specified by flip_permutation. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + flip_point: (float) scalar tensor representing the y coordinate to flip the + keypoints around. + flip_permutation: integer list or rank 1 int32 tensor containing the + keypoint flip permutation. This specifies the mapping from original + keypoint indices to the flipped keypoint indices. This is used primarily + for keypoints that are not reflection invariant. E.g. Suppose there are 3 + keypoints representing ['head', 'right_eye', 'left_eye'], then a logical + choice for flip_permutation might be [0, 2, 1] since we want to swap the + 'left_eye' and 'right_eye' after a horizontal flip. + Default to None or empty list to keep the original order after flip. + scope: name scope. + + Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'FlipVertical'): + keypoints = tf.transpose(keypoints, [1, 0, 2]) + if flip_permutation: + keypoints = tf.gather(keypoints, flip_permutation) + v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2) + v = flip_point * 2.0 - v + new_keypoints = tf.concat([v, u], 2) + new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) + return new_keypoints + + +def rot90(keypoints, rotation_permutation=None, scope=None): + """Rotates the keypoints counter-clockwise by 90 degrees. + + Args: + keypoints: a tensor of shape [num_instances, num_keypoints, 2] + rotation_permutation: integer list or rank 1 int32 tensor containing the + keypoint flip permutation. This specifies the mapping from original + keypoint indices to the rotated keypoint indices. This is used primarily + for keypoints that are not rotation invariant. + Default to None or empty list to keep the original order after rotation. + scope: name scope. 
+ Returns: + new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] + """ + with tf.name_scope(scope, 'Rot90'): + keypoints = tf.transpose(keypoints, [1, 0, 2]) + if rotation_permutation: + keypoints = tf.gather(keypoints, rotation_permutation) + v, u = tf.split(value=keypoints[:, :, ::-1], num_or_size_splits=2, axis=2) + v = 1.0 - v + new_keypoints = tf.concat([v, u], 2) + new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) + return new_keypoints + + +def keypoint_weights_from_visibilities(keypoint_visibilities, + per_keypoint_weights=None): + """Returns a keypoint weights tensor. + + During training, it is often beneficial to consider only those keypoints that + are labeled. This function returns a weights tensor that combines default + per-keypoint weights, as well as the visibilities of individual keypoints. + + The returned tensor satisfies: + keypoint_weights[i, k] = per_keypoint_weights[k] * keypoint_visibilities[i, k] + where per_keypoint_weights[k] is set to 1 if not provided. + + Args: + keypoint_visibilities: A [num_instances, num_keypoints] boolean tensor + indicating whether a keypoint is labeled (and perhaps even visible). + per_keypoint_weights: A list or 1-d tensor of length `num_keypoints` with + per-keypoint weights. If None, will use 1 for each visible keypoint + weight. + + Returns: + A [num_instances, num_keypoints] float32 tensor with keypoint weights. Those + keypoints deemed visible will have the provided per-keypoint weight, and + all others will be set to zero. + """ + if per_keypoint_weights is None: + num_keypoints = keypoint_visibilities.shape.as_list()[1] + per_keypoint_weight_mult = tf.ones((1, num_keypoints,), dtype=tf.float32) + else: + per_keypoint_weight_mult = tf.expand_dims(per_keypoint_weights, axis=0) + return per_keypoint_weight_mult * tf.cast(keypoint_visibilities, tf.float32) + + +def set_keypoint_visibilities(keypoints, initial_keypoint_visibilities=None): + """Sets keypoint visibilities based on valid/invalid keypoints. + + Some keypoint operations set invisible keypoints (e.g. cropped keypoints) to + NaN, without affecting any keypoint "visibility" variables. This function is + used to update (or create) keypoint visibilities to agree with visible / + invisible keypoint coordinates. + + Args: + keypoints: a float32 tensor of shape [num_instances, num_keypoints, 2]. + initial_keypoint_visibilities: a boolean tensor of shape + [num_instances, num_keypoints]. If provided, will maintain the visibility + designation of a keypoint, so long as the corresponding coordinates are + not NaN. If not provided, will create keypoint visibilities directly from + the values in `keypoints` (i.e. NaN coordinates map to False, otherwise + they map to True). + + Returns: + keypoint_visibilities: a bool tensor of shape [num_instances, num_keypoints] + indicating whether a keypoint is visible or not. 
+ """ + if initial_keypoint_visibilities is not None: + keypoint_visibilities = tf.cast(initial_keypoint_visibilities, tf.bool) + else: + keypoint_visibilities = tf.ones_like(keypoints[:, :, 0], dtype=tf.bool) + + keypoints_with_nan = tf.math.reduce_any(tf.math.is_nan(keypoints), axis=2) + keypoint_visibilities = tf.where( + keypoints_with_nan, + tf.zeros_like(keypoint_visibilities, dtype=tf.bool), + keypoint_visibilities) + return keypoint_visibilities diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/keypoint_ops.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/keypoint_ops.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6423aac4d6d8f2189f836684f7ef156d5afa853b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/keypoint_ops.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/keypoint_ops_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/keypoint_ops_test.py new file mode 100644 index 0000000000000000000000000000000000000000..bbdcf01940dcaf96da283bd6bcf73e91b633f0ee --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/keypoint_ops_test.py @@ -0,0 +1,365 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.keypoint_ops.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import keypoint_ops +from object_detection.utils import test_case + + +class KeypointOpsTest(test_case.TestCase): + """Tests for common keypoint operations.""" + + def test_scale(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.0, 0.0], [100.0, 200.0]], + [[50.0, 120.0], [100.0, 140.0]] + ]) + y_scale = tf.constant(1.0 / 100) + x_scale = tf.constant(1.0 / 200) + + expected_keypoints = tf.constant([ + [[0., 0.], [1.0, 1.0]], + [[0.5, 0.6], [1.0, 0.7]] + ]) + output = keypoint_ops.scale(keypoints, y_scale, x_scale) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_clip_to_window(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + + expected_keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.25], [0.75, 0.75]] + ]) + output = keypoint_ops.clip_to_window(keypoints, window) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_prune_outside_window(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + + expected_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]], + [[np.nan, np.nan], [np.nan, np.nan]]]) + output = keypoint_ops.prune_outside_window(keypoints, window) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_change_coordinate_frame(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + window = tf.constant([0.25, 0.25, 0.75, 0.75]) + + expected_keypoints = tf.constant([ + [[0, 0.5], [1.0, 1.0]], + [[0.5, -0.5], [1.5, 1.5]] + ]) + output = keypoint_ops.change_coordinate_frame(keypoints, window) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_keypoints_to_enclosing_bounding_boxes(self): + def graph_fn(): + keypoints = tf.constant( + [ + [ # Instance 0. + [5., 10.], + [3., 20.], + [8., 4.], + ], + [ # Instance 1. + [2., 12.], + [0., 3.], + [5., 19.], + ], + ], dtype=tf.float32) + bboxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes(keypoints) + return bboxes + output = self.execute(graph_fn, []) + expected_bboxes = np.array( + [ + [3., 4., 8., 20.], + [0., 3., 5., 19.] 
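+            # Each row is [ymin, xmin, ymax, xmax]: the per-instance min/max of
+            # the keypoint y and x values above. E.g. instance 0 has y values
+            # {5, 3, 8} and x values {10, 20, 4}, giving [3., 4., 8., 20.].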
+ ]) + self.assertAllClose(expected_bboxes, output) + + def test_to_normalized_coordinates(self): + def graph_fn(): + keypoints = tf.constant([ + [[10., 30.], [30., 45.]], + [[20., 0.], [40., 60.]] + ]) + output = keypoint_ops.to_normalized_coordinates( + keypoints, 40, 60) + expected_keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_to_normalized_coordinates_already_normalized(self): + if self.has_tpu(): return + def graph_fn(): + keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + output = keypoint_ops.to_normalized_coordinates( + keypoints, 40, 60) + return output + with self.assertRaisesOpError('assertion failed'): + self.execute_cpu(graph_fn, []) + + def test_to_absolute_coordinates(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.25, 0.5], [0.75, 0.75]], + [[0.5, 0.0], [1.0, 1.0]] + ]) + output = keypoint_ops.to_absolute_coordinates( + keypoints, 40, 60) + expected_keypoints = tf.constant([ + [[10., 30.], [30., 45.]], + [[20., 0.], [40., 60.]] + ]) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_to_absolute_coordinates_already_absolute(self): + if self.has_tpu(): return + def graph_fn(): + keypoints = tf.constant([ + [[10., 30.], [30., 45.]], + [[20., 0.], [40., 60.]] + ]) + output = keypoint_ops.to_absolute_coordinates( + keypoints, 40, 60) + return output + with self.assertRaisesOpError('assertion failed'): + self.execute_cpu(graph_fn, []) + + def test_flip_horizontal(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]] + ]) + expected_keypoints = tf.constant([ + [[0.1, 0.9], [0.2, 0.8], [0.3, 0.7]], + [[0.4, 0.6], [0.5, 0.5], [0.6, 0.4]], + ]) + output = keypoint_ops.flip_horizontal(keypoints, 0.5) + return output, expected_keypoints + + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_flip_horizontal_permutation(self): + + def graph_fn(): + keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]]) + flip_permutation = [0, 2, 1] + + expected_keypoints = tf.constant([ + [[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]], + [[0.4, 0.6], [0.6, 0.4], [0.5, 0.5]], + ]) + output = keypoint_ops.flip_horizontal(keypoints, 0.5, flip_permutation) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_flip_vertical(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]] + ]) + + expected_keypoints = tf.constant([ + [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]], + [[0.6, 0.4], [0.5, 0.5], [0.4, 0.6]], + ]) + output = keypoint_ops.flip_vertical(keypoints, 0.5) + return output, expected_keypoints + + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_flip_vertical_permutation(self): + + def graph_fn(): + keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]]) + flip_permutation = [0, 2, 1] + + expected_keypoints = tf.constant([ + [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]], + [[0.6, 0.4], [0.4, 
0.6], [0.5, 0.5]], + ]) + output = keypoint_ops.flip_vertical(keypoints, 0.5, flip_permutation) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_rot90(self): + def graph_fn(): + keypoints = tf.constant([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]] + ]) + expected_keypoints = tf.constant([ + [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]], + [[0.4, 0.4], [0.4, 0.5], [0.3, 0.6]], + ]) + output = keypoint_ops.rot90(keypoints) + return output, expected_keypoints + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_rot90_permutation(self): + + def graph_fn(): + keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]]]) + rot_permutation = [0, 2, 1] + expected_keypoints = tf.constant([ + [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]], + [[0.4, 0.4], [0.3, 0.6], [0.4, 0.5]], + ]) + output = keypoint_ops.rot90(keypoints, + rotation_permutation=rot_permutation) + return output, expected_keypoints + + output, expected_keypoints = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoints) + + def test_keypoint_weights_from_visibilities(self): + def graph_fn(): + keypoint_visibilities = tf.constant([ + [True, True, False], + [False, True, False] + ]) + per_keypoint_weights = [1.0, 2.0, 3.0] + keypoint_weights = keypoint_ops.keypoint_weights_from_visibilities( + keypoint_visibilities, per_keypoint_weights) + return keypoint_weights + expected_keypoint_weights = [ + [1.0, 2.0, 0.0], + [0.0, 2.0, 0.0] + ] + output = self.execute(graph_fn, []) + self.assertAllClose(output, expected_keypoint_weights) + + def test_keypoint_weights_from_visibilities_no_per_kpt_weights(self): + def graph_fn(): + keypoint_visibilities = tf.constant([ + [True, True, False], + [False, True, False] + ]) + keypoint_weights = keypoint_ops.keypoint_weights_from_visibilities( + keypoint_visibilities) + return keypoint_weights + expected_keypoint_weights = [ + [1.0, 1.0, 0.0], + [0.0, 1.0, 0.0] + ] + output = self.execute(graph_fn, []) + self.assertAllClose(expected_keypoint_weights, output) + + def test_set_keypoint_visibilities_no_initial_kpt_vis(self): + keypoints_np = np.array( + [ + [[np.nan, 0.2], + [np.nan, np.nan], + [-3., 7.]], + [[0.5, 0.2], + [4., 1.0], + [-3., np.nan]], + ], dtype=np.float32) + def graph_fn(): + keypoints = tf.constant(keypoints_np, dtype=tf.float32) + keypoint_visibilities = keypoint_ops.set_keypoint_visibilities( + keypoints) + return keypoint_visibilities + + expected_kpt_vis = [ + [False, False, True], + [True, True, False] + ] + output = self.execute(graph_fn, []) + self.assertAllEqual(expected_kpt_vis, output) + + def test_set_keypoint_visibilities(self): + keypoints_np = np.array( + [ + [[np.nan, 0.2], + [np.nan, np.nan], + [-3., 7.]], + [[0.5, 0.2], + [4., 1.0], + [-3., np.nan]], + ], dtype=np.float32) + initial_keypoint_visibilities_np = np.array( + [ + [False, + True, # Will be overriden by NaN coords. + False], # Will be maintained, even though non-NaN coords. + [True, + False, # Will be maintained, even though non-NaN coords. 
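+             # (The third keypoint of this instance has a NaN coordinate, so
+             #  it stays False either way.)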
+ False] + ]) + def graph_fn(): + keypoints = tf.constant(keypoints_np, dtype=tf.float32) + initial_keypoint_visibilities = tf.constant( + initial_keypoint_visibilities_np, dtype=tf.bool) + keypoint_visibilities = keypoint_ops.set_keypoint_visibilities( + keypoints, initial_keypoint_visibilities) + return keypoint_visibilities + + expected_kpt_vis = [ + [False, False, False], + [True, False, False] + ] + output = self.execute(graph_fn, []) + self.assertAllEqual(expected_kpt_vis, output) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/losses.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..434ab47cdfd4f76eb10aaf5ae5cb153415b1aef4 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/losses.py @@ -0,0 +1,808 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Classification and regression loss functions for object detection. + +Localization losses: + * WeightedL2LocalizationLoss + * WeightedSmoothL1LocalizationLoss + * WeightedIOULocalizationLoss + +Classification losses: + * WeightedSigmoidClassificationLoss + * WeightedSoftmaxClassificationLoss + * WeightedSoftmaxClassificationAgainstLogitsLoss + * BootstrappedSigmoidClassificationLoss +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import six +import tensorflow.compat.v1 as tf +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +class Loss(six.with_metaclass(abc.ABCMeta, object)): + """Abstract base class for loss functions.""" + + def __call__(self, + prediction_tensor, + target_tensor, + ignore_nan_targets=False, + losses_mask=None, + scope=None, + **params): + """Call the loss function. + + Args: + prediction_tensor: an N-d tensor of shape [batch, anchors, ...] + representing predicted quantities. + target_tensor: an N-d tensor of shape [batch, anchors, ...] representing + regression or classification targets. + ignore_nan_targets: whether to ignore nan targets in the loss computation. + E.g. can be used if the target tensor is missing groundtruth data that + shouldn't be factored into the loss. + losses_mask: A [batch] boolean tensor that indicates whether losses should + be applied to individual images in the batch. For elements that + are False, corresponding prediction, target, and weight tensors will not + contribute to loss computation. If None, no filtering will take place + prior to loss computation. + scope: Op scope name. Defaults to 'Loss' if None. + **params: Additional keyword arguments for specific implementations of + the Loss. 
+ + Returns: + loss: a tensor representing the value of the loss function. + """ + with tf.name_scope(scope, 'Loss', + [prediction_tensor, target_tensor, params]) as scope: + if ignore_nan_targets: + target_tensor = tf.where(tf.is_nan(target_tensor), + prediction_tensor, + target_tensor) + if losses_mask is not None: + tensor_multiplier = self._get_loss_multiplier_for_tensor( + prediction_tensor, + losses_mask) + prediction_tensor *= tensor_multiplier + target_tensor *= tensor_multiplier + + if 'weights' in params: + params['weights'] = tf.convert_to_tensor(params['weights']) + weights_multiplier = self._get_loss_multiplier_for_tensor( + params['weights'], + losses_mask) + params['weights'] *= weights_multiplier + return self._compute_loss(prediction_tensor, target_tensor, **params) + + def _get_loss_multiplier_for_tensor(self, tensor, losses_mask): + loss_multiplier_shape = tf.stack([-1] + [1] * (len(tensor.shape) - 1)) + return tf.cast(tf.reshape(losses_mask, loss_multiplier_shape), tf.float32) + + @abc.abstractmethod + def _compute_loss(self, prediction_tensor, target_tensor, **params): + """Method to be overridden by implementations. + + Args: + prediction_tensor: a tensor representing predicted quantities + target_tensor: a tensor representing regression or classification targets + **params: Additional keyword arguments for specific implementations of + the Loss. + + Returns: + loss: an N-d tensor of shape [batch, anchors, ...] containing the loss per + anchor + """ + pass + + +class WeightedL2LocalizationLoss(Loss): + """L2 localization loss function with anchorwise output support. + + Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2 + """ + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + code_size] representing the (encoded) predicted locations of objects. + target_tensor: A float tensor of shape [batch_size, num_anchors, + code_size] representing the regression targets + weights: a float tensor of shape [batch_size, num_anchors] + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] tensor + representing the value of the loss function. + """ + weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims( + weights, 2) + square_diff = 0.5 * tf.square(weighted_diff) + return tf.reduce_sum(square_diff, 2) + + +class WeightedSmoothL1LocalizationLoss(Loss): + """Smooth L1 localization loss function aka Huber Loss.. + + The smooth L1_loss is defined elementwise as .5 x^2 if |x| <= delta and + delta * (|x|- 0.5*delta) otherwise, where x is the difference between + predictions and target. + + See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015) + """ + + def __init__(self, delta=1.0): + """Constructor. + + Args: + delta: delta for smooth L1 loss. + """ + super(WeightedSmoothL1LocalizationLoss, self).__init__() + self._delta = delta + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + code_size] representing the (encoded) predicted locations of objects. + target_tensor: A float tensor of shape [batch_size, num_anchors, + code_size] representing the regression targets + weights: a float tensor of shape [batch_size, num_anchors] + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] tensor + representing the value of the loss function. 
+ """ + return tf.reduce_sum(tf.losses.huber_loss( + target_tensor, + prediction_tensor, + delta=self._delta, + weights=tf.expand_dims(weights, axis=2), + loss_collection=None, + reduction=tf.losses.Reduction.NONE + ), axis=2) + + +class WeightedIOULocalizationLoss(Loss): + """IOU localization loss function. + + Sums the IOU for corresponding pairs of predicted/groundtruth boxes + and for each pair assign a loss of 1 - IOU. We then compute a weighted + sum over all pairs which is returned as the total loss. + """ + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4] + representing the decoded predicted boxes + target_tensor: A float tensor of shape [batch_size, num_anchors, 4] + representing the decoded target boxes + weights: a float tensor of shape [batch_size, num_anchors] + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] tensor + representing the value of the loss function. + """ + predicted_boxes = box_list.BoxList(tf.reshape(prediction_tensor, [-1, 4])) + target_boxes = box_list.BoxList(tf.reshape(target_tensor, [-1, 4])) + per_anchor_iou_loss = 1.0 - box_list_ops.matched_iou(predicted_boxes, + target_boxes) + return tf.reshape(weights, [-1]) * per_anchor_iou_loss + + +class WeightedGIOULocalizationLoss(Loss): + """GIOU localization loss function. + + Sums the GIOU loss for corresponding pairs of predicted/groundtruth boxes + and for each pair assign a loss of 1 - GIOU. We then compute a weighted + sum over all pairs which is returned as the total loss. + """ + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4] + representing the decoded predicted boxes + target_tensor: A float tensor of shape [batch_size, num_anchors, 4] + representing the decoded target boxes + weights: a float tensor of shape [batch_size, num_anchors] + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] tensor + representing the value of the loss function. + """ + batch_size, num_anchors, _ = shape_utils.combined_static_and_dynamic_shape( + prediction_tensor) + predicted_boxes = tf.reshape(prediction_tensor, [-1, 4]) + target_boxes = tf.reshape(target_tensor, [-1, 4]) + + per_anchor_iou_loss = 1 - ops.giou(predicted_boxes, target_boxes) + return tf.reshape(tf.reshape(weights, [-1]) * per_anchor_iou_loss, + [batch_size, num_anchors]) + + +class WeightedSigmoidClassificationLoss(Loss): + """Sigmoid cross entropy classification loss function.""" + + def _compute_loss(self, + prediction_tensor, + target_tensor, + weights, + class_indices=None): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing the predicted logits for each class + target_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing one-hot encoded classification targets + weights: a float tensor of shape, either [batch_size, num_anchors, + num_classes] or [batch_size, num_anchors, 1]. If the shape is + [batch_size, num_anchors, 1], all the classses are equally weighted. + class_indices: (Optional) A 1-D integer tensor of class indices. + If provided, computes loss only for the specified class indices. + + Returns: + loss: a float tensor of shape [batch_size, num_anchors, num_classes] + representing the value of the loss function. 
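A small NumPy sketch, not part of this diff, of the per-entry sigmoid cross-entropy described above and of how a [batch_size, num_anchors, 1] weight tensor broadcasts over classes; weighted_sigmoid_ce is an illustrative name, and the stable formula is the one documented for tf.nn.sigmoid_cross_entropy_with_logits.

import numpy as np

def weighted_sigmoid_ce(logits, targets, weights):
  # Numerically stable per-entry sigmoid cross-entropy, the same quantity
  # tf.nn.sigmoid_cross_entropy_with_logits computes:
  #   max(x, 0) - x * z + log(1 + exp(-|x|))
  per_entry = (np.maximum(logits, 0) - logits * targets
               + np.log1p(np.exp(-np.abs(logits))))
  # A weight tensor of shape [batch, anchors, 1] broadcasts over the class
  # axis, which is what "all the classes are equally weighted" means above.
  return per_entry * weights

logits = np.zeros((1, 2, 3))    # [batch_size, num_anchors, num_classes]
targets = np.eye(3)[None, :2]   # one-hot targets for the two anchors
weights = np.ones((1, 2, 1))    # one weight per anchor
print(weighted_sigmoid_ce(logits, targets, weights).shape)  # (1, 2, 3)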
+ """ + if class_indices is not None: + weights *= tf.reshape( + ops.indices_to_dense_vector(class_indices, + tf.shape(prediction_tensor)[2]), + [1, 1, -1]) + per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( + labels=target_tensor, logits=prediction_tensor)) + return per_entry_cross_ent * weights + + +class SigmoidFocalClassificationLoss(Loss): + """Sigmoid focal cross entropy loss. + + Focal loss down-weights well classified examples and focusses on the hard + examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition. + """ + + def __init__(self, gamma=2.0, alpha=0.25): + """Constructor. + + Args: + gamma: exponent of the modulating factor (1 - p_t) ^ gamma. + alpha: optional alpha weighting factor to balance positives vs negatives. + """ + super(SigmoidFocalClassificationLoss, self).__init__() + self._alpha = alpha + self._gamma = gamma + + def _compute_loss(self, + prediction_tensor, + target_tensor, + weights, + class_indices=None): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing the predicted logits for each class + target_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing one-hot encoded classification targets + weights: a float tensor of shape, either [batch_size, num_anchors, + num_classes] or [batch_size, num_anchors, 1]. If the shape is + [batch_size, num_anchors, 1], all the classses are equally weighted. + class_indices: (Optional) A 1-D integer tensor of class indices. + If provided, computes loss only for the specified class indices. + + Returns: + loss: a float tensor of shape [batch_size, num_anchors, num_classes] + representing the value of the loss function. + """ + if class_indices is not None: + weights *= tf.reshape( + ops.indices_to_dense_vector(class_indices, + tf.shape(prediction_tensor)[2]), + [1, 1, -1]) + per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( + labels=target_tensor, logits=prediction_tensor)) + prediction_probabilities = tf.sigmoid(prediction_tensor) + p_t = ((target_tensor * prediction_probabilities) + + ((1 - target_tensor) * (1 - prediction_probabilities))) + modulating_factor = 1.0 + if self._gamma: + modulating_factor = tf.pow(1.0 - p_t, self._gamma) + alpha_weight_factor = 1.0 + if self._alpha is not None: + alpha_weight_factor = (target_tensor * self._alpha + + (1 - target_tensor) * (1 - self._alpha)) + focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor * + per_entry_cross_ent) + return focal_cross_entropy_loss * weights + + +class WeightedSoftmaxClassificationLoss(Loss): + """Softmax loss function.""" + + def __init__(self, logit_scale=1.0): + """Constructor. + + Args: + logit_scale: When this value is high, the prediction is "diffused" and + when this value is low, the prediction is made peakier. + (default 1.0) + + """ + super(WeightedSoftmaxClassificationLoss, self).__init__() + self._logit_scale = logit_scale + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing the predicted logits for each class + target_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing one-hot encoded classification targets + weights: a float tensor of shape, either [batch_size, num_anchors, + num_classes] or [batch_size, num_anchors, 1]. 
If the shape is + [batch_size, num_anchors, 1], all the classses are equally weighted. + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] + representing the value of the loss function. + """ + weights = tf.reduce_mean(weights, axis=2) + num_classes = prediction_tensor.get_shape().as_list()[-1] + prediction_tensor = tf.divide( + prediction_tensor, self._logit_scale, name='scale_logit') + per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( + labels=tf.reshape(target_tensor, [-1, num_classes]), + logits=tf.reshape(prediction_tensor, [-1, num_classes]))) + return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights + + +class WeightedSoftmaxClassificationAgainstLogitsLoss(Loss): + """Softmax loss function against logits. + + Targets are expected to be provided in logits space instead of "one hot" or + "probability distribution" space. + """ + + def __init__(self, logit_scale=1.0): + """Constructor. + + Args: + logit_scale: When this value is high, the target is "diffused" and + when this value is low, the target is made peakier. + (default 1.0) + + """ + super(WeightedSoftmaxClassificationAgainstLogitsLoss, self).__init__() + self._logit_scale = logit_scale + + def _scale_and_softmax_logits(self, logits): + """Scale logits then apply softmax.""" + scaled_logits = tf.divide(logits, self._logit_scale, name='scale_logits') + return tf.nn.softmax(scaled_logits, name='convert_scores') + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing the predicted logits for each class + target_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing logit classification targets + weights: a float tensor of shape, either [batch_size, num_anchors, + num_classes] or [batch_size, num_anchors, 1]. If the shape is + [batch_size, num_anchors, 1], all the classses are equally weighted. + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] + representing the value of the loss function. + """ + weights = tf.reduce_mean(weights, axis=2) + num_classes = prediction_tensor.get_shape().as_list()[-1] + target_tensor = self._scale_and_softmax_logits(target_tensor) + prediction_tensor = tf.divide(prediction_tensor, self._logit_scale, + name='scale_logits') + + per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( + labels=tf.reshape(target_tensor, [-1, num_classes]), + logits=tf.reshape(prediction_tensor, [-1, num_classes]))) + return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights + + +class BootstrappedSigmoidClassificationLoss(Loss): + """Bootstrapped sigmoid cross entropy classification loss function. + + This loss uses a convex combination of training labels and the current model's + predictions as training targets in the classification loss. The idea is that + as the model improves over time, its predictions can be trusted more and we + can use these predictions to mitigate the damage of noisy/incorrect labels, + because incorrect labels are likely to be eventually highly inconsistent with + other stimuli predicted to have the same label by the model. + + In "soft" bootstrapping, we use all predicted class probabilities, whereas in + "hard" bootstrapping, we use the single class favored by the model. + + See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by + Reed et al. (ICLR 2015). 
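A standalone NumPy sketch, not part of this diff, of the soft and hard bootstrapped targets described above; the alpha value and function names are illustrative, chosen only to show the interpolation.

import numpy as np

def sigmoid(x):
  return 1.0 / (1.0 + np.exp(-x))

def bootstrap_targets(labels, logits, alpha=0.95, bootstrap_type='soft'):
  # Convex combination of the (possibly noisy) labels with the model's own
  # predictions: 'soft' mixes in predicted probabilities, 'hard' mixes in
  # the thresholded 0/1 prediction, mirroring the description above.
  preds = sigmoid(logits)
  if bootstrap_type == 'soft':
    return alpha * labels + (1.0 - alpha) * preds
  return alpha * labels + (1.0 - alpha) * (preds > 0.5).astype(np.float64)

labels = np.array([1.0, 0.0])
logits = np.array([3.0, 2.0])   # the model is confident both are positives
print(bootstrap_targets(labels, logits))                         # ~[0.998, 0.044]
print(bootstrap_targets(labels, logits, bootstrap_type='hard'))  # [1.0, 0.05]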
+ """ + + def __init__(self, alpha, bootstrap_type='soft'): + """Constructor. + + Args: + alpha: a float32 scalar tensor between 0 and 1 representing interpolation + weight + bootstrap_type: set to either 'hard' or 'soft' (default) + + Raises: + ValueError: if bootstrap_type is not either 'hard' or 'soft' + """ + super(BootstrappedSigmoidClassificationLoss, self).__init__() + if bootstrap_type != 'hard' and bootstrap_type != 'soft': + raise ValueError('Unrecognized bootstrap_type: must be one of ' + '\'hard\' or \'soft.\'') + self._alpha = alpha + self._bootstrap_type = bootstrap_type + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing the predicted logits for each class + target_tensor: A float tensor of shape [batch_size, num_anchors, + num_classes] representing one-hot encoded classification targets + weights: a float tensor of shape, either [batch_size, num_anchors, + num_classes] or [batch_size, num_anchors, 1]. If the shape is + [batch_size, num_anchors, 1], all the classses are equally weighted. + + Returns: + loss: a float tensor of shape [batch_size, num_anchors, num_classes] + representing the value of the loss function. + """ + if self._bootstrap_type == 'soft': + bootstrap_target_tensor = self._alpha * target_tensor + ( + 1.0 - self._alpha) * tf.sigmoid(prediction_tensor) + else: + bootstrap_target_tensor = self._alpha * target_tensor + ( + 1.0 - self._alpha) * tf.cast( + tf.sigmoid(prediction_tensor) > 0.5, tf.float32) + per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( + labels=bootstrap_target_tensor, logits=prediction_tensor)) + return per_entry_cross_ent * weights + + +class HardExampleMiner(object): + """Hard example mining for regions in a list of images. + + Implements hard example mining to select a subset of regions to be + back-propagated. For each image, selects the regions with highest losses, + subject to the condition that a newly selected region cannot have + an IOU > iou_threshold with any of the previously selected regions. + This can be achieved by re-using a greedy non-maximum suppression algorithm. + A constraint on the number of negatives mined per positive region can also be + enforced. + + Reference papers: "Training Region-based Object Detectors with Online + Hard Example Mining" (CVPR 2016) by Srivastava et al., and + "SSD: Single Shot MultiBox Detector" (ECCV 2016) by Liu et al. + """ + + def __init__(self, + num_hard_examples=64, + iou_threshold=0.7, + loss_type='both', + cls_loss_weight=0.05, + loc_loss_weight=0.06, + max_negatives_per_positive=None, + min_negatives_per_image=0): + """Constructor. + + The hard example mining implemented by this class can replicate the behavior + in the two aforementioned papers (Srivastava et al., and Liu et al). + To replicate the A2 paper (Srivastava et al), num_hard_examples is set + to a fixed parameter (64 by default) and iou_threshold is set to .7 for + running non-max-suppression the predicted boxes prior to hard mining. + In order to replicate the SSD paper (Liu et al), num_hard_examples should + be set to None, max_negatives_per_positive should be 3 and iou_threshold + should be 1.0 (in order to effectively turn off NMS). + + Args: + num_hard_examples: maximum number of hard examples to be + selected per image (prior to enforcing max negative to positive ratio + constraint). If set to None, all examples obtained after NMS are + considered. 
+ iou_threshold: minimum intersection over union for an example + to be discarded during NMS. + loss_type: use only classification losses ('cls', default), + localization losses ('loc') or both losses ('both'). + In the last case, cls_loss_weight and loc_loss_weight are used to + compute weighted sum of the two losses. + cls_loss_weight: weight for classification loss. + loc_loss_weight: weight for location loss. + max_negatives_per_positive: maximum number of negatives to retain for + each positive anchor. By default, num_negatives_per_positive is None, + which means that we do not enforce a prespecified negative:positive + ratio. Note also that num_negatives_per_positives can be a float + (and will be converted to be a float even if it is passed in otherwise). + min_negatives_per_image: minimum number of negative anchors to sample for + a given image. Setting this to a positive number allows sampling + negatives in an image without any positive anchors and thus not biased + towards at least one detection per image. + """ + self._num_hard_examples = num_hard_examples + self._iou_threshold = iou_threshold + self._loss_type = loss_type + self._cls_loss_weight = cls_loss_weight + self._loc_loss_weight = loc_loss_weight + self._max_negatives_per_positive = max_negatives_per_positive + self._min_negatives_per_image = min_negatives_per_image + if self._max_negatives_per_positive is not None: + self._max_negatives_per_positive = float(self._max_negatives_per_positive) + self._num_positives_list = None + self._num_negatives_list = None + + def __call__(self, + location_losses, + cls_losses, + decoded_boxlist_list, + match_list=None): + """Computes localization and classification losses after hard mining. + + Args: + location_losses: a float tensor of shape [num_images, num_anchors] + representing anchorwise localization losses. + cls_losses: a float tensor of shape [num_images, num_anchors] + representing anchorwise classification losses. + decoded_boxlist_list: a list of decoded BoxList representing location + predictions for each image. + match_list: an optional list of matcher.Match objects encoding the match + between anchors and groundtruth boxes for each image of the batch, + with rows of the Match objects corresponding to groundtruth boxes + and columns corresponding to anchors. Match objects in match_list are + used to reference which anchors are positive, negative or ignored. If + self._max_negatives_per_positive exists, these are then used to enforce + a prespecified negative to positive ratio. + + Returns: + mined_location_loss: a float scalar with sum of localization losses from + selected hard examples. + mined_cls_loss: a float scalar with sum of classification losses from + selected hard examples. + Raises: + ValueError: if location_losses, cls_losses and decoded_boxlist_list do + not have compatible shapes (i.e., they must correspond to the same + number of images). + ValueError: if match_list is specified but its length does not match + len(decoded_boxlist_list). 
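A possible usage sketch for the miner described above, assuming the object_detection package added by this diff is importable; the anchor count, boxes, and loss values are made up for illustration only.

import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.core import losses

# One image with four anchors: per-anchor losses plus the decoded boxes they
# correspond to (the first two boxes overlap heavily, so NMS drops one).
location_losses = tf.constant([[0.1, 0.9, 0.2, 0.8]])
cls_losses = tf.constant([[0.5, 0.1, 0.9, 0.2]])
decoded_boxes = box_list.BoxList(tf.constant([[0.0, 0.0, 0.1, 0.1],
                                              [0.0, 0.0, 0.1, 0.1],
                                              [0.5, 0.5, 0.6, 0.6],
                                              [0.9, 0.9, 1.0, 1.0]]))

miner = losses.HardExampleMiner(num_hard_examples=2, iou_threshold=0.7,
                                loss_type='both')
mined_loc_loss, mined_cls_loss = miner(location_losses, cls_losses,
                                       decoded_boxlist_list=[decoded_boxes])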
+ """ + mined_location_losses = [] + mined_cls_losses = [] + location_losses = tf.unstack(location_losses) + cls_losses = tf.unstack(cls_losses) + num_images = len(decoded_boxlist_list) + if not match_list: + match_list = num_images * [None] + if not len(location_losses) == len(decoded_boxlist_list) == len(cls_losses): + raise ValueError('location_losses, cls_losses and decoded_boxlist_list ' + 'do not have compatible shapes.') + if not isinstance(match_list, list): + raise ValueError('match_list must be a list.') + if len(match_list) != len(decoded_boxlist_list): + raise ValueError('match_list must either be None or have ' + 'length=len(decoded_boxlist_list).') + num_positives_list = [] + num_negatives_list = [] + for ind, detection_boxlist in enumerate(decoded_boxlist_list): + box_locations = detection_boxlist.get() + match = match_list[ind] + image_losses = cls_losses[ind] + if self._loss_type == 'loc': + image_losses = location_losses[ind] + elif self._loss_type == 'both': + image_losses *= self._cls_loss_weight + image_losses += location_losses[ind] * self._loc_loss_weight + if self._num_hard_examples is not None: + num_hard_examples = self._num_hard_examples + else: + num_hard_examples = detection_boxlist.num_boxes() + selected_indices = tf.image.non_max_suppression( + box_locations, image_losses, num_hard_examples, self._iou_threshold) + if self._max_negatives_per_positive is not None and match: + (selected_indices, num_positives, + num_negatives) = self._subsample_selection_to_desired_neg_pos_ratio( + selected_indices, match, self._max_negatives_per_positive, + self._min_negatives_per_image) + num_positives_list.append(num_positives) + num_negatives_list.append(num_negatives) + mined_location_losses.append( + tf.reduce_sum(tf.gather(location_losses[ind], selected_indices))) + mined_cls_losses.append( + tf.reduce_sum(tf.gather(cls_losses[ind], selected_indices))) + location_loss = tf.reduce_sum(tf.stack(mined_location_losses)) + cls_loss = tf.reduce_sum(tf.stack(mined_cls_losses)) + if match and self._max_negatives_per_positive: + self._num_positives_list = num_positives_list + self._num_negatives_list = num_negatives_list + return (location_loss, cls_loss) + + def summarize(self): + """Summarize the number of positives and negatives after mining.""" + if self._num_positives_list and self._num_negatives_list: + avg_num_positives = tf.reduce_mean( + tf.cast(self._num_positives_list, dtype=tf.float32)) + avg_num_negatives = tf.reduce_mean( + tf.cast(self._num_negatives_list, dtype=tf.float32)) + tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives) + tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives) + + def _subsample_selection_to_desired_neg_pos_ratio(self, + indices, + match, + max_negatives_per_positive, + min_negatives_per_image=0): + """Subsample a collection of selected indices to a desired neg:pos ratio. + + This function takes a subset of M indices (indexing into a large anchor + collection of N anchors where M=0, + meaning that column i is matched with row match_results[i]. + (2) match_results[i]=-1, meaning that column i is not matched. + (3) match_results[i]=-2, meaning that column i is ignored. + use_matmul_gather: Use matrix multiplication based gather instead of + standard tf.gather. (Default: False). 
+ + Raises: + ValueError: if match_results does not have rank 1 or is not an + integer int32 scalar tensor + """ + if match_results.shape.ndims != 1: + raise ValueError('match_results should have rank 1') + if match_results.dtype != tf.int32: + raise ValueError('match_results should be an int32 or int64 scalar ' + 'tensor') + self._match_results = match_results + self._gather_op = tf.gather + if use_matmul_gather: + self._gather_op = ops.matmul_gather_on_zeroth_axis + + @property + def match_results(self): + """The accessor for match results. + + Returns: + the tensor which encodes the match results. + """ + return self._match_results + + def matched_column_indices(self): + """Returns column indices that match to some row. + + The indices returned by this op are always sorted in increasing order. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1))) + + def matched_column_indicator(self): + """Returns column indices that are matched. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return tf.greater_equal(self._match_results, 0) + + def num_matched_columns(self): + """Returns number (int32 scalar tensor) of matched columns.""" + return tf.size(self.matched_column_indices()) + + def unmatched_column_indices(self): + """Returns column indices that do not match any row. + + The indices returned by this op are always sorted in increasing order. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1))) + + def unmatched_column_indicator(self): + """Returns column indices that are unmatched. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return tf.equal(self._match_results, -1) + + def num_unmatched_columns(self): + """Returns number (int32 scalar tensor) of unmatched columns.""" + return tf.size(self.unmatched_column_indices()) + + def ignored_column_indices(self): + """Returns column indices that are ignored (neither Matched nor Unmatched). + + The indices returned by this op are always sorted in increasing order. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return self._reshape_and_cast(tf.where(self.ignored_column_indicator())) + + def ignored_column_indicator(self): + """Returns boolean column indicator where True means the colum is ignored. + + Returns: + column_indicator: boolean vector which is True for all ignored column + indices. + """ + return tf.equal(self._match_results, -2) + + def num_ignored_columns(self): + """Returns number (int32 scalar tensor) of matched columns.""" + return tf.size(self.ignored_column_indices()) + + def unmatched_or_ignored_column_indices(self): + """Returns column indices that are unmatched or ignored. + + The indices returned by this op are always sorted in increasing order. + + Returns: + column_indices: int32 tensor of shape [K] with column indices. + """ + return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results))) + + def matched_row_indices(self): + """Returns row indices that match some column. + + The indices returned by this op are ordered so as to be in correspondence + with the output of matched_column_indicator(). For example if + self.matched_column_indicator() is [0,2], and self.matched_row_indices() is + [7, 3], then we know that column 0 was matched to row 7 and column 2 was + matched to row 3. 
+ + Returns: + row_indices: int32 tensor of shape [K] with row indices. + """ + return self._reshape_and_cast( + self._gather_op(tf.cast(self._match_results, dtype=tf.float32), + self.matched_column_indices())) + + def num_matched_rows(self): + """Returns number (int32 scalar tensor) of matched rows.""" + unique_rows, _ = tf.unique(self.matched_row_indices()) + return tf.size(unique_rows) + + def _reshape_and_cast(self, t): + return tf.cast(tf.reshape(t, [-1]), tf.int32) + + def gather_based_on_match(self, input_tensor, unmatched_value, + ignored_value): + """Gathers elements from `input_tensor` based on match results. + + For columns that are matched to a row, gathered_tensor[col] is set to + input_tensor[match_results[col]]. For columns that are unmatched, + gathered_tensor[col] is set to unmatched_value. Finally, for columns that + are ignored gathered_tensor[col] is set to ignored_value. + + Note that the input_tensor.shape[1:] must match with unmatched_value.shape + and ignored_value.shape + + Args: + input_tensor: Tensor to gather values from. + unmatched_value: Constant tensor value for unmatched columns. + ignored_value: Constant tensor value for ignored columns. + + Returns: + gathered_tensor: A tensor containing values gathered from input_tensor. + The shape of the gathered tensor is [match_results.shape[0]] + + input_tensor.shape[1:]. + """ + input_tensor = tf.concat( + [tf.stack([ignored_value, unmatched_value]), + input_tensor], + axis=0) + gather_indices = tf.maximum(self.match_results + 2, 0) + gathered_tensor = self._gather_op(input_tensor, gather_indices) + return gathered_tensor + + +class Matcher(six.with_metaclass(abc.ABCMeta, object)): + """Abstract base class for matcher. + """ + + def __init__(self, use_matmul_gather=False): + """Constructs a Matcher. + + Args: + use_matmul_gather: Force constructed match objects to use matrix + multiplication based gather instead of standard tf.gather. + (Default: False). + """ + self._use_matmul_gather = use_matmul_gather + + def match(self, similarity_matrix, valid_rows=None, scope=None): + """Computes matches among row and column indices and returns the result. + + Computes matches among the row and column indices based on the similarity + matrix and optional arguments. + + Args: + similarity_matrix: Float tensor of shape [N, M] with pairwise similarity + where higher value means more similar. + valid_rows: A boolean tensor of shape [N] indicating the rows that are + valid for matching. + scope: Op scope name. Defaults to 'Match' if None. + + Returns: + A Match object with the results of matching. + """ + with tf.name_scope(scope, 'Match') as scope: + if valid_rows is None: + valid_rows = tf.ones(tf.shape(similarity_matrix)[0], dtype=tf.bool) + return Match(self._match(similarity_matrix, valid_rows), + self._use_matmul_gather) + + @abc.abstractmethod + def _match(self, similarity_matrix, valid_rows): + """Method to be overridden by implementations. + + Args: + similarity_matrix: Float tensor of shape [N, M] with pairwise similarity + where higher value means more similar. + valid_rows: A boolean tensor of shape [N] indicating the rows that are + valid for matching. + Returns: + match_results: Integer tensor of shape [M]: match_results[i]>=0 means + that column i is matched to row match_results[i], match_results[i]=-1 + means that the column is not matched. match_results[i]=-2 means that + the column is ignored (usually this happens when there is a very weak + match which one neither wants as positive nor negative example). 
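A minimal illustrative Matcher subclass, not part of this diff, showing one way to satisfy the _match contract described above; it never emits -1 or -2, so every column ends up matched. NaiveArgMaxMatcher is a made-up name, not the library's production matcher.

import tensorflow.compat.v1 as tf
from object_detection.core import matcher

class NaiveArgMaxMatcher(matcher.Matcher):
  """Toy matcher: every column is matched to its highest-similarity row."""

  def _match(self, similarity_matrix, valid_rows):
    # Push invalid rows to a very low similarity so argmax never picks them,
    # then return one row index per column, as the contract above requires.
    penalty = tf.expand_dims(
        (1.0 - tf.cast(valid_rows, tf.float32)) * 1e9, axis=1)
    return tf.cast(tf.argmax(similarity_matrix - penalty, axis=0), tf.int32)

similarity = tf.constant([[0.9, 0.1],
                          [0.2, 0.8]])
match = NaiveArgMaxMatcher().match(similarity)
# match.matched_column_indices() -> [0, 1]; match.matched_row_indices() -> [0, 1]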
+ """ + pass diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/matcher.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/matcher.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb80d237f406f702d383b7bbed8a1cd80f566e6a Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/matcher.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/matcher_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/matcher_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ad64075397e8ba2b6aea74b039036a84204f6631 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/matcher_test.py @@ -0,0 +1,191 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.matcher.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import matcher +from object_detection.utils import test_case + + +class MatchTest(test_case.TestCase): + + def test_get_correct_matched_columnIndices(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + matched_column_indices = match.matched_column_indices() + return matched_column_indices + expected_column_indices = [0, 1, 3, 5] + matched_column_indices = self.execute(graph_fn, []) + self.assertAllEqual(matched_column_indices, expected_column_indices) + + def test_get_correct_counts(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 1, -2]) + match = matcher.Match(match_results) + num_matched_columns = match.num_matched_columns() + num_unmatched_columns = match.num_unmatched_columns() + num_ignored_columns = match.num_ignored_columns() + num_matched_rows = match.num_matched_rows() + return [num_matched_columns, num_unmatched_columns, num_ignored_columns, + num_matched_rows] + (num_matched_columns_out, num_unmatched_columns_out, + num_ignored_columns_out, + num_matched_rows_out) = self.execute_cpu(graph_fn, []) + exp_num_matched_columns = 4 + exp_num_unmatched_columns = 2 + exp_num_ignored_columns = 1 + exp_num_matched_rows = 3 + self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns) + self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns) + self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns) + self.assertAllEqual(num_matched_rows_out, exp_num_matched_rows) + + def testGetCorrectUnmatchedColumnIndices(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + unmatched_column_indices = match.unmatched_column_indices() + return unmatched_column_indices + unmatched_column_indices = self.execute(graph_fn, []) + expected_column_indices = 
[2, 4] + self.assertAllEqual(unmatched_column_indices, expected_column_indices) + + def testGetCorrectMatchedRowIndices(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + matched_row_indices = match.matched_row_indices() + return matched_row_indices + matched_row_indices = self.execute(graph_fn, []) + expected_row_indices = [3, 1, 0, 5] + self.assertAllEqual(matched_row_indices, expected_row_indices) + + def test_get_correct_ignored_column_indices(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + ignored_column_indices = match.ignored_column_indices() + return ignored_column_indices + ignored_column_indices = self.execute(graph_fn, []) + expected_column_indices = [6] + self.assertAllEqual(ignored_column_indices, expected_column_indices) + + def test_get_correct_matched_column_indicator(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + matched_column_indicator = match.matched_column_indicator() + return matched_column_indicator + expected_column_indicator = [True, True, False, True, False, True, False] + matched_column_indicator = self.execute(graph_fn, []) + self.assertAllEqual(matched_column_indicator, expected_column_indicator) + + def test_get_correct_unmatched_column_indicator(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + unmatched_column_indicator = match.unmatched_column_indicator() + return unmatched_column_indicator + expected_column_indicator = [False, False, True, False, True, False, False] + unmatched_column_indicator = self.execute(graph_fn, []) + self.assertAllEqual(unmatched_column_indicator, expected_column_indicator) + + def test_get_correct_ignored_column_indicator(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + ignored_column_indicator = match.ignored_column_indicator() + return ignored_column_indicator + expected_column_indicator = [False, False, False, False, False, False, True] + ignored_column_indicator = self.execute(graph_fn, []) + self.assertAllEqual(ignored_column_indicator, expected_column_indicator) + + def test_get_correct_unmatched_ignored_column_indices(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + match = matcher.Match(match_results) + unmatched_ignored_column_indices = (match. 
+ unmatched_or_ignored_column_indices()) + return unmatched_ignored_column_indices + expected_column_indices = [2, 4, 6] + unmatched_ignored_column_indices = self.execute(graph_fn, []) + self.assertAllEqual(unmatched_ignored_column_indices, + expected_column_indices) + + def test_all_columns_accounted_for(self): + # Note: deliberately setting to small number so not always + # all possibilities appear (matched, unmatched, ignored) + def graph_fn(): + match_results = tf.random_uniform( + [num_matches], minval=-2, maxval=5, dtype=tf.int32) + match = matcher.Match(match_results) + matched_column_indices = match.matched_column_indices() + unmatched_column_indices = match.unmatched_column_indices() + ignored_column_indices = match.ignored_column_indices() + return (matched_column_indices, unmatched_column_indices, + ignored_column_indices) + num_matches = 10 + matched, unmatched, ignored = self.execute(graph_fn, []) + all_indices = np.hstack((matched, unmatched, ignored)) + all_indices_sorted = np.sort(all_indices) + self.assertAllEqual(all_indices_sorted, + np.arange(num_matches, dtype=np.int32)) + + def test_scalar_gather_based_on_match(self): + def graph_fn(): + match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) + input_tensor = tf.constant([0, 1, 2, 3, 4, 5, 6, 7], dtype=tf.float32) + match = matcher.Match(match_results) + gathered_tensor = match.gather_based_on_match(input_tensor, + unmatched_value=100., + ignored_value=200.) + return gathered_tensor + expected_gathered_tensor = [3, 1, 100, 0, 100, 5, 200] + gathered_tensor_out = self.execute(graph_fn, []) + self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) + + def test_multidimensional_gather_based_on_match(self): + def graph_fn(): + match_results = tf.constant([1, -1, -2]) + input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]], + dtype=tf.float32) + match = matcher.Match(match_results) + gathered_tensor = match.gather_based_on_match(input_tensor, + unmatched_value=tf.zeros(4), + ignored_value=tf.zeros(4)) + return gathered_tensor + expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]] + gathered_tensor_out = self.execute(graph_fn, []) + self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) + + def test_multidimensional_gather_based_on_match_with_matmul_gather_op(self): + def graph_fn(): + match_results = tf.constant([1, -1, -2]) + input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]], + dtype=tf.float32) + match = matcher.Match(match_results, use_matmul_gather=True) + gathered_tensor = match.gather_based_on_match(input_tensor, + unmatched_value=tf.zeros(4), + ignored_value=tf.zeros(4)) + return gathered_tensor + expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]] + gathered_tensor_out = self.execute(graph_fn, []) + self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/minibatch_sampler.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/minibatch_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5b0a7242530202c5510c8ea29c4f5857f12b3b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/minibatch_sampler.py @@ -0,0 +1,94 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base minibatch sampler module. + +The job of the minibatch_sampler is to subsample a minibatch based on some +criterion. + +The main function call is: + subsample(indicator, batch_size, **params). +Indicator is a 1d boolean tensor where True denotes which examples can be +sampled. It returns a boolean indicator where True denotes an example has been +sampled.. + +Subclasses should implement the Subsample function and can make use of the +@staticmethod SubsampleIndicator. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import ABCMeta +from abc import abstractmethod + +import six +import tensorflow.compat.v1 as tf + +from object_detection.utils import ops + + +class MinibatchSampler(six.with_metaclass(ABCMeta, object)): + """Abstract base class for subsampling minibatches.""" + + def __init__(self): + """Constructs a minibatch sampler.""" + pass + + @abstractmethod + def subsample(self, indicator, batch_size, **params): + """Returns subsample of entries in indicator. + + Args: + indicator: boolean tensor of shape [N] whose True entries can be sampled. + batch_size: desired batch size. + **params: additional keyword arguments for specific implementations of + the MinibatchSampler. + + Returns: + sample_indicator: boolean tensor of shape [N] whose True entries have been + sampled. If sum(indicator) >= batch_size, sum(is_sampled) = batch_size + """ + pass + + @staticmethod + def subsample_indicator(indicator, num_samples): + """Subsample indicator vector. + + Given a boolean indicator vector with M elements set to `True`, the function + assigns all but `num_samples` of these previously `True` elements to + `False`. If `num_samples` is greater than M, the original indicator vector + is returned. + + Args: + indicator: a 1-dimensional boolean tensor indicating which elements + are allowed to be sampled and which are not. 
+ num_samples: int32 scalar tensor + + Returns: + a boolean tensor with the same shape as input (indicator) tensor + """ + indices = tf.where(indicator) + indices = tf.random_shuffle(indices) + indices = tf.reshape(indices, [-1]) + + num_samples = tf.minimum(tf.size(indices), num_samples) + selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1])) + + selected_indicator = ops.indices_to_dense_vector(selected_indices, + tf.shape(indicator)[0]) + + return tf.equal(selected_indicator, 1) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/minibatch_sampler.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/minibatch_sampler.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c2fdc12e2528dbec0d67ab35c1aec15a0d45629 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/minibatch_sampler.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/minibatch_sampler_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/minibatch_sampler_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b3ddadd25eb587c2087e23a20807488fee955882 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/minibatch_sampler_test.py @@ -0,0 +1,71 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for google3.research.vale.object_detection.minibatch_sampler.""" + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import minibatch_sampler +from object_detection.utils import test_case + + +class MinibatchSamplerTest(test_case.TestCase): + + def test_subsample_indicator_when_more_true_elements_than_num_samples(self): + np_indicator = np.array([True, False, True, False, True, True, False]) + def graph_fn(indicator): + samples = minibatch_sampler.MinibatchSampler.subsample_indicator( + indicator, 3) + return samples + samples_out = self.execute(graph_fn, [np_indicator]) + self.assertTrue(np.sum(samples_out), 3) + self.assertAllEqual(samples_out, + np.logical_and(samples_out, np_indicator)) + + def test_subsample_indicator_when_less_true_elements_than_num_samples(self): + np_indicator = np.array([True, False, True, False, True, True, False]) + def graph_fn(indicator): + samples = minibatch_sampler.MinibatchSampler.subsample_indicator( + indicator, 5) + return samples + samples_out = self.execute(graph_fn, [np_indicator]) + self.assertTrue(np.sum(samples_out), 4) + self.assertAllEqual(samples_out, + np.logical_and(samples_out, np_indicator)) + + def test_subsample_indicator_when_num_samples_is_zero(self): + np_indicator = np.array([True, False, True, False, True, True, False]) + def graph_fn(indicator): + samples_none = minibatch_sampler.MinibatchSampler.subsample_indicator( + indicator, 0) + return samples_none + samples_out = self.execute(graph_fn, [np_indicator]) + self.assertAllEqual( + np.zeros_like(samples_out, dtype=bool), + samples_out) + + def test_subsample_indicator_when_indicator_all_false(self): + indicator_empty = np.zeros([0], dtype=np.bool) + def graph_fn(indicator): + samples_empty = minibatch_sampler.MinibatchSampler.subsample_indicator( + indicator, 4) + return samples_empty + samples_out = self.execute(graph_fn, [indicator_empty]) + self.assertEqual(0, samples_out.size) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/model.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/model.py new file mode 100644 index 0000000000000000000000000000000000000000..be4515216cc45801b21903a4beeaeb401572e75d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/model.py @@ -0,0 +1,550 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Abstract detection model. + +This file defines a generic base class for detection models. Programs that are +designed to work with arbitrary detection models should only depend on this +class. 
We intend for the functions in this class to follow tensor-in/tensor-out +design, thus all functions have tensors or lists/dictionaries holding tensors as +inputs and outputs. + +Abstractly, detection models predict output tensors given input images +which can be passed to a loss function at training time or passed to a +postprocessing function at eval time. The computation graphs at a high level +consequently look as follows: + +Training time: +inputs (images tensor) -> preprocess -> predict -> loss -> outputs (loss tensor) + +Evaluation time: +inputs (images tensor) -> preprocess -> predict -> postprocess + -> outputs (boxes tensor, scores tensor, classes tensor, num_detections tensor) + +DetectionModels must thus implement four functions (1) preprocess, (2) predict, +(3) postprocess and (4) loss. DetectionModels should make no assumptions about +the input size or aspect ratio --- they are responsible for doing any +resize/reshaping necessary (see docstring for the preprocess function). +Output classes are always integers in the range [0, num_classes). Any mapping +of these integers to semantic labels is to be handled outside of this class. + +Images are resized in the `preprocess` method. All of `preprocess`, `predict`, +and `postprocess` should be reentrant. + +The `preprocess` method runs `image_resizer_fn` that returns resized_images and +`true_image_shapes`. Since `image_resizer_fn` can pad the images with zeros, +true_image_shapes indicate the slices that contain the image without padding. +This is useful for padding images to be a fixed size for batching. + +The `postprocess` method uses the true image shapes to clip predictions that lie +outside of images. + +By default, DetectionModels produce bounding box detections; However, we support +a handful of auxiliary annotations associated with each bounding box, namely, +instance masks and keypoints. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import six +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields as fields + + +# If using a new enough version of TensorFlow, detection models should be a +# tf module or keras model for tracking. +try: + _BaseClass = tf.keras.layers.Layer +except AttributeError: + _BaseClass = object + + +class DetectionModel(six.with_metaclass(abc.ABCMeta, _BaseClass)): + """Abstract base class for detection models. + + Extends tf.Module to guarantee variable tracking. + """ + + def __init__(self, num_classes): + """Constructor. + + Args: + num_classes: number of classes. Note that num_classes *does not* include + background categories that might be implicitly predicted in various + implementations. + """ + self._num_classes = num_classes + self._groundtruth_lists = {} + + super(DetectionModel, self).__init__() + + @property + def num_classes(self): + return self._num_classes + + def groundtruth_lists(self, field): + """Access list of groundtruth tensors. + + Args: + field: a string key, options are + fields.BoxListFields.{boxes,classes,masks,keypoints, + keypoint_visibilities, densepose_*, track_ids, + temporal_offsets, track_match_flags} + fields.InputDataFields.is_annotated. + + Returns: + a list of tensors holding groundtruth information (see also + provide_groundtruth function below), with one entry for each image in the + batch. + Raises: + RuntimeError: if the field has not been provided via provide_groundtruth. 
+ """ + if field not in self._groundtruth_lists: + raise RuntimeError('Groundtruth tensor {} has not been provided'.format( + field)) + return self._groundtruth_lists[field] + + def groundtruth_has_field(self, field): + """Determines whether the groundtruth includes the given field. + + Args: + field: a string key, options are + fields.BoxListFields.{boxes,classes,masks,keypoints, + keypoint_visibilities, densepose_*, track_ids} or + fields.InputDataFields.is_annotated. + + Returns: + True if the groundtruth includes the given field, False otherwise. + """ + return field in self._groundtruth_lists + + @staticmethod + def get_side_inputs(features): + """Get side inputs from input features. + + This placeholder method provides a way for a meta-architecture to specify + how to grab additional side inputs from input features (in addition to the + image itself) and allows models to depend on contextual information. By + default, detection models do not use side information (and thus this method + returns an empty dictionary by default. However it can be overridden if + side inputs are necessary." + + Args: + features: A dictionary of tensors. + + Returns: + An empty dictionary by default. + """ + return {} + + @abc.abstractmethod + def preprocess(self, inputs): + """Input preprocessing. + + To be overridden by implementations. + + This function is responsible for any scaling/shifting of input values that + is necessary prior to running the detector on an input image. + It is also responsible for any resizing, padding that might be necessary + as images are assumed to arrive in arbitrary sizes. While this function + could conceivably be part of the predict method (below), it is often + convenient to keep these separate --- for example, we may want to preprocess + on one device, place onto a queue, and let another device (e.g., the GPU) + handle prediction. + + A few important notes about the preprocess function: + + We assume that this operation does not have any trainable variables nor + does it affect the groundtruth annotations in any way (thus data + augmentation operations such as random cropping should be performed + externally). + + There is no assumption that the batchsize in this function is the same as + the batch size in the predict function. In fact, we recommend calling the + preprocess function prior to calling any batching operations (which should + happen outside of the model) and thus assuming that batch sizes are equal + to 1 in the preprocess function. + + There is also no explicit assumption that the output resolutions + must be fixed across inputs --- this is to support "fully convolutional" + settings in which input images can have different shapes/resolutions. + + Args: + inputs: a [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: a [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + """ + pass + + @abc.abstractmethod + def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): + """Predict prediction tensors from inputs tensor. + + Outputs of this function can be passed to loss or postprocess functions. 
+ + Args: + preprocessed_inputs: a [batch, height, width, channels] float32 tensor + representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **side_inputs: additional tensors that are required by the network. + + Returns: + prediction_dict: a dictionary holding prediction tensors to be + passed to the Loss or Postprocess functions. + """ + pass + + @abc.abstractmethod + def postprocess(self, prediction_dict, true_image_shapes, **params): + """Convert predicted output tensors to final detections. + + This stage typically performs a few things such as + * Non-Max Suppression to remove overlapping detection boxes. + * Score conversion and background class removal. + + Outputs adhere to the following conventions: + * Classes are integers in [0, num_classes); background classes are removed + and the first non-background class is mapped to 0. If the model produces + class-agnostic detections, then no output is produced for classes. + * Boxes are to be interpreted as being in [y_min, x_min, y_max, x_max] + format and normalized relative to the image window. + * `num_detections` is provided for settings where detections are padded to a + fixed number of boxes. + * We do not specifically assume any kind of probabilistic interpretation + of the scores --- the only important thing is their relative ordering. + Thus implementations of the postprocess function are free to output + logits, probabilities, calibrated probabilities, or anything else. + + Args: + prediction_dict: a dictionary holding prediction tensors. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **params: Additional keyword arguments for specific implementations of + DetectionModel. + + Returns: + detections: a dictionary containing the following fields + detection_boxes: [batch, max_detections, 4] + detection_scores: [batch, max_detections] + detection_classes: [batch, max_detections] + (If a model is producing class-agnostic detections, this field may be + missing) + detection_masks: [batch, max_detections, mask_height, mask_width] + (optional) + detection_keypoints: [batch, max_detections, num_keypoints, 2] + (optional) + detection_keypoint_scores: [batch, max_detections, num_keypoints] + (optional) + detection_surface_coords: [batch, max_detections, mask_height, + mask_width, 2] (optional) + num_detections: [batch] + + In addition to the above fields this stage also outputs the following + raw tensors: + + raw_detection_boxes: [batch, total_detections, 4] tensor containing + all detection boxes from `prediction_dict` in the format + [ymin, xmin, ymax, xmax] and normalized co-ordinates. + raw_detection_scores: [batch, total_detections, + num_classes_with_background] tensor of class score logits for + raw detection boxes. + """ + pass + + @abc.abstractmethod + def loss(self, prediction_dict, true_image_shapes): + """Compute scalar loss tensors with respect to provided groundtruth. + + Calling this function requires that groundtruth tensors have been + provided via the provide_groundtruth function. 
+ + Args: + prediction_dict: a dictionary holding predicted tensors + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + a dictionary mapping strings (loss names) to scalar tensors representing + loss values. + """ + pass + + def provide_groundtruth( + self, + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_masks_list=None, + groundtruth_keypoints_list=None, + groundtruth_keypoint_visibilities_list=None, + groundtruth_dp_num_points_list=None, + groundtruth_dp_part_ids_list=None, + groundtruth_dp_surface_coords_list=None, + groundtruth_track_ids_list=None, + groundtruth_temporal_offsets_list=None, + groundtruth_track_match_flags_list=None, + groundtruth_weights_list=None, + groundtruth_confidences_list=None, + groundtruth_is_crowd_list=None, + groundtruth_group_of_list=None, + groundtruth_area_list=None, + is_annotated_list=None, + groundtruth_labeled_classes=None, + groundtruth_verified_neg_classes=None, + groundtruth_not_exhaustive_classes=None): + """Provide groundtruth tensors. + + Args: + groundtruth_boxes_list: a list of 2-D tf.float32 tensors of shape + [num_boxes, 4] containing coordinates of the groundtruth boxes. + Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] + format and assumed to be normalized and clipped + relative to the image window with y_min <= y_max and x_min <= x_max. + groundtruth_classes_list: a list of 2-D tf.float32 one-hot (or k-hot) + tensors of shape [num_boxes, num_classes] containing the class targets + with the 0th index assumed to map to the first non-background class. + groundtruth_masks_list: a list of 3-D tf.float32 tensors of + shape [num_boxes, height_in, width_in] containing instance + masks with values in {0, 1}. If None, no masks are provided. + Mask resolution `height_in`x`width_in` must agree with the resolution + of the input image tensor provided to the `preprocess` function. + groundtruth_keypoints_list: a list of 3-D tf.float32 tensors of + shape [num_boxes, num_keypoints, 2] containing keypoints. + Keypoints are assumed to be provided in normalized coordinates and + missing keypoints should be encoded as NaN (but it is recommended to use + `groundtruth_keypoint_visibilities_list`). + groundtruth_keypoint_visibilities_list: a list of 3-D tf.bool tensors + of shape [num_boxes, num_keypoints] containing keypoint visibilities. + groundtruth_dp_num_points_list: a list of 1-D tf.int32 tensors of shape + [num_boxes] containing the number of DensePose sampled points. + groundtruth_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape + [num_boxes, max_sampled_points] containing the DensePose part ids + (0-indexed) for each sampled point. Note that there may be padding. + groundtruth_dp_surface_coords_list: a list of 3-D tf.float32 tensors of + shape [num_boxes, max_sampled_points, 4] containing the DensePose + surface coordinates for each sampled point. Note that there may be + padding. + groundtruth_track_ids_list: a list of 1-D tf.int32 tensors of shape + [num_boxes] containing the track IDs of groundtruth objects. + groundtruth_temporal_offsets_list: a list of 2-D tf.float32 tensors + of shape [num_boxes, 2] containing the spatial offsets of objects' + centers compared with the previous frame. 
+ groundtruth_track_match_flags_list: a list of 1-D tf.float32 tensors + of shape [num_boxes] containing 0-1 flags that indicate if an object + has existed in the previous frame. + groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape + [num_boxes, num_classes] containing class confidences for groundtruth + boxes. + groundtruth_is_crowd_list: A list of 1-D tf.bool tensors of shape + [num_boxes] containing is_crowd annotations. + groundtruth_group_of_list: A list of 1-D tf.bool tensors of shape + [num_boxes] containing group_of annotations. + groundtruth_area_list: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing the area (in the original absolute coordinates) + of the annotations. + is_annotated_list: A list of scalar tf.bool tensors indicating whether + images have been labeled or not. + groundtruth_labeled_classes: A list of 1-D tf.float32 tensors of shape + [num_classes], containing label indices encoded as k-hot of the classes + that are exhaustively annotated. + groundtruth_verified_neg_classes: A list of 1-D tf.float32 tensors of + shape [num_classes], containing a K-hot representation of classes + which were verified as not present in the image. + groundtruth_not_exhaustive_classes: A list of 1-D tf.float32 tensors of + shape [num_classes], containing a K-hot representation of classes + which don't have all of their instances marked exhaustively. + """ + self._groundtruth_lists[fields.BoxListFields.boxes] = groundtruth_boxes_list + self._groundtruth_lists[ + fields.BoxListFields.classes] = groundtruth_classes_list + if groundtruth_weights_list: + self._groundtruth_lists[fields.BoxListFields. + weights] = groundtruth_weights_list + if groundtruth_confidences_list: + self._groundtruth_lists[fields.BoxListFields. 
+ confidences] = groundtruth_confidences_list + if groundtruth_masks_list: + self._groundtruth_lists[ + fields.BoxListFields.masks] = groundtruth_masks_list + if groundtruth_keypoints_list: + self._groundtruth_lists[ + fields.BoxListFields.keypoints] = groundtruth_keypoints_list + if groundtruth_keypoint_visibilities_list: + self._groundtruth_lists[ + fields.BoxListFields.keypoint_visibilities] = ( + groundtruth_keypoint_visibilities_list) + if groundtruth_dp_num_points_list: + self._groundtruth_lists[ + fields.BoxListFields.densepose_num_points] = ( + groundtruth_dp_num_points_list) + if groundtruth_dp_part_ids_list: + self._groundtruth_lists[ + fields.BoxListFields.densepose_part_ids] = ( + groundtruth_dp_part_ids_list) + if groundtruth_dp_surface_coords_list: + self._groundtruth_lists[ + fields.BoxListFields.densepose_surface_coords] = ( + groundtruth_dp_surface_coords_list) + if groundtruth_track_ids_list: + self._groundtruth_lists[ + fields.BoxListFields.track_ids] = groundtruth_track_ids_list + if groundtruth_temporal_offsets_list: + self._groundtruth_lists[ + fields.BoxListFields.temporal_offsets] = ( + groundtruth_temporal_offsets_list) + if groundtruth_track_match_flags_list: + self._groundtruth_lists[ + fields.BoxListFields.track_match_flags] = ( + groundtruth_track_match_flags_list) + if groundtruth_is_crowd_list: + self._groundtruth_lists[ + fields.BoxListFields.is_crowd] = groundtruth_is_crowd_list + if groundtruth_group_of_list: + self._groundtruth_lists[ + fields.BoxListFields.group_of] = groundtruth_group_of_list + if groundtruth_area_list: + self._groundtruth_lists[ + fields.InputDataFields.groundtruth_area] = groundtruth_area_list + if is_annotated_list: + self._groundtruth_lists[ + fields.InputDataFields.is_annotated] = is_annotated_list + if groundtruth_labeled_classes: + self._groundtruth_lists[ + fields.InputDataFields + .groundtruth_labeled_classes] = groundtruth_labeled_classes + if groundtruth_verified_neg_classes: + self._groundtruth_lists[ + fields.InputDataFields + .groundtruth_verified_neg_classes] = groundtruth_verified_neg_classes + if groundtruth_not_exhaustive_classes: + self._groundtruth_lists[ + fields.InputDataFields + .groundtruth_not_exhaustive_classes] = ( + groundtruth_not_exhaustive_classes) + + @abc.abstractmethod + def regularization_losses(self): + """Returns a list of regularization losses for this model. + + Returns a list of regularization losses for this model that the estimator + needs to use during training/optimization. + + Returns: + A list of regularization loss tensors. + """ + pass + + @abc.abstractmethod + def restore_map(self, + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=False): + """Returns a map of variables to load from a foreign checkpoint. + + Returns a map of variable names to load from a checkpoint to variables in + the model graph. This enables the model to initialize based on weights from + another task. For example, the feature extractor variables from a + classification model can be used to bootstrap training of an object + detector. When loading from an object detection model, the checkpoint model + should have the same parameters as this detection model with exception of + the num_classes parameter. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`. Default 'detection'. 
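As the assignments above show, provide_groundtruth simply stores the supplied per-image lists under standard field keys so that loss() can read them back; a minimal sketch of the calling order (assuming `detection_model` is a concrete DetectionModel subclass and `prediction_dict` / `true_image_shapes` come from predict(); the tensors are toy values):

import tensorflow as tf

groundtruth_boxes_list = [tf.constant([[0.1, 0.1, 0.5, 0.5]], tf.float32)]  # normalized [ymin, xmin, ymax, xmax]
groundtruth_classes_list = [tf.constant([[1.0, 0.0]], tf.float32)]          # one-hot over 2 classes
detection_model.provide_groundtruth(
    groundtruth_boxes_list=groundtruth_boxes_list,
    groundtruth_classes_list=groundtruth_classes_list,
    groundtruth_weights_list=[tf.constant([1.0], tf.float32)])
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)      # maps loss names to scalar tensors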
+ load_all_detection_checkpoint_vars: whether to load all variables (when + `fine_tune_checkpoint_type` is `detection`). If False, only variables + within the feature extractor scope are included. Default False. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + """ + pass + + @abc.abstractmethod + def restore_from_objects(self, fine_tune_checkpoint_type='detection'): + """Returns a map of variables to load from a foreign checkpoint. + + Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module + or Checkpoint). This enables the model to initialize based on weights from + another task. For example, the feature extractor variables from a + classification model can be used to bootstrap training of an object + detector. When loading from an object detection model, the checkpoint model + should have the same parameters as this detection model with exception of + the num_classes parameter. + + Note that this function is intended to be used to restore Keras-based + models when running Tensorflow 2, whereas restore_map (above) is intended + to be used to restore Slim-based models when running Tensorflow 1.x. + + TODO(jonathanhuang,rathodv): Check tf_version and raise unimplemented + error for both restore_map and restore_from_objects depending on version. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`. Default 'detection'. + + Returns: + A dict mapping keys to Trackable objects (tf.Module or Checkpoint). + """ + pass + + @abc.abstractmethod + def updates(self): + """Returns a list of update operators for this model. + + Returns a list of update operators for this model that must be executed at + each training step. The estimator's train op needs to have a control + dependency on these updates. + + Returns: + A list of update operators. + """ + pass + + def call(self, images): + """Returns detections from a batch of images. + + This method calls the preprocess, predict and postprocess function + sequentially and returns the output. + + Args: + images: a [batch_size, height, width, channels] float tensor. + + Returns: + detetcions: The dict of tensors returned by the postprocess function. + """ + + preprocessed_images, shapes = self.preprocess(images) + prediction_dict = self.predict(preprocessed_images, shapes) + return self.postprocess(prediction_dict, shapes) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/model.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/model.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92bbcc60692550278f49643b35dae7db66c0c8ba Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/model.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/model_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/model_test.py new file mode 100644 index 0000000000000000000000000000000000000000..fcc36c03d4a77a78193975766b5e96b37a32b075 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/model_test.py @@ -0,0 +1,101 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
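The call() method defined above is just the inference chain preprocess -> predict -> postprocess. Driven by hand it looks like the sketch below, reusing FakeModel from the test module added next (the smallest concrete subclass in this patch) and assuming TF2 eager execution; a real detector would instead return the detection_* keys described earlier:

import tensorflow as tf
from object_detection.core import model_test  # FakeModel lives in the test file below

detector = model_test.FakeModel()
images = tf.zeros((1, 128, 128, 3), tf.float32)
preprocessed, true_shapes = detector.preprocess(images)
prediction_dict = detector.predict(preprocessed, true_shapes)
detections = detector.postprocess(prediction_dict, true_shapes)  # the test exercises the same path as detector(images)
print(detections['prediction'].shape)                            # (1, 128, 128, 16)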
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for model API.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.core import model +from object_detection.utils import test_case + + +class FakeModel(model.DetectionModel): + + def __init__(self): + + # sub-networks containing weights of different shapes. + self._network1 = tf.keras.Sequential([ + tf.keras.layers.Conv2D(8, 1) + ]) + + self._network2 = tf.keras.Sequential([ + tf.keras.layers.Conv2D(16, 1) + ]) + + super(FakeModel, self).__init__(num_classes=0) + + def preprocess(self, images): + return images, tf.shape(images) + + def predict(self, images, shapes): + return {'prediction': self._network2(self._network1(images))} + + def postprocess(self, prediction_dict, shapes): + return prediction_dict + + def loss(self): + return tf.constant(0.0) + + def updates(self): + return [] + + def restore_map(self): + return {} + + def restore_from_objects(self, fine_tune_checkpoint_type): + pass + + def regularization_losses(self): + return [] + + +class ModelTest(test_case.TestCase): + + def test_model_call(self): + + detection_model = FakeModel() + + def graph_fn(): + return detection_model(tf.zeros((1, 128, 128, 3))) + + result = self.execute(graph_fn, []) + self.assertEqual(result['prediction'].shape, + (1, 128, 128, 16)) + + def test_freeze(self): + + detection_model = FakeModel() + detection_model(tf.zeros((1, 128, 128, 3))) + + net1_var_shapes = [tuple(var.get_shape().as_list()) for var in + detection_model._network1.trainable_variables] + + del detection_model + + detection_model = FakeModel() + detection_model._network2.trainable = False + detection_model(tf.zeros((1, 128, 128, 3))) + + var_shapes = [tuple(var.get_shape().as_list()) for var in + detection_model._network1.trainable_variables] + + self.assertEqual(set(net1_var_shapes), set(var_shapes)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/multiclass_nms_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/multiclass_nms_test.py new file mode 100644 index 0000000000000000000000000000000000000000..80be89da926115bc55eaab5a5c471d4f5ae0bca1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/multiclass_nms_test.py @@ -0,0 +1,583 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
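The NMS tests that follow all reuse the same eight shared boxes with score_thresh=0.1 and iou_thresh=0.5, and their expected outputs can be sanity-checked by hand with a plain IoU computation (a standalone numeric check, not part of the patch):

import numpy as np

def iou(b1, b2):
    # boxes in [ymin, xmin, ymax, xmax]
    ymin, xmin = max(b1[0], b2[0]), max(b1[1], b2[1])
    ymax, xmax = min(b1[2], b2[2]), min(b1[3], b2[3])
    inter = max(0.0, ymax - ymin) * max(0.0, xmax - xmin)
    a1 = (b1[2] - b1[0]) * (b1[3] - b1[1])
    a2 = (b2[2] - b2[0]) * (b2[3] - b2[1])
    return inter / (a1 + a2 - inter)

print(iou([0, 0, 1, 1], [0, 0.1, 1, 1.1]))  # ~0.818 > 0.5, so the 0.75-score box is suppressed
print(iou([0, 0, 1, 1], [0, 10, 1, 11]))    # 0.0, so both boxes survive hard NMS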
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for tensorflow_models.object_detection.core.post_processing.""" +import numpy as np +import tensorflow.compat.v1 as tf +from object_detection.core import post_processing +from object_detection.core import standard_fields as fields +from object_detection.utils import test_case + + +class MulticlassNonMaxSuppressionTest(test_case.TestCase): + + def test_multiclass_nms_select_with_shared_boxes_cpu_only(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + + def graph_fn(boxes, scores): + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + nms, _ = post_processing.multiclass_non_max_suppression( + boxes, scores, score_thresh, iou_thresh, max_output_size) + return (nms.get(), nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes)) + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + (nms_corners_output, nms_scores_output, + nms_classes_output) = self.execute_cpu(graph_fn, [boxes, scores]) + self.assertAllClose(nms_corners_output, exp_nms_corners) + self.assertAllClose(nms_scores_output, exp_nms_scores) + self.assertAllClose(nms_classes_output, exp_nms_classes) + + def test_multiclass_nms_select_with_shared_boxes_pad_to_max_output_size(self): + boxes = np.array([[[0, 0, 1, 1]], + [[0, 0.1, 1, 1.1]], + [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], + [[0, 10.1, 1, 11.1]], + [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], + [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], + [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], + [.01, .85], [.01, .5]], np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_size_per_class = 4 + max_output_size = 5 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + + def graph_fn(boxes, scores): + nms, num_valid_nms_boxes = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class, + max_total_size=max_output_size, + pad_to_max_output_size=True) + return [nms.get(), nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), num_valid_nms_boxes] + + [nms_corners_output, nms_scores_output, nms_classes_output, + num_valid_nms_boxes] = self.execute(graph_fn, [boxes, scores]) + + self.assertEqual(num_valid_nms_boxes, 4) + self.assertAllClose(nms_corners_output[0:num_valid_nms_boxes], + exp_nms_corners) + self.assertAllClose(nms_scores_output[0:num_valid_nms_boxes], + exp_nms_scores) + self.assertAllClose(nms_classes_output[0:num_valid_nms_boxes], + exp_nms_classes) + + def 
test_multiclass_nms_select_with_shared_boxes_given_keypoints(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + num_keypoints = 6 + keypoints = np.tile(np.reshape(range(8), [8, 1, 1]), + [1, num_keypoints, 2]).astype(np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + def graph_fn(boxes, scores, keypoints): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True, + additional_fields={fields.BoxListFields.keypoints: keypoints}) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms.get_field(fields.BoxListFields.keypoints), nms_valid + ] + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + exp_nms_keypoints = np.tile( + np.reshape(np.array([3, 0, 6, 5], np.float32), [4, 1, 1]), + [1, num_keypoints, 2]) + (nms_corners_output, nms_scores_output, nms_classes_output, nms_keypoints, + nms_valid) = self.execute(graph_fn, [boxes, scores, keypoints]) + + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + self.assertAllEqual(nms_keypoints[:nms_valid], exp_nms_keypoints) + + def test_multiclass_nms_with_shared_boxes_given_keypoint_heatmaps(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + + num_boxes = boxes.shape[0] + heatmap_height = 5 + heatmap_width = 5 + num_keypoints = 17 + keypoint_heatmaps = np.ones( + [num_boxes, heatmap_height, heatmap_width, num_keypoints], + dtype=np.float32) + + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + exp_nms_keypoint_heatmaps = np.ones( + (4, heatmap_height, heatmap_width, num_keypoints), dtype=np.float32) + + def graph_fn(boxes, scores, keypoint_heatmaps): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True, + additional_fields={ + fields.BoxListFields.keypoint_heatmaps: keypoint_heatmaps + }) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms.get_field(fields.BoxListFields.keypoint_heatmaps), nms_valid + ] + + (nms_corners_output, nms_scores_output, nms_classes_output, + nms_keypoint_heatmaps, + nms_valid) = self.execute(graph_fn, [boxes, scores, keypoint_heatmaps]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + 
self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + self.assertAllEqual(nms_keypoint_heatmaps[:nms_valid], + exp_nms_keypoint_heatmaps) + + def test_multiclass_nms_with_additional_fields(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + + coarse_boxes_key = 'coarse_boxes' + coarse_boxes = np.array( + [[0.1, 0.1, 1.1, 1.1], [0.1, 0.2, 1.1, 1.2], [0.1, -0.2, 1.1, 1.0], + [0.1, 10.1, 1.1, 11.1], [0.1, 10.2, 1.1, 11.2], [ + 0.1, 100.1, 1.1, 101.1 + ], [0.1, 1000.1, 1.1, 1002.1], [0.1, 1000.1, 1.1, 1002.2]], np.float32) + + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = np.array([[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]], dtype=np.float32) + + exp_nms_coarse_corners = np.array([[0.1, 10.1, 1.1, 11.1], + [0.1, 0.1, 1.1, 1.1], + [0.1, 1000.1, 1.1, 1002.1], + [0.1, 100.1, 1.1, 101.1]], + dtype=np.float32) + + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + + def graph_fn(boxes, scores, coarse_boxes): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True, + additional_fields={coarse_boxes_key: coarse_boxes}) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms.get_field(coarse_boxes_key), + nms_valid, + ] + + (nms_corners_output, nms_scores_output, nms_classes_output, + nms_coarse_corners, + nms_valid) = self.execute(graph_fn, [boxes, scores, coarse_boxes]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + self.assertAllEqual(nms_coarse_corners[:nms_valid], exp_nms_coarse_corners) + + def test_multiclass_nms_select_with_shared_boxes_given_masks(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + num_classes = 2 + mask_height = 3 + mask_width = 3 + masks = np.tile( + np.reshape(range(8), [8, 1, 1, 1]), + [1, num_classes, mask_height, mask_width]) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + exp_nms_masks_tensor = np.tile( + np.reshape(np.array([3, 0, 6, 5], np.float32), [4, 1, 1]), + [1, mask_height, mask_width]) + + def graph_fn(boxes, scores, masks): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + masks=masks, + pad_to_max_output_size=True) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms.get_field(fields.BoxListFields.masks), nms_valid + ] + + (nms_corners_output, nms_scores_output, nms_classes_output, nms_masks, + nms_valid) = 
self.execute(graph_fn, [boxes, scores, masks]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + self.assertAllEqual(nms_masks[:nms_valid], exp_nms_masks_tensor) + + def test_multiclass_nms_select_with_clip_window(self): + boxes = np.array([[[0, 0, 10, 10]], [[1, 1, 11, 11]]], np.float32) + scores = np.array([[.9], [.75]], np.float32) + clip_window = np.array([5, 4, 8, 7], np.float32) + score_thresh = 0.0 + iou_thresh = 0.5 + max_output_size = 100 + + exp_nms_corners = [[5, 4, 8, 7]] + exp_nms_scores = [.9] + exp_nms_classes = [0] + + def graph_fn(boxes, scores, clip_window): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True, + clip_window=clip_window) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), nms_valid + ] + + (nms_corners_output, nms_scores_output, nms_classes_output, + nms_valid) = self.execute(graph_fn, [boxes, scores, clip_window]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + + def test_multiclass_nms_select_with_clip_window_change_coordinate_frame(self): + boxes = np.array([[[0, 0, 10, 10]], [[1, 1, 11, 11]]], np.float32) + scores = np.array([[.9], [.75]], np.float32) + clip_window = np.array([5, 4, 8, 7], np.float32) + score_thresh = 0.0 + iou_thresh = 0.5 + max_output_size = 100 + + exp_nms_corners = [[0, 0, 1, 1]] + exp_nms_scores = [.9] + exp_nms_classes = [0] + + def graph_fn(boxes, scores, clip_window): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + clip_window=clip_window, + pad_to_max_output_size=True, + change_coordinate_frame=True) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), nms_valid + ] + + (nms_corners_output, nms_scores_output, nms_classes_output, + nms_valid) = self.execute(graph_fn, [boxes, scores, clip_window]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + + def test_multiclass_nms_select_with_per_class_cap(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_size_per_class = 2 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002]] + exp_nms_scores = [.95, .9, .85] + exp_nms_classes = [0, 0, 1] + + def graph_fn(boxes, scores): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class, + pad_to_max_output_size=True) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms_valid + ] + + (nms_corners_output, nms_scores_output, + 
nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + + def test_multiclass_nms_select_with_total_cap(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_size_per_class = 4 + max_total_size = 2 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1]] + exp_nms_scores = [.95, .9] + exp_nms_classes = [0, 0] + + def graph_fn(boxes, scores): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class, + max_total_size, + pad_to_max_output_size=True) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms_valid + ] + + (nms_corners_output, nms_scores_output, + nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + + def test_multiclass_nms_threshold_then_select_with_shared_boxes(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9], [.75], [.6], [.95], [.5], [.3], [.01], [.01]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 3 + + exp_nms = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 100, 1, 101]] + + def graph_fn(boxes, scores): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True) + return nms.get(), nms_valid + + nms_output, nms_valid = self.execute(graph_fn, [boxes, scores]) + self.assertAllClose(nms_output[:nms_valid], exp_nms) + + def test_multiclass_nms_select_with_separate_boxes(self): + boxes = np.array( + [[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], + [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [ + 0, 10, 1, 11 + ]], [[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], + [[0, 100, 1, 101], [0, 100, 1, 101]], + [[0, 1000, 1, 1002], [0, 999, 2, 1004]], + [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + score_thresh = 0.1 + iou_thresh = .5 + max_output_size = 4 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 999, 2, 1004], + [0, 100, 1, 101]] + exp_nms_scores = [.95, .9, .85, .3] + exp_nms_classes = [0, 0, 1, 0] + + def graph_fn(boxes, scores): + nms, nms_valid = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_output_size, + pad_to_max_output_size=True) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes), + nms_valid + ] + + (nms_corners_output, 
nms_scores_output, + nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores]) + self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) + self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) + self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) + + def test_multiclass_soft_nms_select_with_shared_boxes_cpu_only(self): + boxes = np.array( + [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], + [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], + [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) + scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], + [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], + np.float32) + score_thresh = 0.1 + iou_thresh = 1.0 + max_output_size = 4 + + exp_nms_corners = [[0, 10, 1, 11], + [0, 0, 1, 1], + [0, 1000, 1, 1002], + [0, 0.1, 1, 1.1]] + exp_nms_scores = [.95, .9, .85, .384] + exp_nms_classes = [0, 0, 1, 0] + + def graph_fn(boxes, scores): + nms, _ = post_processing.multiclass_non_max_suppression( + boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class=max_output_size, + max_total_size=max_output_size, + soft_nms_sigma=0.5) + return [ + nms.get(), + nms.get_field(fields.BoxListFields.scores), + nms.get_field(fields.BoxListFields.classes) + ] + + (nms_corners_output, nms_scores_output, + nms_classes_output) = self.execute_cpu(graph_fn, [boxes, scores]) + self.assertAllClose( + nms_corners_output, exp_nms_corners, rtol=1e-2, atol=1e-2) + self.assertAllClose(nms_scores_output, exp_nms_scores, rtol=1e-2, atol=1e-2) + self.assertAllClose( + nms_classes_output, exp_nms_classes, rtol=1e-2, atol=1e-2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/post_processing.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/post_processing.py new file mode 100644 index 0000000000000000000000000000000000000000..76b44bb86241332c8476edbd2ec4cde84099e238 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/post_processing.py @@ -0,0 +1,1241 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Post-processing operations on detected boxes.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import numpy as np +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import standard_fields as fields +from object_detection.utils import shape_utils + +_NMS_TILE_SIZE = 512 + + +def batch_iou(boxes1, boxes2): + """Calculates the overlap between proposal and ground truth boxes. + + Some `boxes2` may have been padded. 
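The .384 expected in the soft-NMS test above is consistent with a Gaussian re-scoring of the overlapped 0.75-score box, roughly s <- s * exp(-IoU^2 / (2 * soft_nms_sigma)) in the spirit of Bodla et al.; the exact kernel is internal to tf.image.non_max_suppression_with_scores, so treat this only as a numeric sanity check:

import numpy as np

iou = 0.9 / 1.1                                 # overlap of [0, 0, 1, 1] with [0, 0.1, 1, 1.1]
decayed = 0.75 * np.exp(-iou ** 2 / (2 * 0.5))  # soft_nms_sigma = 0.5 in the test
print(round(float(decayed), 3))                 # 0.384, matching the last entry of exp_nms_scores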
The returned `iou` tensor for these + boxes will be -1. + + Args: + boxes1: a tensor with a shape of [batch_size, N, 4]. N is the number of + proposals before groundtruth assignment. The last dimension is the pixel + coordinates in [ymin, xmin, ymax, xmax] form. + boxes2: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This + tensor might have paddings with a negative value. + + Returns: + iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES]. + """ + with tf.name_scope('BatchIOU'): + y1_min, x1_min, y1_max, x1_max = tf.split( + value=boxes1, num_or_size_splits=4, axis=2) + y2_min, x2_min, y2_max, x2_max = tf.split( + value=boxes2, num_or_size_splits=4, axis=2) + + # Calculates the intersection area. + intersection_xmin = tf.maximum(x1_min, tf.transpose(x2_min, [0, 2, 1])) + intersection_xmax = tf.minimum(x1_max, tf.transpose(x2_max, [0, 2, 1])) + intersection_ymin = tf.maximum(y1_min, tf.transpose(y2_min, [0, 2, 1])) + intersection_ymax = tf.minimum(y1_max, tf.transpose(y2_max, [0, 2, 1])) + intersection_area = tf.maximum( + (intersection_xmax - intersection_xmin), 0) * tf.maximum( + (intersection_ymax - intersection_ymin), 0) + + # Calculates the union area. + area1 = (y1_max - y1_min) * (x1_max - x1_min) + area2 = (y2_max - y2_min) * (x2_max - x2_min) + # Adds a small epsilon to avoid divide-by-zero. + union_area = area1 + tf.transpose(area2, + [0, 2, 1]) - intersection_area + 1e-8 + + # Calculates IoU. + iou = intersection_area / union_area + + # Fills -1 for padded ground truth boxes. + padding_mask = tf.logical_and( + tf.less(intersection_xmax, 0), tf.less(intersection_ymax, 0)) + iou = tf.where(padding_mask, -tf.ones_like(iou), iou) + + return iou + + +def _self_suppression(iou, iou_threshold, loop_condition, iou_sum): + """Bounding-boxes self-suppression loop body. + + Args: + iou: A float Tensor with shape [1, num_boxes, max_num_instance]: IOUs. + iou_threshold: A scalar, representing IOU threshold. + loop_condition: The loop condition returned from last iteration. + iou_sum: iou_sum_new returned from last iteration. + + Returns: + iou_suppressed: A float Tensor with shape [1, num_boxes, max_num_instance], + IOU after suppression. + iou_threshold: A scalar, representing IOU threshold. + loop_condition: Bool Tensor of shape [], the loop condition. + iou_sum_new: The new IOU sum. + """ + del loop_condition + can_suppress_others = tf.cast( + tf.reshape(tf.reduce_max(iou, 1) <= iou_threshold, [1, -1, 1]), iou.dtype) + iou_suppressed = tf.reshape( + tf.cast( + tf.reduce_max(can_suppress_others * iou, 1) <= iou_threshold, + iou.dtype), [1, -1, 1]) * iou + iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2]) + return [ + iou_suppressed, iou_threshold, + tf.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new + ] + + +def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx): + """Bounding-boxes cross-suppression loop body. + + Args: + boxes: A float Tensor of shape [1, anchors, 4], representing boxes. + box_slice: A float Tensor of shape [1, _NMS_TILE_SIZE, 4], the box tile + returned from last iteration + iou_threshold: A scalar, representing IOU threshold. + inner_idx: A scalar, representing inner index. + + Returns: + boxes: A float Tensor of shape [1, anchors, 4], representing boxes. + ret_slice: A float Tensor of shape [1, _NMS_TILE_SIZE, 4], the box tile + after suppression + iou_threshold: A scalar, representing IOU threshold. + inner_idx: A scalar, inner index incremented. 
+ """ + new_slice = tf.slice(boxes, [0, inner_idx * _NMS_TILE_SIZE, 0], + [1, _NMS_TILE_SIZE, 4]) + iou = batch_iou(new_slice, box_slice) + ret_slice = tf.expand_dims( + tf.cast(tf.reduce_all(iou < iou_threshold, [1]), box_slice.dtype), + 2) * box_slice + return boxes, ret_slice, iou_threshold, inner_idx + 1 + + +def _suppression_loop_body(boxes, iou_threshold, output_size, idx): + """Process boxes in the range [idx*_NMS_TILE_SIZE, (idx+1)*_NMS_TILE_SIZE). + + Args: + boxes: a tensor with a shape of [1, anchors, 4]. + iou_threshold: a float representing the threshold for deciding whether boxes + overlap too much with respect to IOU. + output_size: an int32 tensor of size [1]. Representing the number of + selected boxes. + idx: an integer scalar representing induction variable. + + Returns: + boxes: updated boxes. + iou_threshold: pass down iou_threshold to the next iteration. + output_size: the updated output_size. + idx: the updated induction variable. + """ + num_tiles = tf.shape(boxes)[1] // _NMS_TILE_SIZE + + # Iterates over tiles that can possibly suppress the current tile. + box_slice = tf.slice(boxes, [0, idx * _NMS_TILE_SIZE, 0], + [1, _NMS_TILE_SIZE, 4]) + _, box_slice, _, _ = tf.while_loop( + lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx, + _cross_suppression, [boxes, box_slice, iou_threshold, + tf.constant(0)]) + + # Iterates over the current tile to compute self-suppression. + iou = batch_iou(box_slice, box_slice) + mask = tf.expand_dims( + tf.reshape(tf.range(_NMS_TILE_SIZE), [1, -1]) > tf.reshape( + tf.range(_NMS_TILE_SIZE), [-1, 1]), 0) + iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype) + suppressed_iou, _, _, _ = tf.while_loop( + lambda _iou, _threshold, loop_condition, _iou_sum: loop_condition, + _self_suppression, + [iou, iou_threshold, + tf.constant(True), + tf.reduce_sum(iou, [1, 2])]) + suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0 + box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2) + + # Uses box_slice to update the input boxes. + mask = tf.reshape( + tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1]) + boxes = tf.tile(tf.expand_dims(box_slice, [1]), + [1, num_tiles, 1, 1]) * mask + tf.reshape( + boxes, [1, num_tiles, _NMS_TILE_SIZE, 4]) * (1 - mask) + boxes = tf.reshape(boxes, [1, -1, 4]) + + # Updates output_size. + output_size += tf.reduce_sum( + tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1]) + return boxes, iou_threshold, output_size, idx + 1 + + +def partitioned_non_max_suppression_padded(boxes, + scores, + max_output_size, + iou_threshold=0.5, + score_threshold=float('-inf')): + """A tiled version of [`tf.image.non_max_suppression_padded`](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression_padded). + + The overall design of the algorithm is to handle boxes tile-by-tile: + + boxes = boxes.pad_to_multiple_of(tile_size) + num_tiles = len(boxes) // tile_size + output_boxes = [] + for i in range(num_tiles): + box_tile = boxes[i*tile_size : (i+1)*tile_size] + for j in range(i - 1): + suppressing_tile = boxes[j*tile_size : (j+1)*tile_size] + iou = batch_iou(box_tile, suppressing_tile) + # if the box is suppressed in iou, clear it to a dot + box_tile *= _update_boxes(iou) + # Iteratively handle the diagonal tile. 
+ iou = _box_overlap(box_tile, box_tile) + iou_changed = True + while iou_changed: + # boxes that are not suppressed by anything else + suppressing_boxes = _get_suppressing_boxes(iou) + # boxes that are suppressed by suppressing_boxes + suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes) + # clear iou to 0 for boxes that are suppressed, as they cannot be used + # to suppress other boxes any more + new_iou = _clear_iou(iou, suppressed_boxes) + iou_changed = (new_iou != iou) + iou = new_iou + # remaining boxes that can still suppress others, are selected boxes. + output_boxes.append(_get_suppressing_boxes(iou)) + if len(output_boxes) >= max_output_size: + break + + Args: + boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. + scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single + score corresponding to each box (each row of boxes). + max_output_size: a scalar integer `Tensor` representing the maximum number + of boxes to be selected by non max suppression. + iou_threshold: a float representing the threshold for deciding whether boxes + overlap too much with respect to IOU. + score_threshold: A float representing the threshold for deciding when to + remove boxes based on score. + + Returns: + selected_indices: a tensor of shape [anchors]. + num_valid_boxes: a scalar int tensor. + nms_proposals: a tensor with a shape of [anchors, 4]. It has + same dtype as input boxes. + nms_scores: a tensor with a shape of [anchors]. It has same + dtype as input scores. + argsort_ids: a tensor of shape [anchors], mapping from input order of boxes + to output order of boxes. + """ + num_boxes = tf.shape(boxes)[0] + pad = tf.cast( + tf.ceil(tf.cast(num_boxes, tf.float32) / _NMS_TILE_SIZE), + tf.int32) * _NMS_TILE_SIZE - num_boxes + + scores, argsort_ids = tf.nn.top_k(scores, k=num_boxes, sorted=True) + boxes = tf.gather(boxes, argsort_ids) + num_boxes = tf.shape(boxes)[0] + num_boxes += pad + boxes = tf.pad( + tf.cast(boxes, tf.float32), [[0, pad], [0, 0]], constant_values=-1) + scores = tf.pad(tf.cast(scores, tf.float32), [[0, pad]]) + + # mask boxes to -1 by score threshold + scores_mask = tf.expand_dims( + tf.cast(scores > score_threshold, boxes.dtype), axis=1) + boxes = ((boxes + 1.) * scores_mask) - 1. + + boxes = tf.expand_dims(boxes, axis=0) + scores = tf.expand_dims(scores, axis=0) + + def _loop_cond(unused_boxes, unused_threshold, output_size, idx): + return tf.logical_and( + tf.reduce_min(output_size) < max_output_size, + idx < num_boxes // _NMS_TILE_SIZE) + + selected_boxes, _, output_size, _ = tf.while_loop( + _loop_cond, _suppression_loop_body, + [boxes, iou_threshold, + tf.zeros([1], tf.int32), + tf.constant(0)]) + idx = num_boxes - tf.cast( + tf.nn.top_k( + tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) * + tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0], + tf.int32) + idx = tf.minimum(idx, num_boxes - 1 - pad) + idx = tf.reshape(idx + tf.reshape(tf.range(1) * num_boxes, [-1, 1]), [-1]) + num_valid_boxes = tf.reduce_sum(output_size) + return (idx, num_valid_boxes, tf.reshape(boxes, [-1, 4]), + tf.reshape(scores, [-1]), argsort_ids) + + +def _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh, + change_coordinate_frame, clip_window): + """Validates boxes, scores and iou_thresh. + + This function validates the boxes, scores, iou_thresh + and if change_coordinate_frame is True, clip_window must be specified. + + Args: + boxes: A [k, q, 4] float32 tensor containing k detections. 
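partitioned_non_max_suppression_padded first pads the box count up to a multiple of _NMS_TILE_SIZE so every tile handled by the while_loop has a fixed shape; the padding arithmetic is a plain ceiling to the next multiple of 512 (a small illustration):

import math

_NMS_TILE_SIZE = 512
num_boxes = 1000
pad = int(math.ceil(num_boxes / _NMS_TILE_SIZE)) * _NMS_TILE_SIZE - num_boxes
print(pad)                                   # 24 padded slots, filled with -1 box coordinates
print((num_boxes + pad) // _NMS_TILE_SIZE)   # 2 tiles for the suppression loop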
`q` can be either + number of classes or 1 depending on whether a separate box is predicted + per class. + scores: A [k, num_classes] float32 tensor containing the scores for each of + the k detections. The scores have to be non-negative when + pad_to_max_output_size is True. + iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap + with previously selected boxes are removed). + change_coordinate_frame: Whether to normalize coordinates after clipping + relative to clip_window (this can only be set to True if a clip_window is + provided) + clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] + representing the window to clip and normalize boxes to before performing + non-max suppression. + + Raises: + ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not + have a valid scores field. + """ + if not 0 <= iou_thresh <= 1.0: + raise ValueError('iou_thresh must be between 0 and 1') + if scores.shape.ndims != 2: + raise ValueError('scores field must be of rank 2') + if shape_utils.get_dim_as_int(scores.shape[1]) is None: + raise ValueError('scores must have statically defined second ' 'dimension') + if boxes.shape.ndims != 3: + raise ValueError('boxes must be of rank 3.') + if not (shape_utils.get_dim_as_int( + boxes.shape[1]) == shape_utils.get_dim_as_int(scores.shape[1]) or + shape_utils.get_dim_as_int(boxes.shape[1]) == 1): + raise ValueError('second dimension of boxes must be either 1 or equal ' + 'to the second dimension of scores') + if shape_utils.get_dim_as_int(boxes.shape[2]) != 4: + raise ValueError('last dimension of boxes must be of size 4.') + if change_coordinate_frame and clip_window is None: + raise ValueError('if change_coordinate_frame is True, then a clip_window' + 'must be specified.') + + +def _clip_window_prune_boxes(sorted_boxes, clip_window, pad_to_max_output_size, + change_coordinate_frame): + """Prune boxes with zero area. + + Args: + sorted_boxes: A BoxList containing k detections. + clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] + representing the window to clip and normalize boxes to before performing + non-max suppression. + pad_to_max_output_size: flag indicating whether to pad to max output size or + not. + change_coordinate_frame: Whether to normalize coordinates after clipping + relative to clip_window (this can only be set to True if a clip_window is + provided). + + Returns: + sorted_boxes: A BoxList containing k detections after pruning. + num_valid_nms_boxes_cumulative: Number of valid NMS boxes + """ + sorted_boxes = box_list_ops.clip_to_window( + sorted_boxes, + clip_window, + filter_nonoverlapping=not pad_to_max_output_size) + # Set the scores of boxes with zero area to -1 to keep the default + # behaviour of pruning out zero area boxes. 
+ sorted_boxes_size = tf.shape(sorted_boxes.get())[0] + non_zero_box_area = tf.cast(box_list_ops.area(sorted_boxes), tf.bool) + sorted_boxes_scores = tf.where( + non_zero_box_area, sorted_boxes.get_field(fields.BoxListFields.scores), + -1 * tf.ones(sorted_boxes_size)) + sorted_boxes.add_field(fields.BoxListFields.scores, sorted_boxes_scores) + num_valid_nms_boxes_cumulative = tf.reduce_sum( + tf.cast(tf.greater_equal(sorted_boxes_scores, 0), tf.int32)) + sorted_boxes = box_list_ops.sort_by_field(sorted_boxes, + fields.BoxListFields.scores) + if change_coordinate_frame: + sorted_boxes = box_list_ops.change_coordinate_frame(sorted_boxes, + clip_window) + return sorted_boxes, num_valid_nms_boxes_cumulative + + +class NullContextmanager(object): + + def __enter__(self): + pass + + def __exit__(self, type_arg, value_arg, traceback_arg): + return False + + +def multiclass_non_max_suppression(boxes, + scores, + score_thresh, + iou_thresh, + max_size_per_class, + max_total_size=0, + clip_window=None, + change_coordinate_frame=False, + masks=None, + boundaries=None, + pad_to_max_output_size=False, + use_partitioned_nms=False, + additional_fields=None, + soft_nms_sigma=0.0, + use_hard_nms=False, + use_cpu_nms=False, + scope=None): + """Multi-class version of non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. It operates independently for each class for + which scores are provided (via the scores field of the input box_list), + pruning boxes with score less than a provided threshold prior to + applying NMS. + + Please note that this operation is performed on *all* classes, therefore any + background classes should be removed prior to calling this function. + + Selected boxes are guaranteed to be sorted in decreasing order by score (but + the sort is not guaranteed to be stable). + + Args: + boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either + number of classes or 1 depending on whether a separate box is predicted + per class. + scores: A [k, num_classes] float32 tensor containing the scores for each of + the k detections. The scores have to be non-negative when + pad_to_max_output_size is True. + score_thresh: scalar threshold for score (low scoring boxes are removed). + iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap + with previously selected boxes are removed). + max_size_per_class: maximum number of retained boxes per class. + max_total_size: maximum number of boxes retained over all classes. By + default returns all boxes retained after capping boxes per class. + clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] + representing the window to clip and normalize boxes to before performing + non-max suppression. + change_coordinate_frame: Whether to normalize coordinates after clipping + relative to clip_window (this can only be set to True if a clip_window + is provided) + masks: (optional) a [k, q, mask_height, mask_width] float32 tensor + containing box masks. `q` can be either number of classes or 1 depending + on whether a separate mask is predicted per class. + boundaries: (optional) a [k, q, boundary_height, boundary_width] float32 + tensor containing box boundaries. `q` can be either number of classes or 1 + depending on whether a separate boundary is predicted per class. 
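Clipping and change_coordinate_frame, handled above by _clip_window_prune_boxes, reduce to simple arithmetic; the numpy mirror below reproduces the expected corners of the clip_window tests earlier in this patch (an illustration of the math, not the TF code path):

import numpy as np

clip_window = np.array([5., 4., 8., 7.])  # [ymin, xmin, ymax, xmax]
box = np.array([0., 0., 10., 10.])

clipped = np.concatenate([np.maximum(box[:2], clip_window[:2]),
                          np.minimum(box[2:], clip_window[2:])])
h, w = clip_window[2] - clip_window[0], clip_window[3] - clip_window[1]
renormalized = (clipped - np.tile(clip_window[:2], 2)) / np.array([h, w, h, w])
print(clipped)        # [5. 4. 8. 7.] -> expected box when only clip_window is set
print(renormalized)   # [0. 0. 1. 1.] -> expected box with change_coordinate_frame=True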
+ pad_to_max_output_size: If true, the output nmsed boxes are padded to be of + length `max_size_per_class`. Defaults to false. + use_partitioned_nms: If true, use partitioned version of + non_max_suppression. + additional_fields: (optional) If not None, a dictionary that maps keys to + tensors whose first dimensions are all of size `k`. After non-maximum + suppression, all tensors corresponding to the selected boxes will be + added to resulting BoxList. + soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter; + See Bodla et al, https://arxiv.org/abs/1704.04503). When + `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) + NMS. Soft NMS is currently only supported when pad_to_max_output_size is + False. + use_hard_nms: Enforce the usage of hard NMS. + use_cpu_nms: Enforce NMS to run on CPU. + scope: name scope. + + Returns: + A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a + BoxList holds M boxes with a rank-1 scores field representing + corresponding scores for each box with scores sorted in decreasing order + and a rank-1 classes field representing a class label for each box. The + num_valid_nms_boxes is a 0-D integer tensor representing the number of + valid elements in `BoxList`, with the valid elements appearing first. + + Raises: + ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have + a valid scores field. + ValueError: if Soft NMS (tf.image.non_max_suppression_with_scores) is not + supported in the current TF version and `soft_nms_sigma` is nonzero. + """ + _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh, + change_coordinate_frame, clip_window) + if pad_to_max_output_size and soft_nms_sigma != 0.0: + raise ValueError('Soft NMS (soft_nms_sigma != 0.0) is currently not ' + 'supported when pad_to_max_output_size is True.') + + with tf.name_scope(scope, 'MultiClassNonMaxSuppression'), tf.device( + 'cpu:0') if use_cpu_nms else NullContextmanager(): + num_scores = tf.shape(scores)[0] + num_classes = shape_utils.get_dim_as_int(scores.get_shape()[1]) + + selected_boxes_list = [] + num_valid_nms_boxes_cumulative = tf.constant(0) + per_class_boxes_list = tf.unstack(boxes, axis=1) + if masks is not None: + per_class_masks_list = tf.unstack(masks, axis=1) + if boundaries is not None: + per_class_boundaries_list = tf.unstack(boundaries, axis=1) + boxes_ids = (range(num_classes) if len(per_class_boxes_list) > 1 + else [0] * num_classes) + for class_idx, boxes_idx in zip(range(num_classes), boxes_ids): + per_class_boxes = per_class_boxes_list[boxes_idx] + boxlist_and_class_scores = box_list.BoxList(per_class_boxes) + class_scores = tf.reshape( + tf.slice(scores, [0, class_idx], tf.stack([num_scores, 1])), [-1]) + + boxlist_and_class_scores.add_field(fields.BoxListFields.scores, + class_scores) + if masks is not None: + per_class_masks = per_class_masks_list[boxes_idx] + boxlist_and_class_scores.add_field(fields.BoxListFields.masks, + per_class_masks) + if boundaries is not None: + per_class_boundaries = per_class_boundaries_list[boxes_idx] + boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries, + per_class_boundaries) + if additional_fields is not None: + for key, tensor in additional_fields.items(): + boxlist_and_class_scores.add_field(key, tensor) + + nms_result = None + selected_scores = None + if pad_to_max_output_size: + max_selection_size = max_size_per_class + if use_partitioned_nms: + (selected_indices, num_valid_nms_boxes, + boxlist_and_class_scores.data['boxes'], + 
boxlist_and_class_scores.data['scores'], + _) = partitioned_non_max_suppression_padded( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh) + else: + selected_indices, num_valid_nms_boxes = ( + tf.image.non_max_suppression_padded( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field( + fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh, + pad_to_max_output_size=True)) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + selected_scores = nms_result.get_field(fields.BoxListFields.scores) + else: + max_selection_size = tf.minimum(max_size_per_class, + boxlist_and_class_scores.num_boxes()) + if (hasattr(tf.image, 'non_max_suppression_with_scores') and + tf.compat.forward_compatible(2019, 6, 6) and not use_hard_nms): + (selected_indices, selected_scores + ) = tf.image.non_max_suppression_with_scores( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh, + soft_nms_sigma=soft_nms_sigma) + num_valid_nms_boxes = tf.shape(selected_indices)[0] + selected_indices = tf.concat( + [selected_indices, + tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0) + selected_scores = tf.concat( + [selected_scores, + tf.zeros(max_selection_size-num_valid_nms_boxes, + tf.float32)], -1) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + else: + if soft_nms_sigma != 0: + raise ValueError('Soft NMS not supported in current TF version!') + selected_indices = tf.image.non_max_suppression( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh) + num_valid_nms_boxes = tf.shape(selected_indices)[0] + selected_indices = tf.concat( + [selected_indices, + tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + selected_scores = nms_result.get_field(fields.BoxListFields.scores) + # Make the scores -1 for invalid boxes. + valid_nms_boxes_indices = tf.less( + tf.range(max_selection_size), num_valid_nms_boxes) + + nms_result.add_field( + fields.BoxListFields.scores, + tf.where(valid_nms_boxes_indices, + selected_scores, -1*tf.ones(max_selection_size))) + num_valid_nms_boxes_cumulative += num_valid_nms_boxes + + nms_result.add_field( + fields.BoxListFields.classes, (tf.zeros_like( + nms_result.get_field(fields.BoxListFields.scores)) + class_idx)) + selected_boxes_list.append(nms_result) + selected_boxes = box_list_ops.concatenate(selected_boxes_list) + sorted_boxes = box_list_ops.sort_by_field(selected_boxes, + fields.BoxListFields.scores) + if clip_window is not None: + # When pad_to_max_output_size is False, it prunes the boxes with zero + # area. 
+ sorted_boxes, num_valid_nms_boxes_cumulative = _clip_window_prune_boxes( + sorted_boxes, clip_window, pad_to_max_output_size, + change_coordinate_frame) + + if max_total_size: + max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes()) + sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size)) + num_valid_nms_boxes_cumulative = tf.where( + max_total_size > num_valid_nms_boxes_cumulative, + num_valid_nms_boxes_cumulative, max_total_size) + # Select only the valid boxes if pad_to_max_output_size is False. + if not pad_to_max_output_size: + sorted_boxes = box_list_ops.gather( + sorted_boxes, tf.range(num_valid_nms_boxes_cumulative)) + + return sorted_boxes, num_valid_nms_boxes_cumulative + + +def class_agnostic_non_max_suppression(boxes, + scores, + score_thresh, + iou_thresh, + max_classes_per_detection=1, + max_total_size=0, + clip_window=None, + change_coordinate_frame=False, + masks=None, + boundaries=None, + pad_to_max_output_size=False, + use_partitioned_nms=False, + additional_fields=None, + soft_nms_sigma=0.0, + scope=None): + """Class-agnostic version of non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. It operates on all the boxes using + max scores across all classes for which scores are provided (via the scores + field of the input box_list), pruning boxes with score less than a provided + threshold prior to applying NMS. + + Please note that this operation is performed in a class-agnostic way, + therefore any background classes should be removed prior to calling this + function. + + Selected boxes are guaranteed to be sorted in decreasing order by score (but + the sort is not guaranteed to be stable). + + Args: + boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either + number of classes or 1 depending on whether a separate box is predicted + per class. + scores: A [k, num_classes] float32 tensor containing the scores for each of + the k detections. The scores have to be non-negative when + pad_to_max_output_size is True. + score_thresh: scalar threshold for score (low scoring boxes are removed). + iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap + with previously selected boxes are removed). + max_classes_per_detection: maximum number of retained classes per detection + box in class-agnostic NMS. + max_total_size: maximum number of boxes retained over all classes. By + default returns all boxes retained after capping boxes per class. + clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] + representing the window to clip and normalize boxes to before performing + non-max suppression. + change_coordinate_frame: Whether to normalize coordinates after clipping + relative to clip_window (this can only be set to True if a clip_window is + provided) + masks: (optional) a [k, q, mask_height, mask_width] float32 tensor + containing box masks. `q` can be either number of classes or 1 depending + on whether a separate mask is predicted per class. + boundaries: (optional) a [k, q, boundary_height, boundary_width] float32 + tensor containing box boundaries. `q` can be either number of classes or 1 + depending on whether a separate boundary is predicted per class. + pad_to_max_output_size: If true, the output nmsed boxes are padded to be of + length `max_size_per_class`. Defaults to false. 
+ use_partitioned_nms: If true, use partitioned version of + non_max_suppression. + additional_fields: (optional) If not None, a dictionary that maps keys to + tensors whose first dimensions are all of size `k`. After non-maximum + suppression, all tensors corresponding to the selected boxes will be added + to resulting BoxList. + soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter; + See Bodla et al, https://arxiv.org/abs/1704.04503). When + `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) + NMS. Soft NMS is currently only supported when pad_to_max_output_size is + False. + scope: name scope. + + Returns: + A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a + BoxList holds M boxes with a rank-1 scores field representing + corresponding scores for each box with scores sorted in decreasing order + and a rank-1 classes field representing a class label for each box. The + num_valid_nms_boxes is a 0-D integer tensor representing the number of + valid elements in `BoxList`, with the valid elements appearing first. + + Raises: + ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have + a valid scores field or if non-zero soft_nms_sigma is provided when + pad_to_max_output_size is True. + """ + _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh, + change_coordinate_frame, clip_window) + if pad_to_max_output_size and soft_nms_sigma != 0.0: + raise ValueError('Soft NMS (soft_nms_sigma != 0.0) is currently not ' + 'supported when pad_to_max_output_size is True.') + + if max_classes_per_detection > 1: + raise ValueError('Max classes per detection box >1 not supported.') + q = shape_utils.get_dim_as_int(boxes.shape[1]) + if q > 1: + class_ids = tf.expand_dims( + tf.argmax(scores, axis=1, output_type=tf.int32), axis=1) + boxes = tf.batch_gather(boxes, class_ids) + if masks is not None: + masks = tf.batch_gather(masks, class_ids) + if boundaries is not None: + boundaries = tf.batch_gather(boundaries, class_ids) + boxes = tf.squeeze(boxes, axis=[1]) + if masks is not None: + masks = tf.squeeze(masks, axis=[1]) + if boundaries is not None: + boundaries = tf.squeeze(boundaries, axis=[1]) + + with tf.name_scope(scope, 'ClassAgnosticNonMaxSuppression'): + boxlist_and_class_scores = box_list.BoxList(boxes) + max_scores = tf.reduce_max(scores, axis=-1) + classes_with_max_scores = tf.argmax(scores, axis=-1) + boxlist_and_class_scores.add_field(fields.BoxListFields.scores, max_scores) + if masks is not None: + boxlist_and_class_scores.add_field(fields.BoxListFields.masks, masks) + if boundaries is not None: + boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries, + boundaries) + + if additional_fields is not None: + for key, tensor in additional_fields.items(): + boxlist_and_class_scores.add_field(key, tensor) + + nms_result = None + selected_scores = None + if pad_to_max_output_size: + max_selection_size = max_total_size + if use_partitioned_nms: + (selected_indices, num_valid_nms_boxes, + boxlist_and_class_scores.data['boxes'], + boxlist_and_class_scores.data['scores'], + argsort_ids) = partitioned_non_max_suppression_padded( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh) + classes_with_max_scores = tf.gather(classes_with_max_scores, + argsort_ids) + else: + selected_indices, num_valid_nms_boxes = ( + tf.image.non_max_suppression_padded( + boxlist_and_class_scores.get(), 
+ boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh, + pad_to_max_output_size=True)) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + selected_scores = nms_result.get_field(fields.BoxListFields.scores) + else: + max_selection_size = tf.minimum(max_total_size, + boxlist_and_class_scores.num_boxes()) + if (hasattr(tf.image, 'non_max_suppression_with_scores') and + tf.compat.forward_compatible(2019, 6, 6)): + (selected_indices, selected_scores + ) = tf.image.non_max_suppression_with_scores( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh, + soft_nms_sigma=soft_nms_sigma) + num_valid_nms_boxes = tf.shape(selected_indices)[0] + selected_indices = tf.concat([ + selected_indices, + tf.zeros(max_selection_size - num_valid_nms_boxes, tf.int32) + ], 0) + selected_scores = tf.concat( + [selected_scores, + tf.zeros(max_selection_size-num_valid_nms_boxes, tf.float32)], -1) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + else: + if soft_nms_sigma != 0: + raise ValueError('Soft NMS not supported in current TF version!') + selected_indices = tf.image.non_max_suppression( + boxlist_and_class_scores.get(), + boxlist_and_class_scores.get_field(fields.BoxListFields.scores), + max_selection_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh) + num_valid_nms_boxes = tf.shape(selected_indices)[0] + selected_indices = tf.concat( + [selected_indices, + tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0) + nms_result = box_list_ops.gather(boxlist_and_class_scores, + selected_indices) + selected_scores = nms_result.get_field(fields.BoxListFields.scores) + valid_nms_boxes_indices = tf.less( + tf.range(max_selection_size), num_valid_nms_boxes) + nms_result.add_field( + fields.BoxListFields.scores, + tf.where(valid_nms_boxes_indices, + selected_scores, -1*tf.ones(max_selection_size))) + + selected_classes = tf.gather(classes_with_max_scores, selected_indices) + selected_classes = tf.cast(selected_classes, tf.float32) + nms_result.add_field(fields.BoxListFields.classes, selected_classes) + selected_boxes = nms_result + sorted_boxes = box_list_ops.sort_by_field(selected_boxes, + fields.BoxListFields.scores) + + if clip_window is not None: + # When pad_to_max_output_size is False, it prunes the boxes with zero + # area. + sorted_boxes, num_valid_nms_boxes = _clip_window_prune_boxes( + sorted_boxes, clip_window, pad_to_max_output_size, + change_coordinate_frame) + + if max_total_size: + max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes()) + sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size)) + num_valid_nms_boxes = tf.where(max_total_size > num_valid_nms_boxes, + num_valid_nms_boxes, max_total_size) + # Select only the valid boxes if pad_to_max_output_size is False. 
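+    # (When pad_to_max_output_size is True the fixed-size output is returned
+    # as-is; the padded entries were already marked invalid with a score of -1
+    # above.)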
+    if not pad_to_max_output_size:
+      sorted_boxes = box_list_ops.gather(sorted_boxes,
+                                         tf.range(num_valid_nms_boxes))
+
+    return sorted_boxes, num_valid_nms_boxes
+
+
+def batch_multiclass_non_max_suppression(boxes,
+                                         scores,
+                                         score_thresh,
+                                         iou_thresh,
+                                         max_size_per_class,
+                                         max_total_size=0,
+                                         clip_window=None,
+                                         change_coordinate_frame=False,
+                                         num_valid_boxes=None,
+                                         masks=None,
+                                         additional_fields=None,
+                                         soft_nms_sigma=0.0,
+                                         scope=None,
+                                         use_static_shapes=False,
+                                         use_partitioned_nms=False,
+                                         parallel_iterations=32,
+                                         use_class_agnostic_nms=False,
+                                         max_classes_per_detection=1,
+                                         use_dynamic_map_fn=False,
+                                         use_combined_nms=False,
+                                         use_hard_nms=False,
+                                         use_cpu_nms=False):
+  """Multi-class version of non maximum suppression that operates on a batch.
+
+  This op is similar to `multiclass_non_max_suppression` but operates on a batch
+  of boxes and scores. See documentation for `multiclass_non_max_suppression`
+  for details.
+
+  Args:
+    boxes: A [batch_size, num_anchors, q, 4] float32 tensor containing
+      detections. If `q` is 1 then same boxes are used for all classes
+      otherwise, if `q` is equal to number of classes, class-specific boxes are
+      used.
+    scores: A [batch_size, num_anchors, num_classes] float32 tensor containing
+      the scores for each of the `num_anchors` detections. The scores have to be
+      non-negative when use_static_shapes is set True.
+    score_thresh: scalar threshold for score (low scoring boxes are removed).
+    iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap
+      with previously selected boxes are removed).
+    max_size_per_class: maximum number of retained boxes per class.
+    max_total_size: maximum number of boxes retained over all classes. By
+      default returns all boxes retained after capping boxes per class.
+    clip_window: A float32 tensor of shape [batch_size, 4] where each entry is
+      of the form [y_min, x_min, y_max, x_max] representing the window to clip
+      boxes to before performing non-max suppression. This argument can also be
+      a tensor of shape [4] in which case, the same clip window is applied to
+      all images in the batch. If clip_window is None, all boxes are used to
+      perform non-max suppression.
+    change_coordinate_frame: Whether to normalize coordinates after clipping
+      relative to clip_window (this can only be set to True if a clip_window is
+      provided)
+    num_valid_boxes: (optional) a Tensor of type `int32`. A 1-D tensor of shape
+      [batch_size] representing the number of valid boxes to be considered for
+      each image in the batch. This parameter allows for ignoring zero
+      paddings.
+    masks: (optional) a [batch_size, num_anchors, q, mask_height, mask_width]
+      float32 tensor containing box masks. `q` can be either number of classes
+      or 1 depending on whether a separate mask is predicted per class.
+    additional_fields: (optional) If not None, a dictionary that maps keys to
+      tensors whose dimensions are [batch_size, num_anchors, ...].
+    soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;
+      See Bodla et al, https://arxiv.org/abs/1704.04503). When
+      `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)
+      NMS. Soft NMS is currently only supported when pad_to_max_output_size is
+      False.
+    scope: tf scope name.
+    use_static_shapes: If true, the output nmsed boxes are padded to be of
+      length `max_size_per_class` and it doesn't clip boxes to max_total_size.
+      Defaults to false.
+    use_partitioned_nms: If true, use partitioned version of
+      non_max_suppression.
+ parallel_iterations: (optional) number of batch items to process in + parallel. + use_class_agnostic_nms: If true, this uses class-agnostic non max + suppression + max_classes_per_detection: Maximum number of retained classes per detection + box in class-agnostic NMS. + use_dynamic_map_fn: If true, images in the batch will be processed within a + dynamic loop. Otherwise, a static loop will be used if possible. + use_combined_nms: If true, it uses tf.image.combined_non_max_suppression ( + multi-class version of NMS that operates on a batch). + It greedily selects a subset of detection bounding boxes, pruning away + boxes that have high IOU (intersection over union) overlap (> thresh) with + already selected boxes. It operates independently for each batch. + Within each batch, it operates independently for each class for which + scores are provided (via the scores field of the input box_list), + pruning boxes with score less than a provided threshold prior to applying + NMS. This operation is performed on *all* batches and *all* classes + in the batch, therefore any background classes should be removed prior to + calling this function. + Masks and additional fields are not supported. + See argument checks in the code below for unsupported arguments. + use_hard_nms: Enforce the usage of hard NMS. + use_cpu_nms: Enforce NMS to run on CPU. + + Returns: + 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor + containing the non-max suppressed boxes. + 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing + the scores for the boxes. + 'nmsed_classes': A [batch_size, max_detections] float32 tensor + containing the class for boxes. + 'nmsed_masks': (optional) a + [batch_size, max_detections, mask_height, mask_width] float32 tensor + containing masks for each selected box. This is set to None if input + `masks` is None. + 'nmsed_additional_fields': (optional) a dictionary of + [batch_size, max_detections, ...] float32 tensors corresponding to the + tensors specified in the input `additional_fields`. This is not returned + if input `additional_fields` is None. + 'num_detections': A [batch_size] int32 tensor indicating the number of + valid detections per batch item. Only the top num_detections[i] entries in + nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the + entries are zero paddings. + + Raises: + ValueError: if `q` in boxes.shape is not 1 or not equal to number of + classes as inferred from scores.shape. + """ + if use_combined_nms: + if change_coordinate_frame: + raise ValueError( + 'change_coordinate_frame (normalizing coordinates' + ' relative to clip_window) is not supported by combined_nms.') + if num_valid_boxes is not None: + raise ValueError('num_valid_boxes is not supported by combined_nms.') + if masks is not None: + raise ValueError('masks is not supported by combined_nms.') + if soft_nms_sigma != 0.0: + raise ValueError('Soft NMS is not supported by combined_nms.') + if use_class_agnostic_nms: + raise ValueError('class-agnostic NMS is not supported by combined_nms.') + if clip_window is not None: + tf.logging.warning( + 'clip_window is not supported by combined_nms unless it is' + ' [0. 0. 1. 1.] 
for each image.') + if additional_fields is not None: + tf.logging.warning('additional_fields is not supported by combined_nms.') + if parallel_iterations != 32: + tf.logging.warning('Number of batch items to be processed in parallel is' + ' not configurable by combined_nms.') + if max_classes_per_detection > 1: + tf.logging.warning( + 'max_classes_per_detection is not configurable by combined_nms.') + + with tf.name_scope(scope, 'CombinedNonMaxSuppression'): + (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, + batch_num_detections) = tf.image.combined_non_max_suppression( + boxes=boxes, + scores=scores, + max_output_size_per_class=max_size_per_class, + max_total_size=max_total_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh, + pad_per_class=use_static_shapes) + # Not supported by combined_non_max_suppression. + batch_nmsed_masks = None + # Not supported by combined_non_max_suppression. + batch_nmsed_additional_fields = None + return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, + batch_nmsed_masks, batch_nmsed_additional_fields, + batch_num_detections) + + q = shape_utils.get_dim_as_int(boxes.shape[2]) + num_classes = shape_utils.get_dim_as_int(scores.shape[2]) + if q != 1 and q != num_classes: + raise ValueError('third dimension of boxes must be either 1 or equal ' + 'to the third dimension of scores.') + if change_coordinate_frame and clip_window is None: + raise ValueError('if change_coordinate_frame is True, then a clip_window' + 'must be specified.') + original_masks = masks + + # Create ordered dictionary using the sorted keys from + # additional fields to ensure getting the same key value assignment + # in _single_image_nms_fn(). The dictionary is thus a sorted version of + # additional_fields. + if additional_fields is None: + ordered_additional_fields = collections.OrderedDict() + else: + ordered_additional_fields = collections.OrderedDict( + sorted(additional_fields.items(), key=lambda item: item[0])) + + with tf.name_scope(scope, 'BatchMultiClassNonMaxSuppression'): + boxes_shape = boxes.shape + batch_size = shape_utils.get_dim_as_int(boxes_shape[0]) + num_anchors = shape_utils.get_dim_as_int(boxes_shape[1]) + + if batch_size is None: + batch_size = tf.shape(boxes)[0] + if num_anchors is None: + num_anchors = tf.shape(boxes)[1] + + # If num valid boxes aren't provided, create one and mark all boxes as + # valid. + if num_valid_boxes is None: + num_valid_boxes = tf.ones([batch_size], dtype=tf.int32) * num_anchors + + # If masks aren't provided, create dummy masks so we can only have one copy + # of _single_image_nms_fn and discard the dummy masks after map_fn. + if masks is None: + masks_shape = tf.stack([batch_size, num_anchors, q, 1, 1]) + masks = tf.zeros(masks_shape) + + if clip_window is None: + clip_window = tf.stack([ + tf.reduce_min(boxes[:, :, :, 0]), + tf.reduce_min(boxes[:, :, :, 1]), + tf.reduce_max(boxes[:, :, :, 2]), + tf.reduce_max(boxes[:, :, :, 3]) + ]) + if clip_window.shape.ndims == 1: + clip_window = tf.tile(tf.expand_dims(clip_window, 0), [batch_size, 1]) + + def _single_image_nms_fn(args): + """Runs NMS on a single image and returns padded output. + + Args: + args: A list of tensors consisting of the following: + per_image_boxes - A [num_anchors, q, 4] float32 tensor containing + detections. If `q` is 1 then same boxes are used for all classes + otherwise, if `q` is equal to number of classes, class-specific + boxes are used. 
+ per_image_scores - A [num_anchors, num_classes] float32 tensor + containing the scores for each of the `num_anchors` detections. + per_image_masks - A [num_anchors, q, mask_height, mask_width] float32 + tensor containing box masks. `q` can be either number of classes + or 1 depending on whether a separate mask is predicted per class. + per_image_clip_window - A 1D float32 tensor of the form + [ymin, xmin, ymax, xmax] representing the window to clip the boxes + to. + per_image_additional_fields - (optional) A variable number of float32 + tensors each with size [num_anchors, ...]. + per_image_num_valid_boxes - A tensor of type `int32`. A 1-D tensor of + shape [batch_size] representing the number of valid boxes to be + considered for each image in the batch. This parameter allows for + ignoring zero paddings. + + Returns: + 'nmsed_boxes': A [max_detections, 4] float32 tensor containing the + non-max suppressed boxes. + 'nmsed_scores': A [max_detections] float32 tensor containing the scores + for the boxes. + 'nmsed_classes': A [max_detections] float32 tensor containing the class + for boxes. + 'nmsed_masks': (optional) a [max_detections, mask_height, mask_width] + float32 tensor containing masks for each selected box. This is set to + None if input `masks` is None. + 'nmsed_additional_fields': (optional) A variable number of float32 + tensors each with size [max_detections, ...] corresponding to the + input `per_image_additional_fields`. + 'num_detections': A [batch_size] int32 tensor indicating the number of + valid detections per batch item. Only the top num_detections[i] + entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The + rest of the entries are zero paddings. + """ + per_image_boxes = args[0] + per_image_scores = args[1] + per_image_masks = args[2] + per_image_clip_window = args[3] + # Make sure that the order of elements passed in args is aligned with + # the iteration order of ordered_additional_fields + per_image_additional_fields = { + key: value + for key, value in zip(ordered_additional_fields, args[4:-1]) + } + per_image_num_valid_boxes = args[-1] + if use_static_shapes: + total_proposals = tf.shape(per_image_scores) + per_image_scores = tf.where( + tf.less(tf.range(total_proposals[0]), per_image_num_valid_boxes), + per_image_scores, + tf.fill(total_proposals, np.finfo('float32').min)) + else: + per_image_boxes = tf.reshape( + tf.slice(per_image_boxes, 3 * [0], + tf.stack([per_image_num_valid_boxes, -1, -1])), [-1, q, 4]) + per_image_scores = tf.reshape( + tf.slice(per_image_scores, [0, 0], + tf.stack([per_image_num_valid_boxes, -1])), + [-1, num_classes]) + per_image_masks = tf.reshape( + tf.slice(per_image_masks, 4 * [0], + tf.stack([per_image_num_valid_boxes, -1, -1, -1])), + [-1, q, shape_utils.get_dim_as_int(per_image_masks.shape[2]), + shape_utils.get_dim_as_int(per_image_masks.shape[3])]) + if per_image_additional_fields is not None: + for key, tensor in per_image_additional_fields.items(): + additional_field_shape = tensor.get_shape() + additional_field_dim = len(additional_field_shape) + per_image_additional_fields[key] = tf.reshape( + tf.slice( + per_image_additional_fields[key], + additional_field_dim * [0], + tf.stack([per_image_num_valid_boxes] + + (additional_field_dim - 1) * [-1])), [-1] + [ + shape_utils.get_dim_as_int(dim) + for dim in additional_field_shape[1:] + ]) + if use_class_agnostic_nms: + nmsed_boxlist, num_valid_nms_boxes = class_agnostic_non_max_suppression( + per_image_boxes, + per_image_scores, + score_thresh, + iou_thresh, + 
max_classes_per_detection, + max_total_size, + clip_window=per_image_clip_window, + change_coordinate_frame=change_coordinate_frame, + masks=per_image_masks, + pad_to_max_output_size=use_static_shapes, + use_partitioned_nms=use_partitioned_nms, + additional_fields=per_image_additional_fields, + soft_nms_sigma=soft_nms_sigma) + else: + nmsed_boxlist, num_valid_nms_boxes = multiclass_non_max_suppression( + per_image_boxes, + per_image_scores, + score_thresh, + iou_thresh, + max_size_per_class, + max_total_size, + clip_window=per_image_clip_window, + change_coordinate_frame=change_coordinate_frame, + masks=per_image_masks, + pad_to_max_output_size=use_static_shapes, + use_partitioned_nms=use_partitioned_nms, + additional_fields=per_image_additional_fields, + soft_nms_sigma=soft_nms_sigma, + use_hard_nms=use_hard_nms, + use_cpu_nms=use_cpu_nms) + + if not use_static_shapes: + nmsed_boxlist = box_list_ops.pad_or_clip_box_list( + nmsed_boxlist, max_total_size) + num_detections = num_valid_nms_boxes + nmsed_boxes = nmsed_boxlist.get() + nmsed_scores = nmsed_boxlist.get_field(fields.BoxListFields.scores) + nmsed_classes = nmsed_boxlist.get_field(fields.BoxListFields.classes) + nmsed_masks = nmsed_boxlist.get_field(fields.BoxListFields.masks) + nmsed_additional_fields = [] + # Sorting is needed here to ensure that the values stored in + # nmsed_additional_fields are always kept in the same order + # across different execution runs. + for key in sorted(per_image_additional_fields.keys()): + nmsed_additional_fields.append(nmsed_boxlist.get_field(key)) + return ([nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks] + + nmsed_additional_fields + [num_detections]) + + num_additional_fields = 0 + if ordered_additional_fields: + num_additional_fields = len(ordered_additional_fields) + num_nmsed_outputs = 4 + num_additional_fields + + if use_dynamic_map_fn: + map_fn = tf.map_fn + else: + map_fn = shape_utils.static_or_dynamic_map_fn + + batch_outputs = map_fn( + _single_image_nms_fn, + elems=([boxes, scores, masks, clip_window] + + list(ordered_additional_fields.values()) + [num_valid_boxes]), + dtype=(num_nmsed_outputs * [tf.float32] + [tf.int32]), + parallel_iterations=parallel_iterations) + + batch_nmsed_boxes = batch_outputs[0] + batch_nmsed_scores = batch_outputs[1] + batch_nmsed_classes = batch_outputs[2] + batch_nmsed_masks = batch_outputs[3] + batch_nmsed_values = batch_outputs[4:-1] + + batch_nmsed_additional_fields = {} + if num_additional_fields > 0: + # Sort the keys to ensure arranging elements in same order as + # in _single_image_nms_fn. 
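+      # ordered_additional_fields is already key-sorted, so iterating over its
+      # keys here lines up with the positional order of the extra outputs
+      # returned by map_fn.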
+ batch_nmsed_keys = list(ordered_additional_fields.keys()) + for i in range(len(batch_nmsed_keys)): + batch_nmsed_additional_fields[ + batch_nmsed_keys[i]] = batch_nmsed_values[i] + + batch_num_detections = batch_outputs[-1] + + if original_masks is None: + batch_nmsed_masks = None + + if not ordered_additional_fields: + batch_nmsed_additional_fields = None + + return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, + batch_nmsed_masks, batch_nmsed_additional_fields, + batch_num_detections) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/post_processing.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/post_processing.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e2708f0b034b471b7d6e5055589d6d3ba13b233 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/post_processing.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/prefetcher.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/prefetcher.py new file mode 100644 index 0000000000000000000000000000000000000000..31e93eae80e25abde3166a56d212645ed4f17a5a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/prefetcher.py @@ -0,0 +1,61 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Provides functions to prefetch tensors to feed into models.""" +import tensorflow.compat.v1 as tf + + +def prefetch(tensor_dict, capacity): + """Creates a prefetch queue for tensors. + + Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a + dequeue op that evaluates to a tensor_dict. This function is useful in + prefetching preprocessed tensors so that the data is readily available for + consumers. + + Example input pipeline when you don't need batching: + ---------------------------------------------------- + key, string_tensor = slim.parallel_reader.parallel_read(...) + tensor_dict = decoder.decode(string_tensor) + tensor_dict = preprocessor.preprocess(tensor_dict, ...) + prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20) + tensor_dict = prefetch_queue.dequeue() + outputs = Model(tensor_dict) + ... + ---------------------------------------------------- + + For input pipelines with batching, refer to core/batcher.py + + Args: + tensor_dict: a dictionary of tensors to prefetch. + capacity: the size of the prefetch queue. 
+ + Returns: + a FIFO prefetcher queue + """ + names = list(tensor_dict.keys()) + dtypes = [t.dtype for t in tensor_dict.values()] + shapes = [t.get_shape() for t in tensor_dict.values()] + prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes, + shapes=shapes, + names=names, + name='prefetch_queue') + enqueue_op = prefetch_queue.enqueue(tensor_dict) + tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner( + prefetch_queue, [enqueue_op])) + tf.summary.scalar( + 'queue/%s/fraction_of_%d_full' % (prefetch_queue.name, capacity), + tf.cast(prefetch_queue.size(), dtype=tf.float32) * (1. / capacity)) + return prefetch_queue diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/prefetcher.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/prefetcher.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50c31e010de1d6b658351759cc2b7f0e4e550c93 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/prefetcher.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/prefetcher_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/prefetcher_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..95e9155e5e38c762cee915389f55f0cc69334ae9 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/prefetcher_tf1_test.py @@ -0,0 +1,109 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.prefetcher.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.core import prefetcher +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class PrefetcherTest(tf.test.TestCase): + """Test class for prefetcher.""" + + def test_prefetch_tensors_with_fully_defined_shapes(self): + with self.test_session() as sess: + batch_size = 10 + image_size = 32 + num_batches = 5 + examples = tf.Variable(tf.constant(0, dtype=tf.int64)) + counter = examples.count_up_to(num_batches) + image = tf.random_normal([batch_size, image_size, + image_size, 3], + dtype=tf.float32, + name='images') + label = tf.random_uniform([batch_size, 1], 0, 10, + dtype=tf.int32, name='labels') + + prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter, + 'image': image, + 'label': label}, + capacity=100) + tensor_dict = prefetch_queue.dequeue() + + self.assertAllEqual(tensor_dict['image'].get_shape().as_list(), + [batch_size, image_size, image_size, 3]) + self.assertAllEqual(tensor_dict['label'].get_shape().as_list(), + [batch_size, 1]) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + for _ in range(num_batches): + results = sess.run(tensor_dict) + self.assertEquals(results['image'].shape, + (batch_size, image_size, image_size, 3)) + self.assertEquals(results['label'].shape, (batch_size, 1)) + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(tensor_dict) + + def test_prefetch_tensors_with_partially_defined_shapes(self): + with self.test_session() as sess: + batch_size = 10 + image_size = 32 + num_batches = 5 + examples = tf.Variable(tf.constant(0, dtype=tf.int64)) + counter = examples.count_up_to(num_batches) + image = tf.random_normal([batch_size, + tf.Variable(image_size), + tf.Variable(image_size), 3], + dtype=tf.float32, + name='image') + image.set_shape([batch_size, None, None, 3]) + label = tf.random_uniform([batch_size, tf.Variable(1)], 0, + 10, dtype=tf.int32, name='label') + label.set_shape([batch_size, None]) + + prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter, + 'image': image, + 'label': label}, + capacity=100) + tensor_dict = prefetch_queue.dequeue() + + self.assertAllEqual(tensor_dict['image'].get_shape().as_list(), + [batch_size, None, None, 3]) + self.assertAllEqual(tensor_dict['label'].get_shape().as_list(), + [batch_size, None]) + + tf.initialize_all_variables().run() + with slim.queues.QueueRunners(sess): + for _ in range(num_batches): + results = sess.run(tensor_dict) + self.assertEquals(results['image'].shape, + (batch_size, image_size, image_size, 3)) + self.assertEquals(results['label'].shape, (batch_size, 1)) + with self.assertRaises(tf.errors.OutOfRangeError): + sess.run(tensor_dict) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..a1912edcd608c569f243697b73f99cbf933c976c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor.py @@ -0,0 +1,4607 @@ 
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Preprocess images and bounding boxes for detection.
+
+We perform two sets of operations in preprocessing stage:
+(a) operations that are applied to both training and testing data,
+(b) operations that are applied only to training data for the purpose of
+    data augmentation.
+
+A preprocessing function receives a set of inputs,
+e.g. an image and bounding boxes,
+performs an operation on them, and returns them.
+Some examples are: randomly cropping the image, randomly mirroring the image,
+                   randomly changing the brightness, contrast, hue and
+                   randomly jittering the bounding boxes.
+
+The preprocess function receives a tensor_dict which is a dictionary that maps
+different field names to their tensors. For example,
+tensor_dict[fields.InputDataFields.image] holds the image tensor.
+The image is a rank 4 tensor: [1, height, width, channels] with
+dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
+in each row there is a box with [ymin xmin ymax xmax].
+Boxes are in normalized coordinates meaning
+their coordinate values range in [0, 1]
+
+To preprocess multiple images with the same operations in cases where
+nondeterministic operations are used, a preprocessor_cache.PreprocessorCache
+object can be passed into the preprocess function or individual operations.
+All nondeterministic operations except random_jitter_boxes support caching.
+E.g.
+Let tensor_dict{1,2,3,4,5} be copies of the same inputs.
+Let preprocess_options contain nondeterministic operation(s) excluding
+random_jitter_boxes.
+
+cache1 = preprocessor_cache.PreprocessorCache()
+cache2 = preprocessor_cache.PreprocessorCache()
+a = preprocess(tensor_dict1, preprocess_options, preprocess_vars_cache=cache1)
+b = preprocess(tensor_dict2, preprocess_options, preprocess_vars_cache=cache1)
+c = preprocess(tensor_dict3, preprocess_options, preprocess_vars_cache=cache2)
+d = preprocess(tensor_dict4, preprocess_options, preprocess_vars_cache=cache2)
+e = preprocess(tensor_dict5, preprocess_options)
+
+Then corresponding tensors of object pairs (a,b) and (c,d)
+are guaranteed to be equal element-wise, but the equality of any other object
+pair cannot be determined.
+
+Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
+functions receive a rank 3 tensor for processing the image. Thus, inside the
+preprocess function we squeeze the image to become a rank 3 tensor and then
+we pass it to the functions. At the end of the preprocess we expand the image
+back to rank 4.
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import inspect +import sys + +import six +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from tensorflow.python.ops import control_flow_ops +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import densepose_ops +from object_detection.core import keypoint_ops +from object_detection.core import preprocessor_cache +from object_detection.core import standard_fields as fields +from object_detection.utils import autoaugment_utils +from object_detection.utils import ops +from object_detection.utils import patch_ops +from object_detection.utils import shape_utils + + +def _apply_with_random_selector(x, + func, + num_cases, + preprocess_vars_cache=None, + key=''): + """Computes func(x, sel), with sel sampled from [0...num_cases-1]. + + If both preprocess_vars_cache AND key are the same between two calls, sel will + be the same value in both calls. + + Args: + x: input Tensor. + func: Python function to apply. + num_cases: Python int32, number of cases to sample sel from. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + key: variable identifier for preprocess_vars_cache. + + Returns: + The result of func(x, sel), where func receives the value of the + selector as a python integer, but sel is sampled dynamically. + """ + generator_func = functools.partial( + tf.random_uniform, [], maxval=num_cases, dtype=tf.int32) + rand_sel = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.SELECTOR, + preprocess_vars_cache, key) + + # Pass the real x only to one of the func calls. + return control_flow_ops.merge([func( + control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case) + for case in range(num_cases)])[0] + + +def _apply_with_random_selector_tuples(x, + func, + num_cases, + preprocess_vars_cache=None, + key=''): + """Computes func(x, sel), with sel sampled from [0...num_cases-1]. + + If both preprocess_vars_cache AND key are the same between two calls, sel will + be the same value in both calls. + + Args: + x: A tuple of input tensors. + func: Python function to apply. + num_cases: Python int32, number of cases to sample sel from. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + key: variable identifier for preprocess_vars_cache. + + Returns: + The result of func(x, sel), where func receives the value of the + selector as a python integer, but sel is sampled dynamically. + """ + num_inputs = len(x) + generator_func = functools.partial( + tf.random_uniform, [], maxval=num_cases, dtype=tf.int32) + rand_sel = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.SELECTOR_TUPLES, + preprocess_vars_cache, key) + + # Pass the real x only to one of the func calls. 
+  tuples = [list() for t in x]
+  for case in range(num_cases):
+    new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x]
+    output = func(tuple(new_x), case)
+    for j in range(num_inputs):
+      tuples[j].append(output[j])
+
+  for i in range(num_inputs):
+    tuples[i] = control_flow_ops.merge(tuples[i])[0]
+  return tuple(tuples)
+
+
+def _get_or_create_preprocess_rand_vars(generator_func,
+                                        function_id,
+                                        preprocess_vars_cache,
+                                        key=''):
+  """Returns a tensor stored in preprocess_vars_cache or using generator_func.
+
+  If the tensor was previously generated and appears in the PreprocessorCache,
+  the previously generated tensor will be returned. Otherwise, a new tensor
+  is generated using generator_func and stored in the cache.
+
+  Args:
+    generator_func: A 0-argument function that generates a tensor.
+    function_id: identifier for the preprocessing function used.
+    preprocess_vars_cache: PreprocessorCache object that records previously
+                           performed augmentations. Updated in-place. If this
+                           function is called multiple times with the same
+                           non-null cache, it will perform deterministically.
+    key: identifier for the variable stored.
+  Returns:
+    The generated tensor.
+  """
+  if preprocess_vars_cache is not None:
+    var = preprocess_vars_cache.get(function_id, key)
+    if var is None:
+      var = generator_func()
+      preprocess_vars_cache.update(function_id, key, var)
+  else:
+    var = generator_func()
+  return var
+
+
+def _random_integer(minval, maxval, seed):
+  """Returns a random 0-D tensor between minval and maxval.
+
+  Args:
+    minval: minimum value of the random tensor.
+    maxval: maximum value of the random tensor.
+    seed: random seed.
+
+  Returns:
+    A random 0-D tensor between minval and maxval.
+  """
+  return tf.random_uniform(
+      [], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed)
+
+
+# TODO(mttang): This method is needed because the current
+# tf.image.rgb_to_grayscale method does not support quantization. Replace with
+# tf.image.rgb_to_grayscale after quantization support is added.
+def _rgb_to_grayscale(images, name=None):
+  """Converts one or more images from RGB to Grayscale.
+
+  Outputs a tensor of the same `DType` and rank as `images`. The size of the
+  last dimension of the output is 1, containing the Grayscale value of the
+  pixels.
+
+  Args:
+    images: The RGB tensor to convert. Last dimension must have size 3 and
+      should contain RGB values.
+    name: A name for the operation (optional).
+
+  Returns:
+    The converted grayscale image(s).
+  """
+  with tf.name_scope(name, 'rgb_to_grayscale', [images]) as name:
+    images = tf.convert_to_tensor(images, name='images')
+    # Remember original dtype so we can convert back if needed
+    orig_dtype = images.dtype
+    flt_image = tf.image.convert_image_dtype(images, tf.float32)
+
+    # Reference for converting between RGB and grayscale.
+    # https://en.wikipedia.org/wiki/Luma_%28video%29
+    rgb_weights = [0.2989, 0.5870, 0.1140]
+    rank_1 = tf.expand_dims(tf.rank(images) - 1, 0)
+    gray_float = tf.reduce_sum(
+        flt_image * rgb_weights, rank_1, keep_dims=True)
+    gray_float.set_shape(images.get_shape()[:-1].concatenate([1]))
+    return tf.image.convert_image_dtype(gray_float, orig_dtype, name=name)
+
+
+def normalize_image(image, original_minval, original_maxval, target_minval,
+                    target_maxval):
+  """Normalizes pixel values in the image.
+
+  Moves the pixel values from the current [original_minval, original_maxval]
+  range to the [target_minval, target_maxval] range.
+ + Args: + image: rank 3 float32 tensor containing 1 + image -> [height, width, channels]. + original_minval: current image minimum value. + original_maxval: current image maximum value. + target_minval: target image minimum value. + target_maxval: target image maximum value. + + Returns: + image: image which is the same shape as input image. + """ + with tf.name_scope('NormalizeImage', values=[image]): + original_minval = float(original_minval) + original_maxval = float(original_maxval) + target_minval = float(target_minval) + target_maxval = float(target_maxval) + image = tf.cast(image, dtype=tf.float32) + image = tf.subtract(image, original_minval) + image = tf.multiply(image, (target_maxval - target_minval) / + (original_maxval - original_minval)) + image = tf.add(image, target_minval) + return image + + +def retain_boxes_above_threshold(boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + threshold=0.0): + """Retains boxes whose label weight is above a given threshold. + + If the label weight for a box is missing (represented by NaN), the box is + retained. The boxes that don't pass the threshold will not appear in the + returned tensor. + + Args: + boxes: float32 tensor of shape [num_instance, 4] representing boxes + location in normalized coordinates. + labels: rank 1 int32 tensor of shape [num_instance] containing the object + classes. + label_weights: float32 tensor of shape [num_instance] representing the + weight for each box. + label_confidences: float32 tensor of shape [num_instance] representing the + confidence for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks are of + the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized + coordinates. + threshold: scalar python float. 
+
+  Returns:
+    retained_boxes: [num_retained_instance, 4]
+    retained_labels: [num_retained_instance]
+    retained_label_weights: [num_retained_instance]
+
+    If multiclass_scores, masks, or keypoints are not None, the function also
+      returns:
+
+    retained_multiclass_scores: [num_retained_instance, num_classes]
+    retained_masks: [num_retained_instance, height, width]
+    retained_keypoints: [num_retained_instance, num_keypoints, 2]
+  """
+  with tf.name_scope('RetainBoxesAboveThreshold',
+                     values=[boxes, labels, label_weights]):
+    indices = tf.where(
+        tf.logical_or(label_weights > threshold, tf.is_nan(label_weights)))
+    indices = tf.squeeze(indices, axis=1)
+    retained_boxes = tf.gather(boxes, indices)
+    retained_labels = tf.gather(labels, indices)
+    retained_label_weights = tf.gather(label_weights, indices)
+    result = [retained_boxes, retained_labels, retained_label_weights]
+
+    if label_confidences is not None:
+      retained_label_confidences = tf.gather(label_confidences, indices)
+      result.append(retained_label_confidences)
+
+    if multiclass_scores is not None:
+      retained_multiclass_scores = tf.gather(multiclass_scores, indices)
+      result.append(retained_multiclass_scores)
+
+    if masks is not None:
+      retained_masks = tf.gather(masks, indices)
+      result.append(retained_masks)
+
+    if keypoints is not None:
+      retained_keypoints = tf.gather(keypoints, indices)
+      result.append(retained_keypoints)
+
+    return result
+
+
+def drop_label_probabilistically(boxes,
+                                 labels,
+                                 label_weights,
+                                 label_confidences=None,
+                                 multiclass_scores=None,
+                                 masks=None,
+                                 keypoints=None,
+                                 dropped_label=None,
+                                 drop_probability=0.0,
+                                 seed=None):
+  """Drops boxes of a certain label with probability drop_probability.
+
+  Boxes of the label dropped_label will not appear in the returned tensor.
+
+  Args:
+    boxes: float32 tensor of shape [num_instance, 4] representing boxes
+      location in normalized coordinates.
+    labels: rank 1 int32 tensor of shape [num_instance] containing the object
+      classes.
+    label_weights: float32 tensor of shape [num_instance] representing the
+      weight for each box.
+    label_confidences: float32 tensor of shape [num_instance] representing the
+      confidence for each box.
+    multiclass_scores: (optional) float32 tensor of shape
+      [num_instances, num_classes] representing the score for each box for each
+      class.
+    masks: (optional) rank 3 float32 tensor with shape
+      [num_instances, height, width] containing instance masks. The masks are of
+      the same height, width as the input `image`.
+    keypoints: (optional) rank 3 float32 tensor with shape
+      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
+      coordinates.
+    dropped_label: int32 id of label to drop.
+    drop_probability: float32 probability of dropping a label.
+    seed: random seed.
+
+  Returns:
+    retained_boxes: [num_retained_instance, 4]
+    retained_labels: [num_retained_instance]
+    retained_label_weights: [num_retained_instance]
+
+    If multiclass_scores, masks, or keypoints are not None, the function also
+      returns:
+
+    retained_multiclass_scores: [num_retained_instance, num_classes]
+    retained_masks: [num_retained_instance, height, width]
+    retained_keypoints: [num_retained_instance, num_keypoints, 2]
+  """
+  with tf.name_scope('DropLabelProbabilistically',
+                     values=[boxes, labels]):
+    indices = tf.where(
+        tf.logical_or(
+            tf.random_uniform(tf.shape(labels), seed=seed) > drop_probability,
+            tf.not_equal(labels, dropped_label)))
+    indices = tf.squeeze(indices, axis=1)
+
+    retained_boxes = tf.gather(boxes, indices)
+    retained_labels = tf.gather(labels, indices)
+    retained_label_weights = tf.gather(label_weights, indices)
+    result = [retained_boxes, retained_labels, retained_label_weights]
+
+    if label_confidences is not None:
+      retained_label_confidences = tf.gather(label_confidences, indices)
+      result.append(retained_label_confidences)
+
+    if multiclass_scores is not None:
+      retained_multiclass_scores = tf.gather(multiclass_scores, indices)
+      result.append(retained_multiclass_scores)
+
+    if masks is not None:
+      retained_masks = tf.gather(masks, indices)
+      result.append(retained_masks)
+
+    if keypoints is not None:
+      retained_keypoints = tf.gather(keypoints, indices)
+      result.append(retained_keypoints)
+
+    return result
+
+
+def remap_labels(labels,
+                 original_labels=None,
+                 new_label=None):
+  """Remaps labels that have an id in original_labels to new_label.
+
+  Args:
+    labels: rank 1 int32 tensor of shape [num_instance] containing the object
+      classes.
+    original_labels: int list of original labels that should be mapped from.
+    new_label: int label to map to.
+  Returns:
+    Remapped labels
+  """
+  new_labels = labels
+  for original_label in original_labels:
+    change = tf.where(
+        tf.equal(new_labels, original_label),
+        tf.add(tf.zeros_like(new_labels), new_label - original_label),
+        tf.zeros_like(new_labels))
+    new_labels = tf.add(
+        new_labels,
+        change)
+  new_labels = tf.reshape(new_labels, tf.shape(labels))
+  return new_labels
+
+
+def _flip_boxes_left_right(boxes):
+  """Left-right flip the boxes.
+
+  Args:
+    boxes: Float32 tensor containing the bounding boxes -> [..., 4].
+           Boxes are in normalized form meaning their coordinates vary
+           between [0, 1].
+           Each last dimension is in the form of [ymin, xmin, ymax, xmax].
+
+  Returns:
+    Flipped boxes.
+  """
+  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=-1)
+  flipped_xmin = tf.subtract(1.0, xmax)
+  flipped_xmax = tf.subtract(1.0, xmin)
+  flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], axis=-1)
+  return flipped_boxes
+
+
+def _flip_boxes_up_down(boxes):
+  """Up-down flip the boxes.
+
+  Args:
+    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
+           Boxes are in normalized form meaning their coordinates vary
+           between [0, 1].
+           Each row is in the form of [ymin, xmin, ymax, xmax].
+
+  Returns:
+    Flipped boxes.
+  """
+  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
+  flipped_ymin = tf.subtract(1.0, ymax)
+  flipped_ymax = tf.subtract(1.0, ymin)
+  flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1)
+  return flipped_boxes
+
+
+def _rot90_boxes(boxes):
+  """Rotate boxes counter-clockwise by 90 degrees.
+
+  Args:
+    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
+ Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + + Returns: + Rotated boxes. + """ + ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1) + rotated_ymin = tf.subtract(1.0, xmax) + rotated_ymax = tf.subtract(1.0, xmin) + rotated_xmin = ymin + rotated_xmax = ymax + rotated_boxes = tf.concat( + [rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1) + return rotated_boxes + + +def _flip_masks_left_right(masks): + """Left-right flip masks. + + Args: + masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + + Returns: + flipped masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + """ + return masks[:, :, ::-1] + + +def _flip_masks_up_down(masks): + """Up-down flip masks. + + Args: + masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + + Returns: + flipped masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + """ + return masks[:, ::-1, :] + + +def _rot90_masks(masks): + """Rotate masks counter-clockwise by 90 degrees. + + Args: + masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + + Returns: + rotated masks: rank 3 float32 tensor with shape + [num_instances, height, width] representing instance masks. + """ + masks = tf.transpose(masks, [0, 2, 1]) + return masks[:, ::-1, :] + + +def random_horizontal_flip(image, + boxes=None, + masks=None, + keypoints=None, + keypoint_visibilities=None, + densepose_part_ids=None, + densepose_surface_coords=None, + keypoint_flip_permutation=None, + probability=0.5, + seed=None, + preprocess_vars_cache=None): + """Randomly flips the image and detections horizontally. + + Args: + image: rank 3 float32 tensor with shape [height, width, channels]. + boxes: (optional) rank 2 float32 tensor with shape [N, 4] + containing the bounding boxes. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + keypoint_visibilities: (optional) rank 2 bool tensor with shape + [num_instances, num_keypoints]. + densepose_part_ids: (optional) rank 2 int32 tensor with shape + [num_instances, num_points] holding the part id for each + sampled point. These part_ids are 0-indexed, where the + first non-background part has index 0. + densepose_surface_coords: (optional) rank 3 float32 tensor with shape + [num_instances, num_points, 4]. The DensePose + coordinates are of the form (y, x, v, u) where + (y, x) are the normalized image coordinates for a + sampled point, and (v, u) is the surface + coordinate for the part. + keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip + permutation. + probability: the probability of performing this augmentation. + seed: random seed + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. 
+ + Returns: + image: image which is the same shape as input image. + + If boxes, masks, keypoints, keypoint_visibilities, + keypoint_flip_permutation, densepose_part_ids, or densepose_surface_coords + are not None,the function also returns the following tensors. + + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + keypoint_visibilities: rank 2 bool tensor with shape + [num_instances, num_keypoints]. + densepose_part_ids: rank 2 int32 tensor with shape + [num_instances, num_points]. + densepose_surface_coords: rank 3 float32 tensor with shape + [num_instances, num_points, 4]. + + Raises: + ValueError: if keypoints are provided but keypoint_flip_permutation is not. + ValueError: if either densepose_part_ids or densepose_surface_coords is + not None, but both are not None. + """ + + def _flip_image(image): + # flip image + image_flipped = tf.image.flip_left_right(image) + return image_flipped + + if keypoints is not None and keypoint_flip_permutation is None: + raise ValueError( + 'keypoints are provided but keypoints_flip_permutation is not provided') + + if ((densepose_part_ids is not None and densepose_surface_coords is None) or + (densepose_part_ids is None and densepose_surface_coords is not None)): + raise ValueError( + 'Must provide both `densepose_part_ids` and `densepose_surface_coords`') + + with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]): + result = [] + # random variable defining whether to do flip or not + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_a_flip_random = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.HORIZONTAL_FLIP, + preprocess_vars_cache) + do_a_flip_random = tf.less(do_a_flip_random, probability) + + # flip image + image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) + result.append(image) + + # flip boxes + if boxes is not None: + boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes), + lambda: boxes) + result.append(boxes) + + # flip masks + if masks is not None: + masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks), + lambda: masks) + result.append(masks) + + # flip keypoints + if keypoints is not None and keypoint_flip_permutation is not None: + permutation = keypoint_flip_permutation + keypoints = tf.cond( + do_a_flip_random, + lambda: keypoint_ops.flip_horizontal(keypoints, 0.5, permutation), + lambda: keypoints) + result.append(keypoints) + + # flip keypoint visibilities + if (keypoint_visibilities is not None and + keypoint_flip_permutation is not None): + kpt_flip_perm = keypoint_flip_permutation + keypoint_visibilities = tf.cond( + do_a_flip_random, + lambda: tf.gather(keypoint_visibilities, kpt_flip_perm, axis=1), + lambda: keypoint_visibilities) + result.append(keypoint_visibilities) + + # flip DensePose parts and coordinates + if densepose_part_ids is not None: + flip_densepose_fn = functools.partial( + densepose_ops.flip_horizontal, densepose_part_ids, + densepose_surface_coords) + densepose_tensors = tf.cond( + do_a_flip_random, + flip_densepose_fn, + lambda: (densepose_part_ids, densepose_surface_coords)) + result.extend(densepose_tensors) + + return tuple(result) + + +def random_vertical_flip(image, + 
boxes=None, + masks=None, + keypoints=None, + keypoint_flip_permutation=None, + probability=0.5, + seed=None, + preprocess_vars_cache=None): + """Randomly flips the image and detections vertically. + + The probability of flipping the image is 50%. + + Args: + image: rank 3 float32 tensor with shape [height, width, channels]. + boxes: (optional) rank 2 float32 tensor with shape [N, 4] + containing the bounding boxes. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip + permutation. + probability: the probability of performing this augmentation. + seed: random seed + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + + If boxes, masks, keypoints, and keypoint_flip_permutation are not None, + the function also returns the following tensors. + + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + + Raises: + ValueError: if keypoints are provided but keypoint_flip_permutation is not. 
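Illustrative aside (not part of the patch): both flip ops delegate the geometry to small helpers; left-right flipping maps a normalized box to [ymin, 1 - xmax, ymax, 1 - xmin] and up-down flipping to [1 - ymax, xmin, 1 - ymin, xmax], while images and masks are simply reversed along the corresponding axis. A minimal NumPy sketch of both box mappings, under the same [ymin, xmin, ymax, xmax] convention:

import numpy as np

def flip_boxes_left_right_np(boxes):
    # [ymin, xmin, ymax, xmax] -> [ymin, 1 - xmax, ymax, 1 - xmin]
    ymin, xmin, ymax, xmax = np.split(boxes, 4, axis=1)
    return np.concatenate([ymin, 1.0 - xmax, ymax, 1.0 - xmin], axis=1)

def flip_boxes_up_down_np(boxes):
    # [ymin, xmin, ymax, xmax] -> [1 - ymax, xmin, 1 - ymin, xmax]
    ymin, xmin, ymax, xmax = np.split(boxes, 4, axis=1)
    return np.concatenate([1.0 - ymax, xmin, 1.0 - ymin, xmax], axis=1)

boxes = np.array([[0.0, 0.1, 0.5, 0.4]], dtype=np.float32)
print(flip_boxes_left_right_np(boxes))  # [[0.  0.6 0.5 0.9]]
print(flip_boxes_up_down_np(boxes))     # [[0.5 0.1 1.  0.4]]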
+ """ + + def _flip_image(image): + # flip image + image_flipped = tf.image.flip_up_down(image) + return image_flipped + + if keypoints is not None and keypoint_flip_permutation is None: + raise ValueError( + 'keypoints are provided but keypoints_flip_permutation is not provided') + + with tf.name_scope('RandomVerticalFlip', values=[image, boxes]): + result = [] + # random variable defining whether to do flip or not + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_a_flip_random = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.VERTICAL_FLIP, + preprocess_vars_cache) + do_a_flip_random = tf.less(do_a_flip_random, probability) + + # flip image + image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) + result.append(image) + + # flip boxes + if boxes is not None: + boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_up_down(boxes), + lambda: boxes) + result.append(boxes) + + # flip masks + if masks is not None: + masks = tf.cond(do_a_flip_random, lambda: _flip_masks_up_down(masks), + lambda: masks) + result.append(masks) + + # flip keypoints + if keypoints is not None and keypoint_flip_permutation is not None: + permutation = keypoint_flip_permutation + keypoints = tf.cond( + do_a_flip_random, + lambda: keypoint_ops.flip_vertical(keypoints, 0.5, permutation), + lambda: keypoints) + result.append(keypoints) + + return tuple(result) + + +def random_rotation90(image, + boxes=None, + masks=None, + keypoints=None, + keypoint_rot_permutation=None, + probability=0.5, + seed=None, + preprocess_vars_cache=None): + """Randomly rotates the image and detections 90 degrees counter-clockwise. + + The probability of rotating the image is 50%. This can be combined with + random_horizontal_flip and random_vertical_flip to produce an output with a + uniform distribution of the eight possible 90 degree rotation / reflection + combinations. + + Args: + image: rank 3 float32 tensor with shape [height, width, channels]. + boxes: (optional) rank 2 float32 tensor with shape [N, 4] + containing the bounding boxes. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + keypoint_rot_permutation: rank 1 int32 tensor containing the keypoint flip + permutation. + probability: the probability of performing this augmentation. + seed: random seed + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + + If boxes, masks, and keypoints, are not None, + the function also returns the following tensors. + + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. 
+ keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + """ + + def _rot90_image(image): + # flip image + image_rotated = tf.image.rot90(image) + return image_rotated + + with tf.name_scope('RandomRotation90', values=[image, boxes]): + result = [] + + # random variable defining whether to rotate by 90 degrees or not + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_a_rot90_random = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.ROTATION90, + preprocess_vars_cache) + do_a_rot90_random = tf.less(do_a_rot90_random, probability) + + # flip image + image = tf.cond(do_a_rot90_random, lambda: _rot90_image(image), + lambda: image) + result.append(image) + + # flip boxes + if boxes is not None: + boxes = tf.cond(do_a_rot90_random, lambda: _rot90_boxes(boxes), + lambda: boxes) + result.append(boxes) + + # flip masks + if masks is not None: + masks = tf.cond(do_a_rot90_random, lambda: _rot90_masks(masks), + lambda: masks) + result.append(masks) + + # flip keypoints + if keypoints is not None: + keypoints = tf.cond( + do_a_rot90_random, + lambda: keypoint_ops.rot90(keypoints, keypoint_rot_permutation), + lambda: keypoints) + result.append(keypoints) + + return tuple(result) + + +def random_pixel_value_scale(image, + minval=0.9, + maxval=1.1, + seed=None, + preprocess_vars_cache=None): + """Scales each value in the pixels of the image. + + This function scales each pixel independent of the other ones. + For each value in image tensor, draws a random number between + minval and maxval and multiples the values with them. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + minval: lower ratio of scaling pixel values. + maxval: upper ratio of scaling pixel values. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + """ + with tf.name_scope('RandomPixelValueScale', values=[image]): + generator_func = functools.partial( + tf.random_uniform, tf.shape(image), + minval=minval, maxval=maxval, + dtype=tf.float32, seed=seed) + color_coef = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.PIXEL_VALUE_SCALE, + preprocess_vars_cache) + + image = tf.multiply(image, color_coef) + image = tf.clip_by_value(image, 0.0, 255.0) + + return image + + +def random_image_scale(image, + masks=None, + min_scale_ratio=0.5, + max_scale_ratio=2.0, + seed=None, + preprocess_vars_cache=None): + """Scales the image size. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels]. + masks: (optional) rank 3 float32 tensor containing masks with + size [height, width, num_masks]. The value is set to None if there are no + masks. + min_scale_ratio: minimum scaling ratio. + max_scale_ratio: maximum scaling ratio. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + masks: If masks is not none, resized masks which are the same rank as input + masks will be returned. 
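Illustrative aside (not part of the patch): random_pixel_value_scale above draws an independent multiplier in [minval, maxval] for every element of the image and clips the product back into [0, 255]. A minimal NumPy sketch of the same idea (names and the seed are illustrative):

import numpy as np

def random_pixel_value_scale_np(image, minval=0.9, maxval=1.1, seed=None):
    # Per-element multiplicative noise, clipped to the valid [0, 255] range.
    rng = np.random.default_rng(seed)
    color_coef = rng.uniform(minval, maxval, size=image.shape)
    return np.clip(image * color_coef, 0.0, 255.0)

image = np.full((2, 2, 3), 200.0, dtype=np.float32)
print(random_pixel_value_scale_np(image, seed=0))  # values near 200, none above 255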
+ """ + with tf.name_scope('RandomImageScale', values=[image]): + result = [] + image_shape = tf.shape(image) + image_height = image_shape[0] + image_width = image_shape[1] + generator_func = functools.partial( + tf.random_uniform, [], + minval=min_scale_ratio, maxval=max_scale_ratio, + dtype=tf.float32, seed=seed) + size_coef = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.IMAGE_SCALE, + preprocess_vars_cache) + + image_newysize = tf.cast( + tf.multiply(tf.cast(image_height, dtype=tf.float32), size_coef), + dtype=tf.int32) + image_newxsize = tf.cast( + tf.multiply(tf.cast(image_width, dtype=tf.float32), size_coef), + dtype=tf.int32) + image = tf.image.resize_images( + image, [image_newysize, image_newxsize], align_corners=True) + result.append(image) + if masks is not None: + masks = tf.image.resize_images( + masks, [image_newysize, image_newxsize], + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, + align_corners=True) + result.append(masks) + return tuple(result) + + +def _augment_only_rgb_channels(image, augment_function): + """Augments only the RGB slice of an image with additional channels.""" + rgb_slice = image[:, :, :3] + augmented_rgb_slice = augment_function(rgb_slice) + image = tf.concat([augmented_rgb_slice, image[:, :, 3:]], -1) + return image + + +def random_rgb_to_gray(image, + probability=0.1, + seed=None, + preprocess_vars_cache=None): + """Changes the image from RGB to Grayscale with the given probability. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + probability: the probability of returning a grayscale image. + The probability should be a number between [0, 1]. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + """ + def _image_to_gray(image): + image_gray1 = _rgb_to_grayscale(image) + image_gray3 = tf.image.grayscale_to_rgb(image_gray1) + return image_gray3 + + with tf.name_scope('RandomRGBtoGray', values=[image]): + # random variable defining whether to change to grayscale or not + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_gray_random = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.RGB_TO_GRAY, + preprocess_vars_cache) + + image = tf.cond( + tf.greater(do_gray_random, probability), lambda: image, + lambda: _augment_only_rgb_channels(image, _image_to_gray)) + + return image + + +def random_adjust_brightness(image, + max_delta=0.2, + seed=None, + preprocess_vars_cache=None): + """Randomly adjusts brightness. + + Makes sure the output image is still between 0 and 255. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + max_delta: how much to change the brightness. A value between [0, 1). + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + boxes: boxes which is the same shape as input boxes. 
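Illustrative aside (not part of the patch): _augment_only_rgb_channels above lets a color augmentation run on the first three channels while any extra channels (e.g. depth) pass through untouched, and random_rgb_to_gray uses it with a grayscale conversion. A small NumPy sketch of the pattern; to_gray is a hypothetical stand-in using common BT.601-style luma weights (the exact weights used by the patch live in _rgb_to_grayscale, defined earlier in this file):

import numpy as np

def augment_only_rgb_channels_np(image, augment_fn):
    # Apply augment_fn to the RGB slice, keep trailing channels as-is.
    rgb = image[:, :, :3]
    return np.concatenate([augment_fn(rgb), image[:, :, 3:]], axis=-1)

def to_gray(rgb):
    # Hypothetical grayscale conversion, replicated back to three channels.
    gray = rgb @ np.array([0.299, 0.587, 0.114], dtype=np.float32)
    return np.repeat(gray[:, :, np.newaxis], 3, axis=-1)

rgbd = np.random.rand(4, 4, 4).astype(np.float32)  # RGB plus one extra channel
print(augment_only_rgb_channels_np(rgbd, to_gray).shape)  # (4, 4, 4)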
+ """ + with tf.name_scope('RandomAdjustBrightness', values=[image]): + generator_func = functools.partial(tf.random_uniform, [], + -max_delta, max_delta, seed=seed) + delta = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.ADJUST_BRIGHTNESS, + preprocess_vars_cache) + + def _adjust_brightness(image): + image = tf.image.adjust_brightness(image / 255, delta) * 255 + image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) + return image + + image = _augment_only_rgb_channels(image, _adjust_brightness) + return image + + +def random_adjust_contrast(image, + min_delta=0.8, + max_delta=1.25, + seed=None, + preprocess_vars_cache=None): + """Randomly adjusts contrast. + + Makes sure the output image is still between 0 and 255. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + min_delta: see max_delta. + max_delta: how much to change the contrast. Contrast will change with a + value between min_delta and max_delta. This value will be + multiplied to the current contrast of the image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + """ + with tf.name_scope('RandomAdjustContrast', values=[image]): + generator_func = functools.partial(tf.random_uniform, [], + min_delta, max_delta, seed=seed) + contrast_factor = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.ADJUST_CONTRAST, + preprocess_vars_cache) + + def _adjust_contrast(image): + image = tf.image.adjust_contrast(image / 255, contrast_factor) * 255 + image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) + return image + image = _augment_only_rgb_channels(image, _adjust_contrast) + return image + + +def random_adjust_hue(image, + max_delta=0.02, + seed=None, + preprocess_vars_cache=None): + """Randomly adjusts hue. + + Makes sure the output image is still between 0 and 255. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + max_delta: change hue randomly with a value between 0 and max_delta. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + """ + with tf.name_scope('RandomAdjustHue', values=[image]): + generator_func = functools.partial(tf.random_uniform, [], + -max_delta, max_delta, seed=seed) + delta = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.ADJUST_HUE, + preprocess_vars_cache) + def _adjust_hue(image): + image = tf.image.adjust_hue(image / 255, delta) * 255 + image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) + return image + image = _augment_only_rgb_channels(image, _adjust_hue) + return image + + +def random_adjust_saturation(image, + min_delta=0.8, + max_delta=1.25, + seed=None, + preprocess_vars_cache=None): + """Randomly adjusts saturation. + + Makes sure the output image is still between 0 and 255. 
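Illustrative aside (not part of the patch): the brightness/contrast/hue adjustments above all share one pattern: scale the [0, 255] image into [0, 1], apply the corresponding tf.image op, scale back, and clip. For brightness this reduces to adding 255 * delta per pixel and clipping, as in this NumPy sketch:

import numpy as np

def adjust_brightness_np(image, delta):
    # Net effect of tf.image.adjust_brightness(image / 255, delta) * 255,
    # followed by clipping to the valid range.
    return np.clip(image + 255.0 * delta, 0.0, 255.0)

image = np.array([[[10.0, 128.0, 250.0]]], dtype=np.float32)
print(adjust_brightness_np(image, 0.1))  # [[[ 35.5 153.5 255. ]]]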
+ + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + min_delta: see max_delta. + max_delta: how much to change the saturation. Saturation will change with a + value between min_delta and max_delta. This value will be + multiplied to the current saturation of the image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + """ + with tf.name_scope('RandomAdjustSaturation', values=[image]): + generator_func = functools.partial(tf.random_uniform, [], + min_delta, max_delta, seed=seed) + saturation_factor = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.ADJUST_SATURATION, + preprocess_vars_cache) + def _adjust_saturation(image): + image = tf.image.adjust_saturation(image / 255, saturation_factor) * 255 + image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) + return image + image = _augment_only_rgb_channels(image, _adjust_saturation) + return image + + +def random_distort_color(image, color_ordering=0, preprocess_vars_cache=None): + """Randomly distorts color. + + Randomly distorts color using a combination of brightness, hue, contrast and + saturation changes. Makes sure the output image is still between 0 and 255. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + color_ordering: Python int, a type of distortion (valid values: 0, 1). + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same shape as input image. + + Raises: + ValueError: if color_ordering is not in {0, 1}. + """ + with tf.name_scope('RandomDistortColor', values=[image]): + if color_ordering == 0: + image = random_adjust_brightness( + image, max_delta=32. / 255., + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_saturation( + image, min_delta=0.5, max_delta=1.5, + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_hue( + image, max_delta=0.2, + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_contrast( + image, min_delta=0.5, max_delta=1.5, + preprocess_vars_cache=preprocess_vars_cache) + + elif color_ordering == 1: + image = random_adjust_brightness( + image, max_delta=32. / 255., + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_contrast( + image, min_delta=0.5, max_delta=1.5, + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_saturation( + image, min_delta=0.5, max_delta=1.5, + preprocess_vars_cache=preprocess_vars_cache) + image = random_adjust_hue( + image, max_delta=0.2, + preprocess_vars_cache=preprocess_vars_cache) + else: + raise ValueError('color_ordering must be in {0, 1}') + return image + + +def random_jitter_boxes(boxes, ratio=0.05, seed=None): + """Randomly jitter boxes in image. + + Args: + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. 
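Illustrative aside (not part of the patch): every op in this file routes its random draws through _get_or_create_preprocess_rand_vars keyed on a PreprocessorCache, which is why repeated calls with the same non-null cache perform deterministically, as each docstring notes. A stripped-down sketch of that memoization idea, with a plain dict standing in for PreprocessorCache:

import functools
import random

def get_or_create_rand_var(generator_func, key, cache):
    # Reuse a previously drawn value for this key; otherwise draw and store it.
    if cache is not None and key in cache:
        return cache[key]
    value = generator_func()
    if cache is not None:
        cache[key] = value
    return value

cache = {}
draw = functools.partial(random.uniform, 0.0, 1.0)
first = get_or_create_rand_var(draw, 'horizontal_flip', cache)
second = get_or_create_rand_var(draw, 'horizontal_flip', cache)
assert first == second  # same cache, same key -> same random decision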
+ ratio: The ratio of the box width and height that the corners can jitter. + For example if the width is 100 pixels and ratio is 0.05, + the corners can jitter up to 5 pixels in the x direction. + seed: random seed. + + Returns: + boxes: boxes which is the same shape as input boxes. + """ + def random_jitter_box(box, ratio, seed): + """Randomly jitter box. + + Args: + box: bounding box [1, 1, 4]. + ratio: max ratio between jittered box and original box, + a number between [0, 0.5]. + seed: random seed. + + Returns: + jittered_box: jittered box. + """ + rand_numbers = tf.random_uniform( + [1, 1, 4], minval=-ratio, maxval=ratio, dtype=tf.float32, seed=seed) + box_width = tf.subtract(box[0, 0, 3], box[0, 0, 1]) + box_height = tf.subtract(box[0, 0, 2], box[0, 0, 0]) + hw_coefs = tf.stack([box_height, box_width, box_height, box_width]) + hw_rand_coefs = tf.multiply(hw_coefs, rand_numbers) + jittered_box = tf.add(box, hw_rand_coefs) + jittered_box = tf.clip_by_value(jittered_box, 0.0, 1.0) + return jittered_box + + with tf.name_scope('RandomJitterBoxes', values=[boxes]): + # boxes are [N, 4]. Lets first make them [N, 1, 1, 4] + boxes_shape = tf.shape(boxes) + boxes = tf.expand_dims(boxes, 1) + boxes = tf.expand_dims(boxes, 2) + + distorted_boxes = tf.map_fn( + lambda x: random_jitter_box(x, ratio, seed), boxes, dtype=tf.float32) + + distorted_boxes = tf.reshape(distorted_boxes, boxes_shape) + + return distorted_boxes + + +def _strict_random_crop_image(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + keypoint_visibilities=None, + densepose_num_points=None, + densepose_part_ids=None, + densepose_surface_coords=None, + min_object_covered=1.0, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.1, 1.0), + overlap_thresh=0.3, + clip_boxes=True, + preprocess_vars_cache=None): + """Performs random crop. + + Note: Keypoint coordinates that are outside the crop will be set to NaN, which + is consistent with the original keypoint encoding for non-existing keypoints. + This function always crops the image and is supposed to be used by + `random_crop_image` function which sometimes returns the image unchanged. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes with shape + [num_instances, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidence for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + keypoint_visibilities: (optional) rank 2 bool tensor with shape + [num_instances, num_keypoints]. + densepose_num_points: (optional) rank 1 int32 tensor with shape + [num_instances] with the number of sampled points per + instance. 
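Illustrative aside (not part of the patch): random_jitter_boxes above perturbs each corner by a uniform offset in [-ratio, ratio], scaled by the box height and width, and clips the result to the unit square. A minimal vectorized NumPy sketch of that jitter:

import numpy as np

def jitter_boxes_np(boxes, ratio=0.05, seed=None):
    # boxes: [N, 4] normalized [ymin, xmin, ymax, xmax].
    rng = np.random.default_rng(seed)
    heights = boxes[:, 2:3] - boxes[:, 0:1]
    widths = boxes[:, 3:4] - boxes[:, 1:2]
    hw_coefs = np.concatenate([heights, widths, heights, widths], axis=1)
    rand_numbers = rng.uniform(-ratio, ratio, size=boxes.shape)
    return np.clip(boxes + hw_coefs * rand_numbers, 0.0, 1.0)

boxes = np.array([[0.2, 0.2, 0.6, 0.8]], dtype=np.float32)
print(jitter_boxes_np(boxes, seed=1))  # each corner moves by at most 5% of h/w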
+ densepose_part_ids: (optional) rank 2 int32 tensor with shape + [num_instances, num_points] holding the part id for each + sampled point. These part_ids are 0-indexed, where the + first non-background part has index 0. + densepose_surface_coords: (optional) rank 3 float32 tensor with shape + [num_instances, num_points, 4]. The DensePose + coordinates are of the form (y, x, v, u) where + (y, x) are the normalized image coordinates for a + sampled point, and (v, u) is the surface + coordinate for the part. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If label_weights, multiclass_scores, masks, keypoints, + keypoint_visibilities, densepose_num_points, densepose_part_ids, or + densepose_surface_coords is not None, the function also returns: + label_weights: rank 1 float32 tensor with shape [num_instances]. + multiclass_scores: rank 2 float32 tensor with shape + [num_instances, num_classes] + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + keypoint_visibilities: rank 2 bool tensor with shape + [num_instances, num_keypoints] + densepose_num_points: rank 1 int32 tensor with shape [num_instances]. + densepose_part_ids: rank 2 int32 tensor with shape + [num_instances, num_points]. + densepose_surface_coords: rank 3 float32 tensor with shape + [num_instances, num_points, 4]. + + Raises: + ValueError: If some but not all of the DensePose tensors are provided. + """ + with tf.name_scope('RandomCropImage', values=[image, boxes]): + densepose_tensors = [densepose_num_points, densepose_part_ids, + densepose_surface_coords] + if (any(t is not None for t in densepose_tensors) and + not all(t is not None for t in densepose_tensors)): + raise ValueError('If cropping DensePose labels, must provide ' + '`densepose_num_points`, `densepose_part_ids`, and ' + '`densepose_surface_coords`') + image_shape = tf.shape(image) + + # boxes are [N, 4]. Lets first make them [N, 1, 4]. 
+ boxes_expanded = tf.expand_dims( + tf.clip_by_value( + boxes, clip_value_min=0.0, clip_value_max=1.0), 1) + + generator_func = functools.partial( + tf.image.sample_distorted_bounding_box, + image_shape, + bounding_boxes=boxes_expanded, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=100, + use_image_if_no_bounding_boxes=True) + + # for ssd cropping, each value of min_object_covered has its own + # cached random variable + sample_distorted_bounding_box = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.STRICT_CROP_IMAGE, + preprocess_vars_cache, key=min_object_covered) + + im_box_begin, im_box_size, im_box = sample_distorted_bounding_box + im_box_end = im_box_begin + im_box_size + new_image = image[im_box_begin[0]:im_box_end[0], + im_box_begin[1]:im_box_end[1], :] + new_image.set_shape([None, None, image.get_shape()[2]]) + + # [1, 4] + im_box_rank2 = tf.squeeze(im_box, axis=[0]) + # [4] + im_box_rank1 = tf.squeeze(im_box) + + boxlist = box_list.BoxList(boxes) + boxlist.add_field('labels', labels) + + if label_weights is not None: + boxlist.add_field('label_weights', label_weights) + + if label_confidences is not None: + boxlist.add_field('label_confidences', label_confidences) + + if multiclass_scores is not None: + boxlist.add_field('multiclass_scores', multiclass_scores) + + im_boxlist = box_list.BoxList(im_box_rank2) + + # remove boxes that are outside cropped image + boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window( + boxlist, im_box_rank1) + + # remove boxes that are outside image + overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes( + boxlist, im_boxlist, overlap_thresh) + + # change the coordinate of the remaining boxes + new_labels = overlapping_boxlist.get_field('labels') + new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist, + im_box_rank1) + new_boxes = new_boxlist.get() + if clip_boxes: + new_boxes = tf.clip_by_value( + new_boxes, clip_value_min=0.0, clip_value_max=1.0) + + result = [new_image, new_boxes, new_labels] + + if label_weights is not None: + new_label_weights = overlapping_boxlist.get_field('label_weights') + result.append(new_label_weights) + + if label_confidences is not None: + new_label_confidences = overlapping_boxlist.get_field('label_confidences') + result.append(new_label_confidences) + + if multiclass_scores is not None: + new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores') + result.append(new_multiclass_scores) + + if masks is not None: + masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids) + masks_of_boxes_completely_inside_window = tf.gather( + masks_of_boxes_inside_window, keep_ids) + new_masks = masks_of_boxes_completely_inside_window[:, im_box_begin[ + 0]:im_box_end[0], im_box_begin[1]:im_box_end[1]] + result.append(new_masks) + + if keypoints is not None: + keypoints_of_boxes_inside_window = tf.gather(keypoints, inside_window_ids) + keypoints_of_boxes_completely_inside_window = tf.gather( + keypoints_of_boxes_inside_window, keep_ids) + new_keypoints = keypoint_ops.change_coordinate_frame( + keypoints_of_boxes_completely_inside_window, im_box_rank1) + if clip_boxes: + new_keypoints = keypoint_ops.prune_outside_window(new_keypoints, + [0.0, 0.0, 1.0, 1.0]) + result.append(new_keypoints) + + if keypoint_visibilities is not None: + kpt_vis_of_boxes_inside_window = tf.gather(keypoint_visibilities, + inside_window_ids) + 
kpt_vis_of_boxes_completely_inside_window = tf.gather( + kpt_vis_of_boxes_inside_window, keep_ids) + if clip_boxes: + # Set any keypoints with NaN coordinates to invisible. + new_kpt_visibilities = keypoint_ops.set_keypoint_visibilities( + new_keypoints, kpt_vis_of_boxes_completely_inside_window) + result.append(new_kpt_visibilities) + + if densepose_num_points is not None: + filtered_dp_tensors = [] + for dp_tensor in densepose_tensors: + dp_tensor_inside_window = tf.gather(dp_tensor, inside_window_ids) + dp_tensor_completely_inside_window = tf.gather(dp_tensor_inside_window, + keep_ids) + filtered_dp_tensors.append(dp_tensor_completely_inside_window) + new_dp_num_points = filtered_dp_tensors[0] + new_dp_point_ids = filtered_dp_tensors[1] + new_dp_surf_coords = densepose_ops.change_coordinate_frame( + filtered_dp_tensors[2], im_box_rank1) + if clip_boxes: + new_dp_num_points, new_dp_point_ids, new_dp_surf_coords = ( + densepose_ops.prune_outside_window( + new_dp_num_points, new_dp_point_ids, new_dp_surf_coords, + window=[0.0, 0.0, 1.0, 1.0])) + result.extend([new_dp_num_points, new_dp_point_ids, new_dp_surf_coords]) + return tuple(result) + + +def random_crop_image(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + keypoint_visibilities=None, + densepose_num_points=None, + densepose_part_ids=None, + densepose_surface_coords=None, + min_object_covered=1.0, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.1, 1.0), + overlap_thresh=0.3, + clip_boxes=True, + random_coef=0.0, + seed=None, + preprocess_vars_cache=None): + """Randomly crops the image. + + Given the input image and its bounding boxes, this op randomly + crops a subimage. Given a user-provided set of input constraints, + the crop window is resampled until it satisfies these constraints. + If within 100 trials it is unable to find a valid crop, the original + image is returned. See the Args section for a description of the input + constraints. Both input boxes and returned Boxes are in normalized + form (e.g., lie in the unit square [0, 1]). + This function will return the original image with probability random_coef. + + Note: Keypoint coordinates that are outside the crop will be set to NaN, which + is consistent with the original keypoint encoding for non-existing keypoints. + Also, the keypoint visibility will be set to False. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes with shape + [num_instances, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances]. + representing the confidence for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. 
+ keypoint_visibilities: (optional) rank 2 bool tensor with shape + [num_instances, num_keypoints]. + densepose_num_points: (optional) rank 1 int32 tensor with shape + [num_instances] with the number of sampled points per + instance. + densepose_part_ids: (optional) rank 2 int32 tensor with shape + [num_instances, num_points] holding the part id for each + sampled point. These part_ids are 0-indexed, where the + first non-background part has index 0. + densepose_surface_coords: (optional) rank 3 float32 tensor with shape + [num_instances, num_points, 4]. The DensePose + coordinates are of the form (y, x, v, u) where + (y, x) are the normalized image coordinates for a + sampled point, and (v, u) is the surface + coordinate for the part. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: Image shape will be [new_height, new_width, channels]. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. + labels: new labels. + + If label_weights, multiclass_scores, masks, keypoints, + keypoint_visibilities, densepose_num_points, densepose_part_ids, + densepose_surface_coords is not None, the function also returns: + label_weights: rank 1 float32 tensor with shape [num_instances]. + multiclass_scores: rank 2 float32 tensor with shape + [num_instances, num_classes] + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + keypoint_visibilities: rank 2 bool tensor with shape + [num_instances, num_keypoints] + densepose_num_points: rank 1 int32 tensor with shape [num_instances]. + densepose_part_ids: rank 2 int32 tensor with shape + [num_instances, num_points]. + densepose_surface_coords: rank 3 float32 tensor with shape + [num_instances, num_points, 4]. + """ + + def strict_random_crop_image_fn(): + return _strict_random_crop_image( + image, + boxes, + labels, + label_weights, + label_confidences=label_confidences, + multiclass_scores=multiclass_scores, + masks=masks, + keypoints=keypoints, + keypoint_visibilities=keypoint_visibilities, + densepose_num_points=densepose_num_points, + densepose_part_ids=densepose_part_ids, + densepose_surface_coords=densepose_surface_coords, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + overlap_thresh=overlap_thresh, + clip_boxes=clip_boxes, + preprocess_vars_cache=preprocess_vars_cache) + + # avoids tf.cond to make faster RCNN training on borg. See b/140057645. 
+ if random_coef < sys.float_info.min: + result = strict_random_crop_image_fn() + else: + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_a_crop_random = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.CROP_IMAGE, + preprocess_vars_cache) + do_a_crop_random = tf.greater(do_a_crop_random, random_coef) + + outputs = [image, boxes, labels] + + if label_weights is not None: + outputs.append(label_weights) + if label_confidences is not None: + outputs.append(label_confidences) + if multiclass_scores is not None: + outputs.append(multiclass_scores) + if masks is not None: + outputs.append(masks) + if keypoints is not None: + outputs.append(keypoints) + if keypoint_visibilities is not None: + outputs.append(keypoint_visibilities) + if densepose_num_points is not None: + outputs.extend([densepose_num_points, densepose_part_ids, + densepose_surface_coords]) + + result = tf.cond(do_a_crop_random, strict_random_crop_image_fn, + lambda: tuple(outputs)) + return result + + +def random_pad_image(image, + boxes, + masks=None, + keypoints=None, + densepose_surface_coords=None, + min_image_size=None, + max_image_size=None, + pad_color=None, + seed=None, + preprocess_vars_cache=None): + """Randomly pads the image. + + This function randomly pads the image with zeros. The final size of the + padded image will be between min_image_size and max_image_size. + if min_image_size is smaller than the input image size, min_image_size will + be set to the input image size. The same for max_image_size. The input image + will be located at a uniformly random location inside the padded image. + The relative location of the boxes to the original image will remain the same. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [N, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [N, num_keypoints, 2]. The keypoints are in y-x normalized + coordinates. + densepose_surface_coords: (optional) rank 3 float32 tensor with shape + [N, num_points, 4]. The DensePose coordinates are + of the form (y, x, v, u) where (y, x) are the + normalized image coordinates for a sampled point, + and (v, u) is the surface coordinate for the part. + min_image_size: a tensor of size [min_height, min_width], type tf.int32. + If passed as None, will be set to image size + [height, width]. + max_image_size: a tensor of size [max_height, max_width], type tf.int32. + If passed as None, will be set to twice the + image [height * 2, width * 2]. + pad_color: padding color. A rank 1 tensor of [channels] with dtype= + tf.float32. if set as None, it will be set to average color of + the input image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: Image shape will be [new_height, new_width, channels]. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. 
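Illustrative aside (not part of the patch): after a crop window is sampled, both _strict_random_crop_image and random_crop_image re-express the surviving boxes relative to that window via box_list_ops.change_coordinate_frame. The coordinate change itself is a shift plus rescale, as in this NumPy sketch (the real code also prunes boxes by overlap first):

import numpy as np

def change_coordinate_frame_np(boxes, window):
    # boxes, window: normalized [ymin, xmin, ymax, xmax]; returns the boxes
    # expressed in the window's own normalized coordinate frame.
    win_height = window[2] - window[0]
    win_width = window[3] - window[1]
    shifted = boxes - np.array([window[0], window[1], window[0], window[1]])
    return shifted / np.array([win_height, win_width, win_height, win_width])

window = np.array([0.25, 0.25, 0.75, 0.75], dtype=np.float32)  # crop window
boxes = np.array([[0.25, 0.25, 0.5, 0.5]], dtype=np.float32)
print(change_coordinate_frame_np(boxes, window))  # [[0.  0.  0.5 0.5]]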
+ + if masks is not None, the function also returns: + masks: rank 3 float32 tensor with shape [N, new_height, new_width] + if keypoints is not None, the function also returns: + keypoints: rank 3 float32 tensor with shape [N, num_keypoints, 2] + if densepose_surface_coords is not None, the function also returns: + densepose_surface_coords: rank 3 float32 tensor with shape + [num_instances, num_points, 4] + """ + if pad_color is None: + pad_color = tf.reduce_mean(image, axis=[0, 1]) + + image_shape = tf.shape(image) + image_height = image_shape[0] + image_width = image_shape[1] + + if max_image_size is None: + max_image_size = tf.stack([image_height * 2, image_width * 2]) + max_image_size = tf.maximum(max_image_size, + tf.stack([image_height, image_width])) + + if min_image_size is None: + min_image_size = tf.stack([image_height, image_width]) + min_image_size = tf.maximum(min_image_size, + tf.stack([image_height, image_width])) + + target_height = tf.cond( + max_image_size[0] > min_image_size[0], + lambda: _random_integer(min_image_size[0], max_image_size[0], seed), + lambda: max_image_size[0]) + + target_width = tf.cond( + max_image_size[1] > min_image_size[1], + lambda: _random_integer(min_image_size[1], max_image_size[1], seed), + lambda: max_image_size[1]) + + offset_height = tf.cond( + target_height > image_height, + lambda: _random_integer(0, target_height - image_height, seed), + lambda: tf.constant(0, dtype=tf.int32)) + + offset_width = tf.cond( + target_width > image_width, + lambda: _random_integer(0, target_width - image_width, seed), + lambda: tf.constant(0, dtype=tf.int32)) + + gen_func = lambda: (target_height, target_width, offset_height, offset_width) + params = _get_or_create_preprocess_rand_vars( + gen_func, preprocessor_cache.PreprocessorCache.PAD_IMAGE, + preprocess_vars_cache) + target_height, target_width, offset_height, offset_width = params + + new_image = tf.image.pad_to_bounding_box( + image, + offset_height=offset_height, + offset_width=offset_width, + target_height=target_height, + target_width=target_width) + + # Setting color of the padded pixels + image_ones = tf.ones_like(image) + image_ones_padded = tf.image.pad_to_bounding_box( + image_ones, + offset_height=offset_height, + offset_width=offset_width, + target_height=target_height, + target_width=target_width) + image_color_padded = (1.0 - image_ones_padded) * pad_color + new_image += image_color_padded + + # setting boxes + new_window = tf.cast( + tf.stack([ + -offset_height, -offset_width, target_height - offset_height, + target_width - offset_width + ]), + dtype=tf.float32) + new_window /= tf.cast( + tf.stack([image_height, image_width, image_height, image_width]), + dtype=tf.float32) + boxlist = box_list.BoxList(boxes) + new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window) + new_boxes = new_boxlist.get() + + result = [new_image, new_boxes] + + if masks is not None: + new_masks = tf.image.pad_to_bounding_box( + masks[:, :, :, tf.newaxis], + offset_height=offset_height, + offset_width=offset_width, + target_height=target_height, + target_width=target_width)[:, :, :, 0] + result.append(new_masks) + + if keypoints is not None: + new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, new_window) + result.append(new_keypoints) + + if densepose_surface_coords is not None: + new_densepose_surface_coords = densepose_ops.change_coordinate_frame( + densepose_surface_coords, new_window) + result.append(new_densepose_surface_coords) + + return tuple(result) + + +def 
random_absolute_pad_image(image, + boxes, + masks=None, + keypoints=None, + densepose_surface_coords=None, + max_height_padding=None, + max_width_padding=None, + pad_color=None, + seed=None, + preprocess_vars_cache=None): + """Randomly pads the image by small absolute amounts. + + As random_pad_image above, but the padding is of size [0, max_height_padding] + or [0, max_width_padding] instead of padding to a fixed size of + max_height_padding for all images. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [N, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [N, num_keypoints, 2]. The keypoints are in y-x normalized + coordinates. + densepose_surface_coords: (optional) rank 3 float32 tensor with shape + [N, num_points, 4]. The DensePose coordinates are + of the form (y, x, v, u) where (y, x) are the + normalized image coordinates for a sampled point, + and (v, u) is the surface coordinate for the part. + max_height_padding: a scalar tf.int32 tensor denoting the maximum amount of + height padding. The padding will be chosen uniformly at + random from [0, max_height_padding). + max_width_padding: a scalar tf.int32 tensor denoting the maximum amount of + width padding. The padding will be chosen uniformly at + random from [0, max_width_padding). + pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. + if set as None, it will be set to average color of the input + image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: Image shape will be [new_height, new_width, channels]. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. + if masks is not None, the function also returns: + masks: rank 3 float32 tensor with shape [N, new_height, new_width] + if keypoints is not None, the function also returns: + keypoints: rank 3 float32 tensor with shape [N, num_keypoints, 2] + """ + min_image_size = tf.shape(image)[:2] + max_image_size = min_image_size + tf.cast( + [max_height_padding, max_width_padding], dtype=tf.int32) + return random_pad_image( + image, + boxes, + masks=masks, + keypoints=keypoints, + densepose_surface_coords=densepose_surface_coords, + min_image_size=min_image_size, + max_image_size=max_image_size, + pad_color=pad_color, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + +def random_crop_pad_image(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + min_object_covered=1.0, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.1, 1.0), + overlap_thresh=0.3, + clip_boxes=True, + random_coef=0.0, + min_padded_size_ratio=(1.0, 1.0), + max_padded_size_ratio=(2.0, 2.0), + pad_color=None, + seed=None, + preprocess_vars_cache=None): + """Randomly crops and pads the image. + + Given an input image and its bounding boxes, this op first randomly crops + the image and then randomly pads the image with background values. 
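Illustrative aside (not part of the patch): random_pad_image above builds new_window with negative offsets so that change_coordinate_frame maps the original normalized boxes onto the larger padded canvas; numerically, a box ends up at (box * [H, W, H, W] + [off_h, off_w, off_h, off_w]) / [target_h, target_w, target_h, target_w]. A NumPy sketch of that remapping:

import numpy as np

def remap_boxes_after_padding_np(boxes, image_hw, offset_hw, target_hw):
    # boxes: [N, 4] normalized wrt an image of size image_hw that is placed
    # at offset_hw inside a padded canvas of size target_hw.
    h, w = image_hw
    off_h, off_w = offset_hw
    t_h, t_w = target_hw
    absolute = boxes * np.array([h, w, h, w], dtype=np.float32)
    shifted = absolute + np.array([off_h, off_w, off_h, off_w], dtype=np.float32)
    return shifted / np.array([t_h, t_w, t_h, t_w], dtype=np.float32)

boxes = np.array([[0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
print(remap_boxes_after_padding_np(boxes, (100, 100), (50, 0), (200, 100)))
# [[0.25 0.   0.75 1.  ]]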
Parameters + min_padded_size_ratio and max_padded_size_ratio, determine the range of the + final output image size. Specifically, the final image size will have a size + in the range of min_padded_size_ratio * tf.shape(image) and + max_padded_size_ratio * tf.shape(image). Note that these ratios are with + respect to the size of the original image, so we can't capture the same + effect easily by independently applying RandomCropImage + followed by RandomPadImage. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: rank 1 float32 containing the label weights. + label_confidences: rank 1 float32 containing the label confidences. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + min_padded_size_ratio: min ratio of padded image height and width to the + input image's height and width. + max_padded_size_ratio: max ratio of padded image height and width to the + input image's height and width. + pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. + if set as None, it will be set to average color of the randomly + cropped image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + padded_image: padded image. + padded_boxes: boxes which is the same rank as input boxes. Boxes are in + normalized form. + cropped_labels: cropped labels. + if label_weights is not None also returns: + cropped_label_weights: cropped label weights. + if multiclass_scores is not None also returns: + cropped_multiclass_scores: cropped_multiclass_scores. 
+ + """ + image_size = tf.shape(image) + image_height = image_size[0] + image_width = image_size[1] + result = random_crop_image( + image=image, + boxes=boxes, + labels=labels, + label_weights=label_weights, + label_confidences=label_confidences, + multiclass_scores=multiclass_scores, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + overlap_thresh=overlap_thresh, + clip_boxes=clip_boxes, + random_coef=random_coef, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + cropped_image, cropped_boxes, cropped_labels = result[:3] + + min_image_size = tf.cast( + tf.cast(tf.stack([image_height, image_width]), dtype=tf.float32) * + min_padded_size_ratio, + dtype=tf.int32) + max_image_size = tf.cast( + tf.cast(tf.stack([image_height, image_width]), dtype=tf.float32) * + max_padded_size_ratio, + dtype=tf.int32) + + padded_image, padded_boxes = random_pad_image( + cropped_image, + cropped_boxes, + min_image_size=min_image_size, + max_image_size=max_image_size, + pad_color=pad_color, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + cropped_padded_output = (padded_image, padded_boxes, cropped_labels) + + index = 3 + if label_weights is not None: + cropped_label_weights = result[index] + cropped_padded_output += (cropped_label_weights,) + index += 1 + + if label_confidences is not None: + cropped_label_confidences = result[index] + cropped_padded_output += (cropped_label_confidences,) + index += 1 + + if multiclass_scores is not None: + cropped_multiclass_scores = result[index] + cropped_padded_output += (cropped_multiclass_scores,) + + return cropped_padded_output + + +def random_crop_to_aspect_ratio(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + aspect_ratio=1.0, + overlap_thresh=0.3, + clip_boxes=True, + seed=None, + preprocess_vars_cache=None): + """Randomly crops an image to the specified aspect ratio. + + Randomly crops the a portion of the image such that the crop is of the + specified aspect ratio, and the crop is as large as possible. If the specified + aspect ratio is larger than the aspect ratio of the image, this op will + randomly remove rows from the top and bottom of the image. If the specified + aspect ratio is less than the aspect ratio of the image, this op will randomly + remove cols from the left and right of the image. If the specified aspect + ratio is the same as the aspect ratio of the image, this op will return the + image. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidence for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. 
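Illustrative aside (not part of the patch): random_crop_pad_image above turns min/max_padded_size_ratio into absolute pixel bounds for random_pad_image by scaling the original (pre-crop) image size. A tiny sketch of that conversion:

import numpy as np

def padded_size_bounds(image_height, image_width,
                       min_padded_size_ratio=(1.0, 1.0),
                       max_padded_size_ratio=(2.0, 2.0)):
    # Pixel bounds handed to random_pad_image, derived from the original size.
    hw = np.array([image_height, image_width], dtype=np.float32)
    min_size = (hw * np.array(min_padded_size_ratio)).astype(np.int32)
    max_size = (hw * np.array(max_padded_size_ratio)).astype(np.int32)
    return min_size, max_size

print(padded_size_bounds(480, 640))  # min [480 640], max [ 960 1280]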
+ keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + aspect_ratio: the aspect ratio of cropped image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If label_weights, masks, keypoints, or multiclass_scores is not None, the + function also returns: + label_weights: rank 1 float32 tensor with shape [num_instances]. + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + multiclass_scores: rank 2 float32 tensor with shape + [num_instances, num_classes] + + Raises: + ValueError: If image is not a 3D tensor. + """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('RandomCropToAspectRatio', values=[image]): + image_shape = tf.shape(image) + orig_height = image_shape[0] + orig_width = image_shape[1] + orig_aspect_ratio = tf.cast( + orig_width, dtype=tf.float32) / tf.cast( + orig_height, dtype=tf.float32) + new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32) + + def target_height_fn(): + return tf.cast( + tf.round(tf.cast(orig_width, dtype=tf.float32) / new_aspect_ratio), + dtype=tf.int32) + + target_height = tf.cond(orig_aspect_ratio >= new_aspect_ratio, + lambda: orig_height, target_height_fn) + + def target_width_fn(): + return tf.cast( + tf.round(tf.cast(orig_height, dtype=tf.float32) * new_aspect_ratio), + dtype=tf.int32) + + target_width = tf.cond(orig_aspect_ratio <= new_aspect_ratio, + lambda: orig_width, target_width_fn) + + # either offset_height = 0 and offset_width is randomly chosen from + # [0, offset_width - target_width), or else offset_width = 0 and + # offset_height is randomly chosen from [0, offset_height - target_height) + offset_height = _random_integer(0, orig_height - target_height + 1, seed) + offset_width = _random_integer(0, orig_width - target_width + 1, seed) + + generator_func = lambda: (offset_height, offset_width) + offset_height, offset_width = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.CROP_TO_ASPECT_RATIO, + preprocess_vars_cache) + + new_image = tf.image.crop_to_bounding_box( + image, offset_height, offset_width, target_height, target_width) + + im_box = tf.stack([ + tf.cast(offset_height, dtype=tf.float32) / + tf.cast(orig_height, dtype=tf.float32), + tf.cast(offset_width, dtype=tf.float32) / + tf.cast(orig_width, dtype=tf.float32), + tf.cast(offset_height + target_height, dtype=tf.float32) / + tf.cast(orig_height, dtype=tf.float32), + tf.cast(offset_width + target_width, dtype=tf.float32) / + tf.cast(orig_width, dtype=tf.float32) + ]) + + boxlist = box_list.BoxList(boxes) + boxlist.add_field('labels', labels) + + boxlist.add_field('label_weights', label_weights) + + if label_confidences is not None: + boxlist.add_field('label_confidences', label_confidences) + + if multiclass_scores is not None: + 
boxlist.add_field('multiclass_scores', multiclass_scores) + + im_boxlist = box_list.BoxList(tf.expand_dims(im_box, 0)) + + # remove boxes whose overlap with the image is less than overlap_thresh + overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes( + boxlist, im_boxlist, overlap_thresh) + + # change the coordinate of the remaining boxes + new_labels = overlapping_boxlist.get_field('labels') + new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist, + im_box) + if clip_boxes: + new_boxlist = box_list_ops.clip_to_window( + new_boxlist, tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32)) + new_boxes = new_boxlist.get() + + result = [new_image, new_boxes, new_labels] + + new_label_weights = overlapping_boxlist.get_field('label_weights') + result.append(new_label_weights) + + if label_confidences is not None: + new_label_confidences = ( + overlapping_boxlist.get_field('label_confidences')) + result.append(new_label_confidences) + + if multiclass_scores is not None: + new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores') + result.append(new_multiclass_scores) + + if masks is not None: + masks_inside_window = tf.gather(masks, keep_ids) + masks_box_begin = tf.stack([0, offset_height, offset_width]) + masks_box_size = tf.stack([-1, target_height, target_width]) + new_masks = tf.slice(masks_inside_window, masks_box_begin, masks_box_size) + result.append(new_masks) + + if keypoints is not None: + keypoints_inside_window = tf.gather(keypoints, keep_ids) + new_keypoints = keypoint_ops.change_coordinate_frame( + keypoints_inside_window, im_box) + if clip_boxes: + new_keypoints = keypoint_ops.prune_outside_window(new_keypoints, + [0.0, 0.0, 1.0, 1.0]) + result.append(new_keypoints) + + return tuple(result) + + +def random_pad_to_aspect_ratio(image, + boxes, + masks=None, + keypoints=None, + aspect_ratio=1.0, + min_padded_size_ratio=(1.0, 1.0), + max_padded_size_ratio=(2.0, 2.0), + seed=None, + preprocess_vars_cache=None): + """Randomly zero pads an image to the specified aspect ratio. + + Pads the image so that the resulting image will have the specified aspect + ratio without scaling less than the min_padded_size_ratio or more than the + max_padded_size_ratio. If the min_padded_size_ratio or max_padded_size_ratio + is lower than what is possible to maintain the aspect ratio, then this method + will use the least padding to achieve the specified aspect ratio. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + aspect_ratio: aspect ratio of the final image. + min_padded_size_ratio: min ratio of padded image height and width to the + input image's height and width. + max_padded_size_ratio: max ratio of padded image height and width to the + input image's height and width. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. 
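Illustrative aside (not part of the patch): random_crop_to_aspect_ratio above keeps one image dimension fixed and shrinks the other so the crop matches the requested aspect ratio (width / height), then chooses the crop offset uniformly along the shrunken dimension. A small sketch of the target-size computation:

def crop_target_size(orig_height, orig_width, aspect_ratio):
    # aspect_ratio is width / height, matching the convention above.
    orig_aspect_ratio = orig_width / orig_height
    if orig_aspect_ratio >= aspect_ratio:
        # Image is too wide: keep the height, crop columns.
        return orig_height, int(round(orig_height * aspect_ratio))
    # Image is too tall: keep the width, crop rows.
    return int(round(orig_width / aspect_ratio)), orig_width

print(crop_target_size(480, 640, 1.0))  # (480, 480)
print(crop_target_size(640, 480, 1.0))  # (480, 480)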
If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If masks, or keypoints is not None, the function also returns: + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + + Raises: + ValueError: If image is not a 3D tensor. + """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('RandomPadToAspectRatio', values=[image]): + image_shape = tf.shape(image) + image_height = tf.cast(image_shape[0], dtype=tf.float32) + image_width = tf.cast(image_shape[1], dtype=tf.float32) + image_aspect_ratio = image_width / image_height + new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32) + target_height = tf.cond( + image_aspect_ratio <= new_aspect_ratio, + lambda: image_height, + lambda: image_width / new_aspect_ratio) + target_width = tf.cond( + image_aspect_ratio >= new_aspect_ratio, + lambda: image_width, + lambda: image_height * new_aspect_ratio) + + min_height = tf.maximum( + min_padded_size_ratio[0] * image_height, target_height) + min_width = tf.maximum( + min_padded_size_ratio[1] * image_width, target_width) + max_height = tf.maximum( + max_padded_size_ratio[0] * image_height, target_height) + max_width = tf.maximum( + max_padded_size_ratio[1] * image_width, target_width) + + max_scale = tf.minimum(max_height / target_height, max_width / target_width) + min_scale = tf.minimum( + max_scale, + tf.maximum(min_height / target_height, min_width / target_width)) + + generator_func = functools.partial(tf.random_uniform, [], + min_scale, max_scale, seed=seed) + scale = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.PAD_TO_ASPECT_RATIO, + preprocess_vars_cache) + + target_height = tf.round(scale * target_height) + target_width = tf.round(scale * target_width) + + new_image = tf.image.pad_to_bounding_box( + image, 0, 0, tf.cast(target_height, dtype=tf.int32), + tf.cast(target_width, dtype=tf.int32)) + + im_box = tf.stack([ + 0.0, + 0.0, + target_height / image_height, + target_width / image_width + ]) + boxlist = box_list.BoxList(boxes) + new_boxlist = box_list_ops.change_coordinate_frame(boxlist, im_box) + new_boxes = new_boxlist.get() + + result = [new_image, new_boxes] + + if masks is not None: + new_masks = tf.expand_dims(masks, -1) + new_masks = tf.image.pad_to_bounding_box( + new_masks, 0, 0, tf.cast(target_height, dtype=tf.int32), + tf.cast(target_width, dtype=tf.int32)) + new_masks = tf.squeeze(new_masks, [-1]) + result.append(new_masks) + + if keypoints is not None: + new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, im_box) + result.append(new_keypoints) + + return tuple(result) + + +def random_black_patches(image, + max_black_patches=10, + probability=0.5, + size_to_image_ratio=0.1, + random_seed=None, + preprocess_vars_cache=None): + """Randomly adds some black patches to the image. + + This op adds up to max_black_patches square black patches of a fixed size + to the image where size is specified via the size_to_image_ratio parameter. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. 
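The zero-padding geometry of random_pad_to_aspect_ratio above can be checked the same way. This is only a sketch: pad_target_size is a hypothetical name, min_ratio/max_ratio abbreviate min_padded_size_ratio/max_padded_size_ratio, and a caller-supplied scale is clamped to the [min_scale, max_scale] interval that the op samples from.

def pad_target_size(height, width, aspect_ratio=1.0,
                    min_ratio=(1.0, 1.0), max_ratio=(2.0, 2.0), scale=1.0):
    """(height, width) of the zero-padded canvas for one scale value."""
    image_ar = width / height
    # Pad only along the dimension that is too short for the target ratio.
    target_h = height if image_ar <= aspect_ratio else width / aspect_ratio
    target_w = width if image_ar >= aspect_ratio else height * aspect_ratio
    min_h = max(min_ratio[0] * height, target_h)
    min_w = max(min_ratio[1] * width, target_w)
    max_h = max(max_ratio[0] * height, target_h)
    max_w = max(max_ratio[1] * width, target_w)
    max_scale = min(max_h / target_h, max_w / target_w)
    min_scale = min(max_scale, max(min_h / target_h, min_w / target_w))
    s = min(max(scale, min_scale), max_scale)  # the op samples s uniformly in [min_scale, max_scale]
    return round(s * target_h), round(s * target_w)

# A 200x300 image padded to a square aspect ratio needs at least a 300x300 canvas.
print(pad_target_size(200, 300, aspect_ratio=1.0, scale=1.0))  # -> (300, 300)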
+ max_black_patches: number of times that the function tries to add a + black box to the image. + probability: at each try, what is the chance of adding a box. + size_to_image_ratio: Determines the ratio of the size of the black patches + to the size of the image. + box_size = size_to_image_ratio * + min(image_width, image_height) + random_seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image + """ + def add_black_patch_to_image(image, idx): + """Function for adding one patch to the image. + + Args: + image: image + idx: counter for number of patches that could have been added + + Returns: + image with a randomly added black box + """ + image_shape = tf.shape(image) + image_height = image_shape[0] + image_width = image_shape[1] + box_size = tf.cast( + tf.multiply( + tf.minimum( + tf.cast(image_height, dtype=tf.float32), + tf.cast(image_width, dtype=tf.float32)), size_to_image_ratio), + dtype=tf.int32) + + generator_func = functools.partial(tf.random_uniform, [], minval=0.0, + maxval=(1.0 - size_to_image_ratio), + seed=random_seed) + normalized_y_min = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH, + preprocess_vars_cache, key=str(idx) + 'y') + normalized_x_min = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH, + preprocess_vars_cache, key=str(idx) + 'x') + + y_min = tf.cast( + normalized_y_min * tf.cast(image_height, dtype=tf.float32), + dtype=tf.int32) + x_min = tf.cast( + normalized_x_min * tf.cast(image_width, dtype=tf.float32), + dtype=tf.int32) + black_box = tf.ones([box_size, box_size, 3], dtype=tf.float32) + mask = 1.0 - tf.image.pad_to_bounding_box(black_box, y_min, x_min, + image_height, image_width) + image = tf.multiply(image, mask) + return image + + with tf.name_scope('RandomBlackPatchInImage', values=[image]): + for idx in range(max_black_patches): + generator_func = functools.partial(tf.random_uniform, [], + minval=0.0, maxval=1.0, + dtype=tf.float32, seed=random_seed) + random_prob = _get_or_create_preprocess_rand_vars( + generator_func, + preprocessor_cache.PreprocessorCache.BLACK_PATCHES, + preprocess_vars_cache, key=idx) + image = tf.cond( + tf.greater(random_prob, probability), lambda: image, + functools.partial(add_black_patch_to_image, image=image, idx=idx)) + return image + + +def random_jpeg_quality(image, + min_jpeg_quality=0, + max_jpeg_quality=100, + random_coef=0.0, + seed=None, + preprocess_vars_cache=None): + """Randomly encode the image to a random JPEG quality level. + + Args: + image: rank 3 float32 tensor with shape [height, width, channels] and + values in the range [0, 255]. + min_jpeg_quality: An int for the lower bound for selecting a random jpeg + quality level. + max_jpeg_quality: An int for the upper bound for selecting a random jpeg + quality level. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the encoded image, + and if it is 1.0, we will always get the original image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this function is called + multiple times with the same non-null cache, it will perform + deterministically. 
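In add_black_patch_to_image above, the patch is applied by multiplying the image with a mask built as 1.0 minus a padded box of ones. A NumPy sketch of the same effect, assuming NumPy is available and slicing the mask directly instead of padding; box_size is passed in rather than derived from size_to_image_ratio:

import numpy as np

def add_black_patch(image, y_min, x_min, box_size):
    """Zeroes a box_size x box_size square, mirroring the mask trick above."""
    h, w, _ = image.shape
    mask = np.ones((h, w, 1), dtype=np.float32)
    mask[y_min:y_min + box_size, x_min:x_min + box_size, :] = 0.0
    return image * mask  # equivalent to image * (1 - padded box of ones)

img = np.ones((8, 8, 3), dtype=np.float32)
out = add_black_patch(img, y_min=2, x_min=3, box_size=4)
print(out[:, :, 0])  # a 4x4 block of zeros starting at row 2, column 3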
+ + Returns: + image: image which is the same shape as input image. + """ + def _adjust_jpeg_quality(): + """Encodes the image as jpeg with a random quality and then decodes.""" + generator_func = functools.partial( + tf.random_uniform, [], + minval=min_jpeg_quality, + maxval=max_jpeg_quality, + dtype=tf.int32, + seed=seed) + quality = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.JPEG_QUALITY, + preprocess_vars_cache, key='quality') + + # Need to convert to uint8 before calling adjust_jpeg_quality since it + # assumes that float features are in the range [0, 1], where herein the + # range is [0, 255]. + image_uint8 = tf.cast(image, tf.uint8) + adjusted_image = tf.image.adjust_jpeg_quality(image_uint8, quality) + return tf.cast(adjusted_image, tf.float32) + + with tf.name_scope('RandomJpegQuality', values=[image]): + generator_func = functools.partial(tf.random_uniform, [], seed=seed) + do_encoding_random = _get_or_create_preprocess_rand_vars( + generator_func, preprocessor_cache.PreprocessorCache.JPEG_QUALITY, + preprocess_vars_cache) + do_encoding_random = tf.greater_equal(do_encoding_random, random_coef) + image = tf.cond(do_encoding_random, _adjust_jpeg_quality, + lambda: tf.cast(image, tf.float32)) + + return image + + +def random_downscale_to_target_pixels(image, + masks=None, + min_target_pixels=300000, + max_target_pixels=800000, + random_coef=0.0, + seed=None, + preprocess_vars_cache=None): + """Randomly downscales the image to a target number of pixels. + + If the image contains less than the chosen target number of pixels, it will + not be downscaled. + + Args: + image: Rank 3 float32 tensor with shape [height, width, channels] and + values in the range [0, 255]. + masks: (optional) Rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks are of + the same height, width as the input `image`. + min_target_pixels: Integer. An inclusive lower bound for for the target + number of pixels. + max_target_pixels: Integer. An exclusive upper bound for for the target + number of pixels. + random_coef: Float. Random coefficient that defines the chance of getting + the original image. If random_coef is 0, we will always apply downscaling, + and if it is 1.0, we will always get the original image. + seed: (optional) Integer. Random seed. + preprocess_vars_cache: (optional) PreprocessorCache object that records + previously performed augmentations. Updated in-place. If this function is + called multiple times with the same non-null cache, it will perform + deterministically. + + Returns: + Tuple with elements: + image: Resized image which is the same rank as input image. + masks: If masks is not None, resized masks which are the same rank as + the input masks. + + Raises: + ValueError: If min_target_pixels or max_target_pixels are not positive. 
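Several of these ops (random_jpeg_quality above, random_downscale_to_target_pixels below, random_patch_gaussian) share the same random_coef gate: draw a uniform value and apply the op only when it is greater than or equal to random_coef. A plain-Python sketch of that gate, with the hypothetical name maybe_apply:

import random

def maybe_apply(op, image, random_coef=0.0, seed=None):
    """Applies `op` with probability (1 - random_coef)."""
    rng = random.Random(seed)
    do_apply = rng.uniform(0.0, 1.0) >= random_coef  # same comparison as tf.greater_equal
    return op(image) if do_apply else image

# random_coef=0.0 always applies the op; random_coef=1.0 (almost) always keeps the input.
print(maybe_apply(lambda x: x * 2, 10, random_coef=0.0, seed=1))  # -> 20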
+ """ + if min_target_pixels <= 0: + raise ValueError('Minimum target pixels must be positive') + if max_target_pixels <= 0: + raise ValueError('Maximum target pixels must be positive') + + def _resize_image_to_target(target_height, target_width): + # pylint: disable=unbalanced-tuple-unpacking + new_image, _ = resize_image(image, None, target_height, target_width) + return (new_image,) + + def _resize_image_and_masks_to_target(target_height, target_width): + # pylint: disable=unbalanced-tuple-unpacking + new_image, new_masks, _ = resize_image(image, masks, target_height, + target_width) + return new_image, new_masks + + with tf.name_scope('RandomDownscaleToTargetPixels', values=[image]): + generator_fn = functools.partial(tf.random_uniform, [], seed=seed) + do_downscale_random = _get_or_create_preprocess_rand_vars( + generator_fn, + preprocessor_cache.PreprocessorCache.DOWNSCALE_TO_TARGET_PIXELS, + preprocess_vars_cache) + do_downscale_random = tf.greater_equal(do_downscale_random, random_coef) + + generator_fn = functools.partial( + tf.random_uniform, [], + minval=min_target_pixels, + maxval=max_target_pixels, + dtype=tf.int32, + seed=seed) + target_pixels = _get_or_create_preprocess_rand_vars( + generator_fn, + preprocessor_cache.PreprocessorCache.DOWNSCALE_TO_TARGET_PIXELS, + preprocess_vars_cache, + key='target_pixels') + + image_shape = tf.shape(image) + image_height = image_shape[0] + image_width = image_shape[1] + image_pixels = image_height * image_width + scale_factor = tf.sqrt( + tf.cast(target_pixels, dtype=tf.float32) / + tf.cast(image_pixels, dtype=tf.float32)) + target_height = tf.cast( + scale_factor * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32) + target_width = tf.cast( + scale_factor * tf.cast(image_width, dtype=tf.float32), dtype=tf.int32) + image_larger_than_target = tf.greater(image_pixels, target_pixels) + + should_apply_resize = tf.logical_and(do_downscale_random, + image_larger_than_target) + if masks is not None: + resize_fn = functools.partial(_resize_image_and_masks_to_target, + target_height, target_width) + return tf.cond(should_apply_resize, resize_fn, + lambda: (tf.cast(image, dtype=tf.float32), masks)) + else: + resize_fn = lambda: _resize_image_to_target(target_height, target_width) + return tf.cond(should_apply_resize, resize_fn, + lambda: (tf.cast(image, dtype=tf.float32),)) + + +def random_patch_gaussian(image, + min_patch_size=1, + max_patch_size=250, + min_gaussian_stddev=0.0, + max_gaussian_stddev=1.0, + random_coef=0.0, + seed=None, + preprocess_vars_cache=None): + """Randomly applies gaussian noise to a random patch on the image. + + The gaussian noise is applied to the image with values scaled to the range + [0.0, 1.0]. The result of applying gaussian noise to the scaled image is + clipped to be within the range [0.0, 1.0], equivalent to the range + [0.0, 255.0] after rescaling the image back. + + See "Improving Robustness Without Sacrificing Accuracy with Patch Gaussian + Augmentation " by Lopes et al., 2019, for further details. + https://arxiv.org/abs/1906.02611 + + Args: + image: Rank 3 float32 tensor with shape [height, width, channels] and + values in the range [0.0, 255.0]. + min_patch_size: Integer. An inclusive lower bound for the patch size. + max_patch_size: Integer. An exclusive upper bound for the patch size. + min_gaussian_stddev: Float. An inclusive lower bound for the standard + deviation of the gaussian noise. + max_gaussian_stddev: Float. An exclusive upper bound for the standard + deviation of the gaussian noise. 
+ random_coef: Float. Random coefficient that defines the chance of getting + the original image. If random_coef is 0.0, we will always apply + downscaling, and if it is 1.0, we will always get the original image. + seed: (optional) Integer. Random seed. + preprocess_vars_cache: (optional) PreprocessorCache object that records + previously performed augmentations. Updated in-place. If this function is + called multiple times with the same non-null cache, it will perform + deterministically. + + Returns: + Rank 3 float32 tensor with same shape as the input image and with gaussian + noise applied within a random patch. + + Raises: + ValueError: If min_patch_size is < 1. + """ + if min_patch_size < 1: + raise ValueError('Minimum patch size must be >= 1.') + + get_or_create_rand_vars_fn = functools.partial( + _get_or_create_preprocess_rand_vars, + function_id=preprocessor_cache.PreprocessorCache.PATCH_GAUSSIAN, + preprocess_vars_cache=preprocess_vars_cache) + + def _apply_patch_gaussian(image): + """Applies a patch gaussian with random size, location, and stddev.""" + patch_size = get_or_create_rand_vars_fn( + functools.partial( + tf.random_uniform, [], + minval=min_patch_size, + maxval=max_patch_size, + dtype=tf.int32, + seed=seed), + key='patch_size') + gaussian_stddev = get_or_create_rand_vars_fn( + functools.partial( + tf.random_uniform, [], + minval=min_gaussian_stddev, + maxval=max_gaussian_stddev, + dtype=tf.float32, + seed=seed), + key='gaussian_stddev') + + image_shape = tf.shape(image) + y = get_or_create_rand_vars_fn( + functools.partial( + tf.random_uniform, [], + minval=0, + maxval=image_shape[0], + dtype=tf.int32, + seed=seed), + key='y') + x = get_or_create_rand_vars_fn( + functools.partial( + tf.random_uniform, [], + minval=0, + maxval=image_shape[1], + dtype=tf.int32, + seed=seed), + key='x') + gaussian = get_or_create_rand_vars_fn( + functools.partial( + tf.random.normal, + image_shape, + stddev=gaussian_stddev, + dtype=tf.float32, + seed=seed), + key='gaussian') + + scaled_image = image / 255.0 + image_plus_gaussian = tf.clip_by_value(scaled_image + gaussian, 0.0, 1.0) + patch_mask = patch_ops.get_patch_mask(y, x, patch_size, image_shape) + patch_mask = tf.expand_dims(patch_mask, -1) + patch_mask = tf.tile(patch_mask, [1, 1, image_shape[2]]) + patched_image = tf.where(patch_mask, image_plus_gaussian, scaled_image) + return patched_image * 255.0 + + with tf.name_scope('RandomPatchGaussian', values=[image]): + image = tf.cast(image, tf.float32) + patch_gaussian_random = get_or_create_rand_vars_fn( + functools.partial(tf.random_uniform, [], seed=seed)) + do_patch_gaussian = tf.greater_equal(patch_gaussian_random, random_coef) + image = tf.cond(do_patch_gaussian, + lambda: _apply_patch_gaussian(image), + lambda: image) + return image + + +# TODO(barretzoph): Put in AutoAugment Paper link when paper is live. +def autoaugment_image(image, boxes, policy_name='v0'): + """Apply an autoaugment policy to the image and boxes. + + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 255]. + boxes: rank 2 float32 tensor containing the bounding boxes with shape + [num_instances, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + policy_name: The name of the AutoAugment policy to use. The available + options are `v0`, `v1`, `v2`, `v3` and `test`. 
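random_patch_gaussian above works on the image rescaled to [0, 1]: add noise, clip, then blend the noisy version back only inside the patch. A NumPy sketch of that blend; the patch here is simply anchored at (y, x), whereas the real op delegates placement to patch_ops.get_patch_mask:

import numpy as np

def patch_gaussian(image, y, x, patch_size, stddev, seed=0):
    """Adds clipped gaussian noise inside one square patch; pixels are in [0, 255]."""
    rng = np.random.default_rng(seed)
    scaled = image / 255.0
    noisy = np.clip(scaled + rng.normal(0.0, stddev, image.shape), 0.0, 1.0)
    mask = np.zeros(image.shape[:2], dtype=bool)
    mask[y:y + patch_size, x:x + patch_size] = True
    out = np.where(mask[..., None], noisy, scaled)  # noise only inside the patch
    return out * 255.0

img = np.full((6, 6, 3), 128.0)
print(patch_gaussian(img, y=1, x=1, patch_size=3, stddev=0.5)[..., 0].round(1))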
`v0` is the policy used for + all of the results in the paper and was found to achieve the best results + on the COCO dataset. `v1`, `v2` and `v3` are additional good policies + found on the COCO dataset that have slight variation in what operations + were used during the search procedure along with how many operations are + applied in parallel to a single image (2 vs 3). + + + Returns: + image: the augmented image. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. boxes will have been augmented along with image. + """ + return autoaugment_utils.distort_image_with_autoaugment( + image, boxes, policy_name) + + +def image_to_float(image): + """Used in Faster R-CNN. Casts image pixel values to float. + + Args: + image: input image which might be in tf.uint8 or sth else format + + Returns: + image: image in tf.float32 format. + """ + with tf.name_scope('ImageToFloat', values=[image]): + image = tf.cast(image, dtype=tf.float32) + return image + + +def random_resize_method(image, target_size, preprocess_vars_cache=None): + """Uses a random resize method to resize the image to target size. + + Args: + image: a rank 3 tensor. + target_size: a list of [target_height, target_width] + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + resized image. + """ + + resized_image = _apply_with_random_selector( + image, + lambda x, method: tf.image.resize_images(x, target_size, method), + num_cases=4, + preprocess_vars_cache=preprocess_vars_cache, + key=preprocessor_cache.PreprocessorCache.RESIZE_METHOD) + + return resized_image + + +def resize_to_range(image, + masks=None, + min_dimension=None, + max_dimension=None, + method=tf.image.ResizeMethod.BILINEAR, + align_corners=False, + pad_to_max_dimension=False, + per_channel_pad_value=(0, 0, 0)): + """Resizes an image so its dimensions are within the provided value. + + The output size can be described by two cases: + 1. If the image can be rescaled so its minimum dimension is equal to the + provided value without the other dimension exceeding max_dimension, + then do so. + 2. Otherwise, resize so the largest dimension is equal to max_dimension. + + Args: + image: A 3D tensor of shape [height, width, channels] + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. + min_dimension: (optional) (scalar) desired size of the smaller image + dimension. + max_dimension: (optional) (scalar) maximum allowed size + of the larger image dimension. + method: (optional) interpolation method used in resizing. Defaults to + BILINEAR. + align_corners: bool. If true, exactly align all 4 corners of the input + and output. Defaults to False. + pad_to_max_dimension: Whether to resize the image and pad it with zeros + so the resulting image is of the spatial size + [max_dimension, max_dimension]. If masks are included they are padded + similarly. + per_channel_pad_value: A tuple of per-channel scalar value to use for + padding. By default pads zeros. + + Returns: + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A 3D tensor of shape [new_height, new_width, channels], + where the image has been resized (with bilinear interpolation) so that + min(new_height, new_width) == min_dimension or + max(new_height, new_width) == max_dimension. 
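The two output-size cases of resize_to_range can be computed without TensorFlow. This sketch only derives the target size; the actual rounding of the preserve_aspect_ratio resize may differ by a pixel:

def resize_to_range_size(height, width, min_dimension, max_dimension):
    """Scale so the smaller side equals min_dimension, unless the larger side would
    exceed max_dimension, in which case scale so the larger side equals max_dimension."""
    small, large = min(height, width), max(height, width)
    scale = min_dimension / small
    if large * scale > max_dimension:
        scale = max_dimension / large
    return int(round(height * scale)), int(round(width * scale))

print(resize_to_range_size(600, 800, 600, 1024))   # case 1 -> (600, 800)
print(resize_to_range_size(400, 1200, 600, 1024))  # case 2 -> (341, 1024)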
+ resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width]. + resized_image_shape: A 1D tensor of shape [3] containing shape of the + resized image. + + Raises: + ValueError: if the image is not a 3D tensor. + """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + def _resize_landscape_image(image): + # resize a landscape image + return tf.image.resize_images( + image, tf.stack([min_dimension, max_dimension]), method=method, + align_corners=align_corners, preserve_aspect_ratio=True) + + def _resize_portrait_image(image): + # resize a portrait image + return tf.image.resize_images( + image, tf.stack([max_dimension, min_dimension]), method=method, + align_corners=align_corners, preserve_aspect_ratio=True) + + with tf.name_scope('ResizeToRange', values=[image, min_dimension]): + if image.get_shape().is_fully_defined(): + if image.get_shape()[0] < image.get_shape()[1]: + new_image = _resize_landscape_image(image) + else: + new_image = _resize_portrait_image(image) + new_size = tf.constant(new_image.get_shape().as_list()) + else: + new_image = tf.cond( + tf.less(tf.shape(image)[0], tf.shape(image)[1]), + lambda: _resize_landscape_image(image), + lambda: _resize_portrait_image(image)) + new_size = tf.shape(new_image) + + if pad_to_max_dimension: + channels = tf.unstack(new_image, axis=2) + if len(channels) != len(per_channel_pad_value): + raise ValueError('Number of channels must be equal to the length of ' + 'per-channel pad value.') + new_image = tf.stack( + [ + tf.pad( + channels[i], [[0, max_dimension - new_size[0]], + [0, max_dimension - new_size[1]]], + constant_values=per_channel_pad_value[i]) + for i in range(len(channels)) + ], + axis=2) + new_image.set_shape([max_dimension, max_dimension, 3]) + + result = [new_image] + if masks is not None: + new_masks = tf.expand_dims(masks, 3) + new_masks = tf.image.resize_images( + new_masks, + new_size[:-1], + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, + align_corners=align_corners) + if pad_to_max_dimension: + new_masks = tf.image.pad_to_bounding_box( + new_masks, 0, 0, max_dimension, max_dimension) + new_masks = tf.squeeze(new_masks, 3) + result.append(new_masks) + + result.append(new_size) + return result + + +def _get_image_info(image): + """Returns the height, width and number of channels in the image.""" + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + num_channels = tf.shape(image)[2] + return (image_height, image_width, num_channels) + + +# TODO(alirezafathi): Make sure the static shapes are preserved. +def resize_to_min_dimension(image, masks=None, min_dimension=600, + method=tf.image.ResizeMethod.BILINEAR): + """Resizes image and masks given the min size maintaining the aspect ratio. + + If one of the image dimensions is smaller than min_dimension, it will scale + the image such that its smallest dimension is equal to min_dimension. + Otherwise, will keep the image size as is. + + Args: + image: a tensor of size [height, width, channels]. + masks: (optional) a tensors of size [num_instances, height, width]. + min_dimension: minimum image dimension. + method: (optional) interpolation method used in resizing. Defaults to + BILINEAR. + + Returns: + An array containing resized_image, resized_masks, and resized_image_shape. + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A tensor of size [new_height, new_width, channels]. 
+ resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width] + resized_image_shape: A 1D tensor of shape [3] containing the shape of the + resized image. + + Raises: + ValueError: if the image is not a 3D tensor. + """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('ResizeGivenMinDimension', values=[image, min_dimension]): + (image_height, image_width, num_channels) = _get_image_info(image) + min_image_dimension = tf.minimum(image_height, image_width) + min_target_dimension = tf.maximum(min_image_dimension, min_dimension) + target_ratio = tf.cast(min_target_dimension, dtype=tf.float32) / tf.cast( + min_image_dimension, dtype=tf.float32) + target_height = tf.cast( + tf.cast(image_height, dtype=tf.float32) * target_ratio, dtype=tf.int32) + target_width = tf.cast( + tf.cast(image_width, dtype=tf.float32) * target_ratio, dtype=tf.int32) + image = tf.image.resize_images( + tf.expand_dims(image, axis=0), size=[target_height, target_width], + method=method, + align_corners=True) + result = [tf.squeeze(image, axis=0)] + + if masks is not None: + masks = tf.image.resize_nearest_neighbor( + tf.expand_dims(masks, axis=3), + size=[target_height, target_width], + align_corners=True) + result.append(tf.squeeze(masks, axis=3)) + + result.append(tf.stack([target_height, target_width, num_channels])) + return result + + +def resize_to_max_dimension(image, masks=None, max_dimension=600, + method=tf.image.ResizeMethod.BILINEAR): + """Resizes image and masks given the max size maintaining the aspect ratio. + + If one of the image dimensions is greater than max_dimension, it will scale + the image such that its largest dimension is equal to max_dimension. + Otherwise, will keep the image size as is. + + Args: + image: a tensor of size [height, width, channels]. + masks: (optional) a tensors of size [num_instances, height, width]. + max_dimension: maximum image dimension. + method: (optional) interpolation method used in resizing. Defaults to + BILINEAR. + + Returns: + An array containing resized_image, resized_masks, and resized_image_shape. + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A tensor of size [new_height, new_width, channels]. + resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width] + resized_image_shape: A 1D tensor of shape [3] containing the shape of the + resized image. + + Raises: + ValueError: if the image is not a 3D tensor. 
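resize_to_min_dimension and resize_to_max_dimension each apply one aspect-preserving ratio: the first only ever upscales, the second only ever downscales. A sketch of the size computation with the same truncating cast:

def resize_min_dim_size(height, width, min_dimension=600):
    """Upscale so the smaller side reaches min_dimension; never downscale."""
    ratio = max(min(height, width), min_dimension) / min(height, width)
    return int(height * ratio), int(width * ratio)

def resize_max_dim_size(height, width, max_dimension=600):
    """Downscale so the larger side fits max_dimension; never upscale."""
    ratio = min(max(height, width), max_dimension) / max(height, width)
    return int(height * ratio), int(width * ratio)

print(resize_min_dim_size(300, 500, 600))   # -> (600, 1000)
print(resize_max_dim_size(900, 1500, 600))  # -> (360, 600)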
+ """ + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('ResizeGivenMaxDimension', values=[image, max_dimension]): + (image_height, image_width, num_channels) = _get_image_info(image) + max_image_dimension = tf.maximum(image_height, image_width) + max_target_dimension = tf.minimum(max_image_dimension, max_dimension) + target_ratio = tf.cast(max_target_dimension, dtype=tf.float32) / tf.cast( + max_image_dimension, dtype=tf.float32) + target_height = tf.cast( + tf.cast(image_height, dtype=tf.float32) * target_ratio, dtype=tf.int32) + target_width = tf.cast( + tf.cast(image_width, dtype=tf.float32) * target_ratio, dtype=tf.int32) + image = tf.image.resize_images( + tf.expand_dims(image, axis=0), size=[target_height, target_width], + method=method, + align_corners=True) + result = [tf.squeeze(image, axis=0)] + + if masks is not None: + masks = tf.image.resize_nearest_neighbor( + tf.expand_dims(masks, axis=3), + size=[target_height, target_width], + align_corners=True) + result.append(tf.squeeze(masks, axis=3)) + + result.append(tf.stack([target_height, target_width, num_channels])) + return result + + +def resize_pad_to_multiple(image, masks=None, multiple=1): + """Resize an image by zero padding it to the specified multiple. + + For example, with an image of size (101, 199, 3) and multiple=4, + the returned image will have shape (104, 200, 3). + + Args: + image: a tensor of shape [height, width, channels] + masks: (optional) a tensor of shape [num_instances, height, width] + multiple: int, the multiple to which the height and width of the input + will be padded. + + Returns: + resized_image: The image with 0 padding applied, such that output + dimensions are divisible by `multiple` + resized_masks: If masks are given, they are resized to the same + spatial dimensions as the image. + resized_image_shape: An integer tensor of shape [3] which holds + the shape of the input image. + + """ + + if len(image.get_shape()) != 3: + raise ValueError('Image should be 3D tensor') + + with tf.name_scope('ResizePadToMultiple', values=[image, multiple]): + image_height, image_width, num_channels = _get_image_info(image) + image = image[tf.newaxis, :, :, :] + image = ops.pad_to_multiple(image, multiple)[0, :, :, :] + result = [image] + + if masks is not None: + masks = tf.transpose(masks, (1, 2, 0)) + masks = masks[tf.newaxis, :, :, :] + + masks = ops.pad_to_multiple(masks, multiple)[0, :, :, :] + masks = tf.transpose(masks, (2, 0, 1)) + result.append(masks) + + result.append(tf.stack([image_height, image_width, num_channels])) + return result + + +def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None): + """Scales boxes from normalized to pixel coordinates. + + Args: + image: A 3D float32 tensor of shape [height, width, channels]. + boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding + boxes in normalized coordinates. Each row is of the form + [ymin, xmin, ymax, xmax]. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized + coordinates. + + Returns: + image: unchanged input image. + scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the + bounding boxes in pixel coordinates. + scaled_keypoints: a 3D float32 tensor with shape + [num_instances, num_keypoints, 2] containing the keypoints in pixel + coordinates. 
+ """ + boxlist = box_list.BoxList(boxes) + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get() + result = [image, scaled_boxes] + if keypoints is not None: + scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width) + result.append(scaled_keypoints) + return tuple(result) + + +# TODO(alirezafathi): Investigate if instead the function should return None if +# masks is None. +# pylint: disable=g-doc-return-or-yield +def resize_image(image, + masks=None, + new_height=600, + new_width=1024, + method=tf.image.ResizeMethod.BILINEAR, + align_corners=False): + """Resizes images to the given height and width. + + Args: + image: A 3D tensor of shape [height, width, channels] + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. + new_height: (optional) (scalar) desired height of the image. + new_width: (optional) (scalar) desired width of the image. + method: (optional) interpolation method used in resizing. Defaults to + BILINEAR. + align_corners: bool. If true, exactly align all 4 corners of the input + and output. Defaults to False. + + Returns: + Note that the position of the resized_image_shape changes based on whether + masks are present. + resized_image: A tensor of size [new_height, new_width, channels]. + resized_masks: If masks is not None, also outputs masks. A 3D tensor of + shape [num_instances, new_height, new_width] + resized_image_shape: A 1D tensor of shape [3] containing the shape of the + resized image. + """ + with tf.name_scope( + 'ResizeImage', + values=[image, new_height, new_width, method, align_corners]): + new_image = tf.image.resize_images( + image, tf.stack([new_height, new_width]), + method=method, + align_corners=align_corners) + image_shape = shape_utils.combined_static_and_dynamic_shape(image) + result = [new_image] + if masks is not None: + num_instances = tf.shape(masks)[0] + new_size = tf.stack([new_height, new_width]) + def resize_masks_branch(): + new_masks = tf.expand_dims(masks, 3) + new_masks = tf.image.resize_nearest_neighbor( + new_masks, new_size, align_corners=align_corners) + new_masks = tf.squeeze(new_masks, axis=3) + return new_masks + + def reshape_masks_branch(): + # The shape function will be computed for both branches of the + # condition, regardless of which branch is actually taken. Make sure + # that we don't trigger an assertion in the shape function when trying + # to reshape a non empty tensor into an empty one. + new_masks = tf.reshape(masks, [-1, new_size[0], new_size[1]]) + return new_masks + + masks = tf.cond(num_instances > 0, resize_masks_branch, + reshape_masks_branch) + result.append(masks) + + result.append(tf.stack([new_height, new_width, image_shape[2]])) + return result + + +def subtract_channel_mean(image, means=None): + """Normalizes an image by subtracting a mean from each channel. + + Args: + image: A 3D tensor of shape [height, width, channels] + means: float list containing a mean for each channel + Returns: + normalized_images: a tensor of shape [height, width, channels] + Raises: + ValueError: if images is not a 4D tensor or if the number of means is not + equal to the number of channels. 
+ """ + with tf.name_scope('SubtractChannelMean', values=[image, means]): + if len(image.get_shape()) != 3: + raise ValueError('Input must be of size [height, width, channels]') + if len(means) != image.get_shape()[-1]: + raise ValueError('len(means) must match the number of channels') + return image - [[means]] + + +def one_hot_encoding(labels, num_classes=None): + """One-hot encodes the multiclass labels. + + Example usage: + labels = tf.constant([1, 4], dtype=tf.int32) + one_hot = OneHotEncoding(labels, num_classes=5) + one_hot.eval() # evaluates to [0, 1, 0, 0, 1] + + Args: + labels: A tensor of shape [None] corresponding to the labels. + num_classes: Number of classes in the dataset. + Returns: + onehot_labels: a tensor of shape [num_classes] corresponding to the one hot + encoding of the labels. + Raises: + ValueError: if num_classes is not specified. + """ + with tf.name_scope('OneHotEncoding', values=[labels]): + if num_classes is None: + raise ValueError('num_classes must be specified') + + labels = tf.one_hot(labels, num_classes, 1, 0) + return tf.reduce_max(labels, 0) + + +def rgb_to_gray(image): + """Converts a 3 channel RGB image to a 1 channel grayscale image. + + Args: + image: Rank 3 float32 tensor containing 1 image -> [height, width, 3] + with pixel values varying between [0, 1]. + + Returns: + image: A single channel grayscale image -> [image, height, 1]. + """ + return _rgb_to_grayscale(image) + + +def random_self_concat_image( + image, boxes, labels, label_weights, label_confidences=None, + multiclass_scores=None, concat_vertical_probability=0.1, + concat_horizontal_probability=0.1, seed=None, + preprocess_vars_cache=None): + """Randomly concatenates the image with itself. + + This function randomly concatenates the image with itself; the random + variables for vertical and horizontal concatenation are independent. + Afterwards, we adjust the old bounding boxes, and add new bounding boxes + for the new objects. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: rank 1 float32 containing the label weights. + label_confidences: (optional) rank 1 float32 containing the label + confidences. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for + each box for each class. + concat_vertical_probability: (optional) a tf.float32 scalar denoting the + probability of a vertical concatenation. + concat_horizontal_probability: (optional) a tf.float32 scalar denoting the + probability of a horizontal concatenation. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: Image shape will be [new_height, new_width, channels]. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. + if label_confidences is not None also returns: + maybe_concat_label_confidences: cropped label weights. + if multiclass_scores is not None also returns: + maybe_concat_multiclass_scores: cropped_multiclass_scores. 
+ """ + + concat_vertical = (tf.random_uniform([], seed=seed) < + concat_vertical_probability) + # Note the seed + 1 so we get some semblance of independence even with + # fixed seeds. + concat_horizontal = (tf.random_uniform([], seed=seed + 1 if seed else None) + < concat_horizontal_probability) + + gen_func = lambda: (concat_vertical, concat_horizontal) + params = _get_or_create_preprocess_rand_vars( + gen_func, preprocessor_cache.PreprocessorCache.SELF_CONCAT_IMAGE, + preprocess_vars_cache) + concat_vertical, concat_horizontal = params + + def _concat_image(image, boxes, labels, label_weights, axis): + """Concats the image to itself on `axis`.""" + output_images = tf.concat([image, image], axis=axis) + + if axis == 0: + # Concat vertically, so need to reduce the y coordinates. + old_scaling = tf.constant([0.5, 1.0, 0.5, 1.0]) + new_translation = tf.constant([0.5, 0.0, 0.5, 0.0]) + elif axis == 1: + old_scaling = tf.constant([1.0, 0.5, 1.0, 0.5]) + new_translation = tf.constant([0.0, 0.5, 0.0, 0.5]) + + old_boxes = old_scaling * boxes + new_boxes = old_boxes + new_translation + all_boxes = tf.concat([old_boxes, new_boxes], axis=0) + + return [output_images, all_boxes, tf.tile(labels, [2]), tf.tile( + label_weights, [2])] + + image, boxes, labels, label_weights = tf.cond( + concat_vertical, + lambda: _concat_image(image, boxes, labels, label_weights, axis=0), + lambda: [image, boxes, labels, label_weights], + strict=True) + + outputs = tf.cond( + concat_horizontal, + lambda: _concat_image(image, boxes, labels, label_weights, axis=1), + lambda: [image, boxes, labels, label_weights], + strict=True) + + if label_confidences is not None: + label_confidences = tf.cond(concat_vertical, + lambda: tf.tile(label_confidences, [2]), + lambda: label_confidences) + outputs.append(tf.cond(concat_horizontal, + lambda: tf.tile(label_confidences, [2]), + lambda: label_confidences)) + + if multiclass_scores is not None: + multiclass_scores = tf.cond(concat_vertical, + lambda: tf.tile(multiclass_scores, [2, 1]), + lambda: multiclass_scores) + outputs.append(tf.cond(concat_horizontal, + lambda: tf.tile(multiclass_scores, [2, 1]), + lambda: multiclass_scores)) + + return outputs + + +def ssd_random_crop(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + aspect_ratio_range=((0.5, 2.0),) * 7, + area_range=((0.1, 1.0),) * 7, + overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + clip_boxes=(True,) * 7, + random_coef=(0.15,) * 7, + seed=None, + preprocess_vars_cache=None): + """Random crop preprocessing with default parameters as in SSD paper. + + Liu et al., SSD: Single shot multibox detector. + For further information on random crop preprocessing refer to RandomCrop + function above. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: rank 1 float32 tensor containing the weights. + label_confidences: rank 1 float32 tensor containing the confidences. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. 
+ masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If label_weights, multiclass_scores, masks, or keypoints is not None, the + function also returns: + label_weights: rank 1 float32 tensor with shape [num_instances]. + multiclass_scores: rank 2 float32 tensor with shape + [num_instances, num_classes] + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + """ + + def random_crop_selector(selected_result, index): + """Applies random_crop_image to selected result. + + Args: + selected_result: A tuple containing image, boxes, labels, keypoints (if + not None), and masks (if not None). + index: The index that was randomly selected. + + Returns: A tuple containing image, boxes, labels, keypoints (if not None), + and masks (if not None). 
+ """ + + i = 3 + image, boxes, labels = selected_result[:i] + selected_label_weights = None + selected_label_confidences = None + selected_multiclass_scores = None + selected_masks = None + selected_keypoints = None + if label_weights is not None: + selected_label_weights = selected_result[i] + i += 1 + if label_confidences is not None: + selected_label_confidences = selected_result[i] + i += 1 + if multiclass_scores is not None: + selected_multiclass_scores = selected_result[i] + i += 1 + if masks is not None: + selected_masks = selected_result[i] + i += 1 + if keypoints is not None: + selected_keypoints = selected_result[i] + + return random_crop_image( + image=image, + boxes=boxes, + labels=labels, + label_weights=selected_label_weights, + label_confidences=selected_label_confidences, + multiclass_scores=selected_multiclass_scores, + masks=selected_masks, + keypoints=selected_keypoints, + min_object_covered=min_object_covered[index], + aspect_ratio_range=aspect_ratio_range[index], + area_range=area_range[index], + overlap_thresh=overlap_thresh[index], + clip_boxes=clip_boxes[index], + random_coef=random_coef[index], + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + result = _apply_with_random_selector_tuples( + tuple( + t for t in (image, boxes, labels, label_weights, label_confidences, + multiclass_scores, masks, keypoints) if t is not None), + random_crop_selector, + num_cases=len(min_object_covered), + preprocess_vars_cache=preprocess_vars_cache, + key=preprocessor_cache.PreprocessorCache.SSD_CROP_SELECTOR_ID) + return result + + +def ssd_random_crop_pad(image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + min_object_covered=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + aspect_ratio_range=((0.5, 2.0),) * 6, + area_range=((0.1, 1.0),) * 6, + overlap_thresh=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + clip_boxes=(True,) * 6, + random_coef=(0.15,) * 6, + min_padded_size_ratio=((1.0, 1.0),) * 6, + max_padded_size_ratio=((2.0, 2.0),) * 6, + pad_color=(None,) * 6, + seed=None, + preprocess_vars_cache=None): + """Random crop preprocessing with default parameters as in SSD paper. + + Liu et al., SSD: Single shot multibox detector. + For further information on random crop preprocessing refer to RandomCrop + function above. + + Args: + image: rank 3 float32 tensor containing 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: float32 tensor of shape [num_instances] representing the + confidences for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. 
+ random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + min_padded_size_ratio: min ratio of padded image height and width to the + input image's height and width. + max_padded_size_ratio: max ratio of padded image height and width to the + input image's height and width. + pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. + if set as None, it will be set to average color of the randomly + cropped image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: Image shape will be [new_height, new_width, channels]. + boxes: boxes which is the same rank as input boxes. Boxes are in normalized + form. + new_labels: new labels. + new_label_weights: new label weights. + """ + + def random_crop_pad_selector(image_boxes_labels, index): + """Random crop preprocessing helper.""" + i = 3 + image, boxes, labels = image_boxes_labels[:i] + selected_label_weights = None + selected_label_confidences = None + selected_multiclass_scores = None + if label_weights is not None: + selected_label_weights = image_boxes_labels[i] + i += 1 + if label_confidences is not None: + selected_label_confidences = image_boxes_labels[i] + i += 1 + if multiclass_scores is not None: + selected_multiclass_scores = image_boxes_labels[i] + + return random_crop_pad_image( + image, + boxes, + labels, + label_weights=selected_label_weights, + label_confidences=selected_label_confidences, + multiclass_scores=selected_multiclass_scores, + min_object_covered=min_object_covered[index], + aspect_ratio_range=aspect_ratio_range[index], + area_range=area_range[index], + overlap_thresh=overlap_thresh[index], + clip_boxes=clip_boxes[index], + random_coef=random_coef[index], + min_padded_size_ratio=min_padded_size_ratio[index], + max_padded_size_ratio=max_padded_size_ratio[index], + pad_color=pad_color[index], + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + return _apply_with_random_selector_tuples( + tuple(t for t in (image, boxes, labels, label_weights, label_confidences, + multiclass_scores) if t is not None), + random_crop_pad_selector, + num_cases=len(min_object_covered), + preprocess_vars_cache=preprocess_vars_cache, + key=preprocessor_cache.PreprocessorCache.SSD_CROP_PAD_SELECTOR_ID) + + +def ssd_random_crop_fixed_aspect_ratio( + image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + aspect_ratio=1.0, + area_range=((0.1, 1.0),) * 7, + overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + clip_boxes=(True,) * 7, + random_coef=(0.15,) * 7, + seed=None, + preprocess_vars_cache=None): + """Random crop preprocessing with default parameters as in SSD paper. + + Liu et al., SSD: Single shot multibox detector. + For further information on random crop preprocessing refer to RandomCrop + function above. + + The only difference is that the aspect ratio of the crops are fixed. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. 
+ Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidences for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x + normalized coordinates. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio: aspect ratio of the cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If multiclass_scores, masks, or keypoints is not None, the function also + returns: + + multiclass_scores: rank 2 float32 tensor with shape + [num_instances, num_classes] + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. 
+ keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + """ + aspect_ratio_range = ((aspect_ratio, aspect_ratio),) * len(area_range) + + crop_result = ssd_random_crop( + image, + boxes, + labels, + label_weights=label_weights, + label_confidences=label_confidences, + multiclass_scores=multiclass_scores, + masks=masks, + keypoints=keypoints, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + overlap_thresh=overlap_thresh, + clip_boxes=clip_boxes, + random_coef=random_coef, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + i = 3 + new_image, new_boxes, new_labels = crop_result[:i] + new_label_weights = None + new_label_confidences = None + new_multiclass_scores = None + new_masks = None + new_keypoints = None + if label_weights is not None: + new_label_weights = crop_result[i] + i += 1 + if label_confidences is not None: + new_label_confidences = crop_result[i] + i += 1 + if multiclass_scores is not None: + new_multiclass_scores = crop_result[i] + i += 1 + if masks is not None: + new_masks = crop_result[i] + i += 1 + if keypoints is not None: + new_keypoints = crop_result[i] + + result = random_crop_to_aspect_ratio( + new_image, + new_boxes, + new_labels, + label_weights=new_label_weights, + label_confidences=new_label_confidences, + multiclass_scores=new_multiclass_scores, + masks=new_masks, + keypoints=new_keypoints, + aspect_ratio=aspect_ratio, + clip_boxes=clip_boxes, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + return result + + +def ssd_random_crop_pad_fixed_aspect_ratio( + image, + boxes, + labels, + label_weights, + label_confidences=None, + multiclass_scores=None, + masks=None, + keypoints=None, + min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + aspect_ratio=1.0, + aspect_ratio_range=((0.5, 2.0),) * 7, + area_range=((0.1, 1.0),) * 7, + overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), + clip_boxes=(True,) * 7, + random_coef=(0.15,) * 7, + min_padded_size_ratio=(1.0, 1.0), + max_padded_size_ratio=(2.0, 2.0), + seed=None, + preprocess_vars_cache=None): + """Random crop and pad preprocessing with default parameters as in SSD paper. + + Liu et al., SSD: Single shot multibox detector. + For further information on random crop preprocessing refer to RandomCrop + function above. + + The only difference is that after the initial crop, images are zero-padded + to a fixed aspect ratio instead of being resized to that aspect ratio. + + Args: + image: rank 3 float32 tensor contains 1 image -> [height, width, channels] + with pixel values varying between [0, 1]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. + Each row is in the form of [ymin, xmin, ymax, xmax]. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidence for each box. + multiclass_scores: (optional) float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. 
The keypoints are in y-x + normalized coordinates. + min_object_covered: the cropped image must cover at least this fraction of + at least one of the input bounding boxes. + aspect_ratio: the final aspect ratio to pad to. + aspect_ratio_range: allowed range for aspect ratio of cropped image. + area_range: allowed range for area ratio between cropped image and the + original image. + overlap_thresh: minimum overlap thresh with new cropped + image to keep the box. + clip_boxes: whether to clip the boxes to the cropped image. + random_coef: a random coefficient that defines the chance of getting the + original image. If random_coef is 0, we will always get the + cropped image, and if it is 1.0, we will always get the + original image. + min_padded_size_ratio: min ratio of padded image height and width to the + input image's height and width. + max_padded_size_ratio: max ratio of padded image height and width to the + input image's height and width. + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + + If multiclass_scores, masks, or keypoints is not None, the function also + returns: + + multiclass_scores: rank 2 with shape [num_instances, num_classes] + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + keypoints: rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2] + """ + crop_result = ssd_random_crop( + image, + boxes, + labels, + label_weights=label_weights, + label_confidences=label_confidences, + multiclass_scores=multiclass_scores, + masks=masks, + keypoints=keypoints, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + overlap_thresh=overlap_thresh, + clip_boxes=clip_boxes, + random_coef=random_coef, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + i = 3 + new_image, new_boxes, new_labels = crop_result[:i] + new_label_weights = None + new_label_confidences = None + new_multiclass_scores = None + new_masks = None + new_keypoints = None + if label_weights is not None: + new_label_weights = crop_result[i] + i += 1 + if label_confidences is not None: + new_label_confidences = crop_result[i] + i += 1 + if multiclass_scores is not None: + new_multiclass_scores = crop_result[i] + i += 1 + if masks is not None: + new_masks = crop_result[i] + i += 1 + if keypoints is not None: + new_keypoints = crop_result[i] + + result = random_pad_to_aspect_ratio( + new_image, + new_boxes, + masks=new_masks, + keypoints=new_keypoints, + aspect_ratio=aspect_ratio, + min_padded_size_ratio=min_padded_size_ratio, + max_padded_size_ratio=max_padded_size_ratio, + seed=seed, + preprocess_vars_cache=preprocess_vars_cache) + + result = list(result) + i = 3 + result.insert(2, new_labels) + if new_label_weights is not None: + result.insert(i, new_label_weights) + i += 1 + if new_label_confidences is not None: + result.insert(i, new_label_confidences) + i += 1 + if multiclass_scores is not None: + result.insert(i, new_multiclass_scores) + result = tuple(result) + + return result + + +def convert_class_logits_to_softmax(multiclass_scores, temperature=1.0): + """Converts multiclass logits to softmax scores after 
applying temperature. + + Args: + multiclass_scores: float32 tensor of shape + [num_instances, num_classes] representing the score for each box for each + class. + temperature: Scale factor to use prior to applying softmax. Larger + temperatures give more uniform distruibutions after softmax. + + Returns: + multiclass_scores: float32 tensor of shape + [num_instances, num_classes] with scaling and softmax applied. + """ + + # Multiclass scores must be stored as logits. Apply temp and softmax. + multiclass_scores_scaled = tf.multiply( + multiclass_scores, 1.0 / temperature, name='scale_logits') + multiclass_scores = tf.nn.softmax(multiclass_scores_scaled, name='softmax') + + return multiclass_scores + + +def _get_crop_border(border, size): + border = tf.cast(border, tf.float32) + size = tf.cast(size, tf.float32) + + i = tf.ceil(tf.log(2.0 * border / size) / tf.log(2.0)) + divisor = tf.pow(2.0, i) + divisor = tf.clip_by_value(divisor, 1, border) + divisor = tf.cast(divisor, tf.int32) + + return tf.cast(border, tf.int32) // divisor + + +def random_square_crop_by_scale(image, boxes, labels, label_weights, + label_confidences=None, masks=None, + keypoints=None, max_border=128, scale_min=0.6, + scale_max=1.3, num_scales=8, seed=None, + preprocess_vars_cache=None): + """Randomly crop a square in proportion to scale and image size. + + Extract a square sized crop from an image whose side length is sampled by + randomly scaling the maximum spatial dimension of the image. If part of + the crop falls outside the image, it is filled with zeros. + The augmentation is borrowed from [1] + [1]: https://arxiv.org/abs/1904.07850 + + Args: + image: rank 3 float32 tensor containing 1 image -> + [height, width, channels]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning their coordinates vary + between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. + Boxes on the crop boundary are clipped to the boundary and boxes + falling outside the crop are ignored. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidence for each box. + masks: (optional) rank 3 float32 tensor with shape + [num_instances, height, width] containing instance masks. The masks + are of the same height, width as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape + [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized + coordinates. + max_border: The maximum size of the border. The border defines distance in + pixels to the image boundaries that will not be considered as a center of + a crop. To make sure that the border does not go over the center of the + image, we chose the border value by computing the minimum k, such that + (max_border / (2**k)) < image_dimension/2. + scale_min: float, the minimum value for scale. + scale_max: float, the maximum value for scale. + num_scales: int, the number of discrete scale values to sample between + [scale_min, scale_max] + seed: random seed. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. 
+ Boxes are in normalized form. + labels: new labels. + label_weights: rank 1 float32 tensor with shape [num_instances]. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidence for each box. + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + + """ + + img_shape = tf.shape(image) + height, width = img_shape[0], img_shape[1] + scales = tf.linspace(scale_min, scale_max, num_scales) + + scale = _get_or_create_preprocess_rand_vars( + lambda: scales[_random_integer(0, num_scales, seed)], + preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE, + preprocess_vars_cache, 'scale') + + image_size = scale * tf.cast(tf.maximum(height, width), tf.float32) + image_size = tf.cast(image_size, tf.int32) + h_border = _get_crop_border(max_border, height) + w_border = _get_crop_border(max_border, width) + + def y_function(): + y = _random_integer(h_border, + tf.cast(height, tf.int32) - h_border + 1, + seed) + return y + + def x_function(): + x = _random_integer(w_border, + tf.cast(width, tf.int32) - w_border + 1, + seed) + return x + + y_center = _get_or_create_preprocess_rand_vars( + y_function, + preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE, + preprocess_vars_cache, 'y_center') + + x_center = _get_or_create_preprocess_rand_vars( + x_function, + preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE, + preprocess_vars_cache, 'x_center') + + half_size = tf.cast(image_size / 2, tf.int32) + crop_ymin, crop_ymax = y_center - half_size, y_center + half_size + crop_xmin, crop_xmax = x_center - half_size, x_center + half_size + + ymin = tf.maximum(crop_ymin, 0) + xmin = tf.maximum(crop_xmin, 0) + ymax = tf.minimum(crop_ymax, height - 1) + xmax = tf.minimum(crop_xmax, width - 1) + + cropped_image = image[ymin:ymax, xmin:xmax] + offset_y = tf.maximum(0, ymin - crop_ymin) + offset_x = tf.maximum(0, xmin - crop_xmin) + + oy_i = offset_y + ox_i = offset_x + + output_image = tf.image.pad_to_bounding_box( + cropped_image, offset_height=oy_i, offset_width=ox_i, + target_height=image_size, target_width=image_size) + + if ymin == 0: + # We might be padding the image. + box_ymin = -offset_y + else: + box_ymin = crop_ymin + + if xmin == 0: + # We might be padding the image. 
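+      # Mirror of the ymin case above: a clamped left edge means the output is
+      # left-padded by offset_x pixels, so the crop window's x origin sits at
+      # -offset_x (i.e. crop_xmin) in the original image's coordinate frame.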
+ box_xmin = -offset_x + else: + box_xmin = crop_xmin + + box_ymax = box_ymin + image_size + box_xmax = box_xmin + image_size + + image_box = [box_ymin / height, box_xmin / width, + box_ymax / height, box_xmax / width] + boxlist = box_list.BoxList(boxes) + boxlist = box_list_ops.change_coordinate_frame(boxlist, image_box) + boxlist, indices = box_list_ops.prune_completely_outside_window( + boxlist, [0.0, 0.0, 1.0, 1.0]) + boxlist = box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0], + filter_nonoverlapping=False) + + return_values = [output_image, boxlist.get(), + tf.gather(labels, indices), + tf.gather(label_weights, indices)] + + if label_confidences is not None: + return_values.append(tf.gather(label_confidences, indices)) + + if masks is not None: + new_masks = tf.expand_dims(masks, -1) + new_masks = new_masks[:, ymin:ymax, xmin:xmax] + new_masks = tf.image.pad_to_bounding_box( + new_masks, oy_i, ox_i, image_size, image_size) + new_masks = tf.squeeze(new_masks, [-1]) + return_values.append(tf.gather(new_masks, indices)) + + if keypoints is not None: + keypoints = tf.gather(keypoints, indices) + keypoints = keypoint_ops.change_coordinate_frame(keypoints, image_box) + keypoints = keypoint_ops.prune_outside_window(keypoints, + [0.0, 0.0, 1.0, 1.0]) + return_values.append(keypoints) + + return return_values + + +def random_scale_crop_and_pad_to_square( + image, + boxes, + labels, + label_weights, + masks=None, + keypoints=None, + label_confidences=None, + scale_min=0.1, + scale_max=2.0, + output_size=512, + resize_method=tf.image.ResizeMethod.BILINEAR, + seed=None): + """Randomly scale, crop, and then pad an image to fixed square dimensions. + + Randomly scale, crop, and then pad an image to the desired square output + dimensions. Specifically, this method first samples a random_scale factor + from a uniform distribution between scale_min and scale_max, and then resizes + the image such that it's maximum dimension is (output_size * random_scale). + Secondly, a square output_size crop is extracted from the resized image + (note, this will only occur when random_scale > 1.0). Lastly, the cropped + region is padded to the desired square output_size, by filling with zeros. + The augmentation is borrowed from [1] + [1]: https://arxiv.org/abs/1911.09070 + + Args: + image: rank 3 float32 tensor containing 1 image -> + [height, width, channels]. + boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes + are in normalized form meaning their coordinates vary between [0, 1]. Each + row is in the form of [ymin, xmin, ymax, xmax]. Boxes on the crop boundary + are clipped to the boundary and boxes falling outside the crop are + ignored. + labels: rank 1 int32 tensor containing the object classes. + label_weights: float32 tensor of shape [num_instances] representing the + weight for each box. + masks: (optional) rank 3 float32 tensor with shape [num_instances, height, + width] containing instance masks. The masks are of the same height, width + as the input `image`. + keypoints: (optional) rank 3 float32 tensor with shape [num_instances, + num_keypoints, 2]. The keypoints are in y-x normalized coordinates. + label_confidences: (optional) float32 tensor of shape [num_instance] + representing the confidence for each box. + scale_min: float, the minimum value for the random scale factor. + scale_max: float, the maximum value for the random scale factor. + output_size: int, the desired (square) output image size. 
+ resize_method: tf.image.ResizeMethod, resize method to use when scaling the + input images. + seed: random seed. + + Returns: + image: image which is the same rank as input image. + boxes: boxes which is the same rank as input boxes. + Boxes are in normalized form. + labels: new labels. + label_weights: rank 1 float32 tensor with shape [num_instances]. + masks: rank 3 float32 tensor with shape [num_instances, height, width] + containing instance masks. + label_confidences: confidences for retained boxes. + """ + img_shape = tf.shape(image) + input_height, input_width = img_shape[0], img_shape[1] + random_scale = tf.random_uniform([], scale_min, scale_max, seed=seed) + + # Compute the scaled height and width from the random scale. + max_input_dim = tf.cast(tf.maximum(input_height, input_width), tf.float32) + input_ar_y = tf.cast(input_height, tf.float32) / max_input_dim + input_ar_x = tf.cast(input_width, tf.float32) / max_input_dim + scaled_height = tf.cast(random_scale * output_size * input_ar_y, tf.int32) + scaled_width = tf.cast(random_scale * output_size * input_ar_x, tf.int32) + + # Compute the offsets: + offset_y = tf.cast(scaled_height - output_size, tf.float32) + offset_x = tf.cast(scaled_width - output_size, tf.float32) + offset_y = tf.maximum(0.0, offset_y) * tf.random_uniform([], 0, 1, seed=seed) + offset_x = tf.maximum(0.0, offset_x) * tf.random_uniform([], 0, 1, seed=seed) + offset_y = tf.cast(offset_y, tf.int32) + offset_x = tf.cast(offset_x, tf.int32) + + # Scale, crop, and pad the input image. + scaled_image = tf.image.resize_images( + image, [scaled_height, scaled_width], method=resize_method) + scaled_image = scaled_image[offset_y:offset_y + output_size, + offset_x:offset_x + output_size, :] + output_image = tf.image.pad_to_bounding_box(scaled_image, 0, 0, output_size, + output_size) + + # Update the boxes. 
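+  # The crop window is [offset_y, offset_x, offset_y + output_size,
+  # offset_x + output_size] in scaled-image pixels. Normalizing it by the
+  # scaled height/width and changing the boxes' coordinate frame to that
+  # window re-expresses them relative to the square output; boxes entirely
+  # outside the window are pruned and the rest are clipped to [0, 1].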
+ new_window = tf.cast( + tf.stack([offset_y, offset_x, + offset_y + output_size, offset_x + output_size]), + dtype=tf.float32) + new_window /= tf.cast( + tf.stack([scaled_height, scaled_width, scaled_height, scaled_width]), + dtype=tf.float32) + boxlist = box_list.BoxList(boxes) + boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window) + boxlist, indices = box_list_ops.prune_completely_outside_window( + boxlist, [0.0, 0.0, 1.0, 1.0]) + boxlist = box_list_ops.clip_to_window( + boxlist, [0.0, 0.0, 1.0, 1.0], filter_nonoverlapping=False) + + return_values = [output_image, boxlist.get(), + tf.gather(labels, indices), + tf.gather(label_weights, indices)] + + if masks is not None: + new_masks = tf.expand_dims(masks, -1) + new_masks = tf.image.resize_images( + new_masks, [scaled_height, scaled_width], method=resize_method) + new_masks = new_masks[:, offset_y:offset_y + output_size, + offset_x:offset_x + output_size, :] + new_masks = tf.image.pad_to_bounding_box( + new_masks, 0, 0, output_size, output_size) + new_masks = tf.squeeze(new_masks, [-1]) + return_values.append(tf.gather(new_masks, indices)) + + if keypoints is not None: + keypoints = tf.gather(keypoints, indices) + keypoints = keypoint_ops.change_coordinate_frame(keypoints, new_window) + keypoints = keypoint_ops.prune_outside_window( + keypoints, [0.0, 0.0, 1.0, 1.0]) + return_values.append(keypoints) + + if label_confidences is not None: + return_values.append(tf.gather(label_confidences, indices)) + + return return_values + + +def get_default_func_arg_map(include_label_weights=True, + include_label_confidences=False, + include_multiclass_scores=False, + include_instance_masks=False, + include_keypoints=False, + include_keypoint_visibilities=False, + include_dense_pose=False): + """Returns the default mapping from a preprocessor function to its args. + + Args: + include_label_weights: If True, preprocessing functions will modify the + label weights, too. + include_label_confidences: If True, preprocessing functions will modify the + label confidences, too. + include_multiclass_scores: If True, preprocessing functions will modify the + multiclass scores, too. + include_instance_masks: If True, preprocessing functions will modify the + instance masks, too. + include_keypoints: If True, preprocessing functions will modify the + keypoints, too. + include_keypoint_visibilities: If True, preprocessing functions will modify + the keypoint visibilities, too. + include_dense_pose: If True, preprocessing functions will modify the + DensePose labels, too. + + Returns: + A map from preprocessing functions to the arguments they receive. 
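+
+  Example (an illustrative sketch only; `tensor_dict` stands in for a decoded
+  input dictionary and is not defined in this module):
+
+    arg_map = get_default_func_arg_map(include_instance_masks=True)
+    options = [(random_horizontal_flip, {})]
+    tensor_dict = preprocess(tensor_dict, options, func_arg_map=arg_map)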
+ """ + groundtruth_label_weights = None + if include_label_weights: + groundtruth_label_weights = ( + fields.InputDataFields.groundtruth_weights) + + groundtruth_label_confidences = None + if include_label_confidences: + groundtruth_label_confidences = ( + fields.InputDataFields.groundtruth_confidences) + + multiclass_scores = None + if include_multiclass_scores: + multiclass_scores = (fields.InputDataFields.multiclass_scores) + + groundtruth_instance_masks = None + if include_instance_masks: + groundtruth_instance_masks = ( + fields.InputDataFields.groundtruth_instance_masks) + + groundtruth_keypoints = None + if include_keypoints: + groundtruth_keypoints = fields.InputDataFields.groundtruth_keypoints + + groundtruth_keypoint_visibilities = None + if include_keypoint_visibilities: + groundtruth_keypoint_visibilities = ( + fields.InputDataFields.groundtruth_keypoint_visibilities) + + groundtruth_dp_num_points = None + groundtruth_dp_part_ids = None + groundtruth_dp_surface_coords = None + if include_dense_pose: + groundtruth_dp_num_points = ( + fields.InputDataFields.groundtruth_dp_num_points) + groundtruth_dp_part_ids = ( + fields.InputDataFields.groundtruth_dp_part_ids) + groundtruth_dp_surface_coords = ( + fields.InputDataFields.groundtruth_dp_surface_coords) + + prep_func_arg_map = { + normalize_image: (fields.InputDataFields.image,), + random_horizontal_flip: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + groundtruth_instance_masks, + groundtruth_keypoints, + groundtruth_keypoint_visibilities, + groundtruth_dp_part_ids, + groundtruth_dp_surface_coords, + ), + random_vertical_flip: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + random_rotation90: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + random_pixel_value_scale: (fields.InputDataFields.image,), + random_image_scale: ( + fields.InputDataFields.image, + groundtruth_instance_masks, + ), + random_rgb_to_gray: (fields.InputDataFields.image,), + random_adjust_brightness: (fields.InputDataFields.image,), + random_adjust_contrast: (fields.InputDataFields.image,), + random_adjust_hue: (fields.InputDataFields.image,), + random_adjust_saturation: (fields.InputDataFields.image,), + random_distort_color: (fields.InputDataFields.image,), + random_jitter_boxes: (fields.InputDataFields.groundtruth_boxes,), + random_crop_image: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, groundtruth_label_confidences, + multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, + groundtruth_keypoint_visibilities, groundtruth_dp_num_points, + groundtruth_dp_part_ids, groundtruth_dp_surface_coords), + random_pad_image: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, + groundtruth_keypoints, groundtruth_dp_surface_coords), + random_absolute_pad_image: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, + groundtruth_keypoints, groundtruth_dp_surface_coords), + random_crop_pad_image: (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, multiclass_scores), + random_crop_to_aspect_ratio: ( + 
fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, + multiclass_scores, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + random_pad_to_aspect_ratio: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + random_black_patches: (fields.InputDataFields.image,), + random_jpeg_quality: (fields.InputDataFields.image,), + random_downscale_to_target_pixels: ( + fields.InputDataFields.image, + groundtruth_instance_masks, + ), + random_patch_gaussian: (fields.InputDataFields.image,), + autoaugment_image: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + ), + retain_boxes_above_threshold: ( + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, + multiclass_scores, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + drop_label_probabilistically: ( + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, + multiclass_scores, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + remap_labels: (fields.InputDataFields.groundtruth_classes,), + image_to_float: (fields.InputDataFields.image,), + random_resize_method: (fields.InputDataFields.image,), + resize_to_range: ( + fields.InputDataFields.image, + groundtruth_instance_masks, + ), + resize_to_min_dimension: ( + fields.InputDataFields.image, + groundtruth_instance_masks, + ), + scale_boxes_to_pixel_coordinates: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + groundtruth_keypoints, + ), + resize_image: ( + fields.InputDataFields.image, + groundtruth_instance_masks, + ), + subtract_channel_mean: (fields.InputDataFields.image,), + one_hot_encoding: (fields.InputDataFields.groundtruth_image_classes,), + rgb_to_gray: (fields.InputDataFields.image,), + random_self_concat_image: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, groundtruth_label_confidences, + multiclass_scores), + ssd_random_crop: (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, multiclass_scores, + groundtruth_instance_masks, groundtruth_keypoints), + ssd_random_crop_pad: (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, multiclass_scores), + ssd_random_crop_fixed_aspect_ratio: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, groundtruth_label_confidences, + multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints + ), + ssd_random_crop_pad_fixed_aspect_ratio: ( + fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, + groundtruth_label_confidences, + multiclass_scores, + groundtruth_instance_masks, + groundtruth_keypoints, + ), + convert_class_logits_to_softmax: (multiclass_scores,), + random_square_crop_by_scale: + 
(fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, groundtruth_label_confidences, + groundtruth_instance_masks, groundtruth_keypoints), + random_scale_crop_and_pad_to_square: + (fields.InputDataFields.image, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + groundtruth_label_weights, groundtruth_instance_masks, + groundtruth_keypoints, groundtruth_label_confidences), + } + + return prep_func_arg_map + + +def preprocess(tensor_dict, + preprocess_options, + func_arg_map=None, + preprocess_vars_cache=None): + """Preprocess images and bounding boxes. + + Various types of preprocessing (to be implemented) based on the + preprocess_options dictionary e.g. "crop image" (affects image and possibly + boxes), "white balance image" (affects only image), etc. If self._options + is None, no preprocessing is done. + + Args: + tensor_dict: dictionary that contains images, boxes, and can contain other + things as well. + images-> rank 4 float32 tensor contains + 1 image -> [1, height, width, 3]. + with pixel values varying between [0, 1] + boxes-> rank 2 float32 tensor containing + the bounding boxes -> [N, 4]. + Boxes are in normalized form meaning + their coordinates vary between [0, 1]. + Each row is in the form + of [ymin, xmin, ymax, xmax]. + preprocess_options: It is a list of tuples, where each tuple contains a + function and a dictionary that contains arguments and + their values. + func_arg_map: mapping from preprocessing functions to arguments that they + expect to receive and return. + preprocess_vars_cache: PreprocessorCache object that records previously + performed augmentations. Updated in-place. If this + function is called multiple times with the same + non-null cache, it will perform deterministically. + + Returns: + tensor_dict: which contains the preprocessed images, bounding boxes, etc. + + Raises: + ValueError: (a) If the functions passed to Preprocess + are not in func_arg_map. + (b) If the arguments that a function needs + do not exist in tensor_dict. 
+ (c) If image in tensor_dict is not rank 4 + """ + if func_arg_map is None: + func_arg_map = get_default_func_arg_map() + # changes the images to image (rank 4 to rank 3) since the functions + # receive rank 3 tensor for image + if fields.InputDataFields.image in tensor_dict: + images = tensor_dict[fields.InputDataFields.image] + if len(images.get_shape()) != 4: + raise ValueError('images in tensor_dict should be rank 4') + image = tf.squeeze(images, axis=0) + tensor_dict[fields.InputDataFields.image] = image + + # Preprocess inputs based on preprocess_options + for option in preprocess_options: + func, params = option + if func not in func_arg_map: + raise ValueError('The function %s does not exist in func_arg_map' % + (func.__name__)) + arg_names = func_arg_map[func] + for a in arg_names: + if a is not None and a not in tensor_dict: + raise ValueError('The function %s requires argument %s' % + (func.__name__, a)) + + def get_arg(key): + return tensor_dict[key] if key is not None else None + + args = [get_arg(a) for a in arg_names] + if preprocess_vars_cache is not None: + if six.PY2: + # pylint: disable=deprecated-method + arg_spec = inspect.getargspec(func) + # pylint: enable=deprecated-method + else: + arg_spec = inspect.getfullargspec(func) + if 'preprocess_vars_cache' in arg_spec.args: + params['preprocess_vars_cache'] = preprocess_vars_cache + + results = func(*args, **params) + if not isinstance(results, (list, tuple)): + results = (results,) + # Removes None args since the return values will not contain those. + arg_names = [arg_name for arg_name in arg_names if arg_name is not None] + for res, arg_name in zip(results, arg_names): + tensor_dict[arg_name] = res + + # changes the image to images (rank 3 to rank 4) to be compatible to what + # we received in the first place + if fields.InputDataFields.image in tensor_dict: + image = tensor_dict[fields.InputDataFields.image] + images = tf.expand_dims(image, 0) + tensor_dict[fields.InputDataFields.image] = images + + return tensor_dict diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5d824eb934806a22ddfb62fed04c9460cf5c42b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor_cache.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..948710564cfba22f294b98f0078ee632aa15854c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor_cache.py @@ -0,0 +1,109 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Records previous preprocessing operations and allows them to be repeated. + +Used with object_detection.core.preprocessor. Passing a PreprocessorCache +into individual data augmentation functions or the general preprocess() function +will store all randomly generated variables in the PreprocessorCache. When +a preprocessor function is called multiple times with the same +PreprocessorCache object, that function will perform the same augmentation +on all calls. +""" + +import collections + + +class PreprocessorCache(object): + """Dictionary wrapper storing random variables generated during preprocessing. + """ + + # Constant keys representing different preprocessing functions + ROTATION90 = 'rotation90' + HORIZONTAL_FLIP = 'horizontal_flip' + VERTICAL_FLIP = 'vertical_flip' + PIXEL_VALUE_SCALE = 'pixel_value_scale' + IMAGE_SCALE = 'image_scale' + RGB_TO_GRAY = 'rgb_to_gray' + ADJUST_BRIGHTNESS = 'adjust_brightness' + ADJUST_CONTRAST = 'adjust_contrast' + ADJUST_HUE = 'adjust_hue' + ADJUST_SATURATION = 'adjust_saturation' + DISTORT_COLOR = 'distort_color' + STRICT_CROP_IMAGE = 'strict_crop_image' + CROP_IMAGE = 'crop_image' + PAD_IMAGE = 'pad_image' + CROP_TO_ASPECT_RATIO = 'crop_to_aspect_ratio' + RESIZE_METHOD = 'resize_method' + PAD_TO_ASPECT_RATIO = 'pad_to_aspect_ratio' + BLACK_PATCHES = 'black_patches' + ADD_BLACK_PATCH = 'add_black_patch' + SELECTOR = 'selector' + SELECTOR_TUPLES = 'selector_tuples' + SELF_CONCAT_IMAGE = 'self_concat_image' + SSD_CROP_SELECTOR_ID = 'ssd_crop_selector_id' + SSD_CROP_PAD_SELECTOR_ID = 'ssd_crop_pad_selector_id' + JPEG_QUALITY = 'jpeg_quality' + DOWNSCALE_TO_TARGET_PIXELS = 'downscale_to_target_pixels' + PATCH_GAUSSIAN = 'patch_gaussian' + SQUARE_CROP_BY_SCALE = 'square_crop_scale' + + # 27 permitted function ids + _VALID_FNS = [ROTATION90, HORIZONTAL_FLIP, VERTICAL_FLIP, PIXEL_VALUE_SCALE, + IMAGE_SCALE, RGB_TO_GRAY, ADJUST_BRIGHTNESS, ADJUST_CONTRAST, + ADJUST_HUE, ADJUST_SATURATION, DISTORT_COLOR, STRICT_CROP_IMAGE, + CROP_IMAGE, PAD_IMAGE, CROP_TO_ASPECT_RATIO, RESIZE_METHOD, + PAD_TO_ASPECT_RATIO, BLACK_PATCHES, ADD_BLACK_PATCH, SELECTOR, + SELECTOR_TUPLES, SELF_CONCAT_IMAGE, SSD_CROP_SELECTOR_ID, + SSD_CROP_PAD_SELECTOR_ID, JPEG_QUALITY, + DOWNSCALE_TO_TARGET_PIXELS, PATCH_GAUSSIAN, + SQUARE_CROP_BY_SCALE] + + def __init__(self): + self._history = collections.defaultdict(dict) + + def clear(self): + """Resets cache.""" + self._history = collections.defaultdict(dict) + + def get(self, function_id, key): + """Gets stored value given a function id and key. + + Args: + function_id: identifier for the preprocessing function used. + key: identifier for the variable stored. + Returns: + value: the corresponding value, expected to be a tensor or + nested structure of tensors. + Raises: + ValueError: if function_id is not one of the 23 valid function ids. + """ + if function_id not in self._VALID_FNS: + raise ValueError('Function id not recognized: %s.' % str(function_id)) + return self._history[function_id].get(key) + + def update(self, function_id, key, value): + """Adds a value to the dictionary. + + Args: + function_id: identifier for the preprocessing function used. + key: identifier for the variable stored. + value: the value to store, expected to be a tensor or nested structure + of tensors. + Raises: + ValueError: if function_id is not one of the 23 valid function ids. 
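+
+    Example (illustrative sketch; `flip_tensor` is a placeholder for a value
+    produced by a preprocessing function, not part of this module):
+
+      cache = PreprocessorCache()
+      cache.update(PreprocessorCache.HORIZONTAL_FLIP, 'flip', flip_tensor)
+      same_value = cache.get(PreprocessorCache.HORIZONTAL_FLIP, 'flip')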
+ """ + if function_id not in self._VALID_FNS: + raise ValueError('Function id not recognized: %s.' % str(function_id)) + self._history[function_id][key] = value diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor_cache.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor_cache.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1f0acfe12c42a6afff05987f823a5836163584d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor_cache.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0f1e07ab714898c1dc690a880ba1d194b2e4d5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/preprocessor_test.py @@ -0,0 +1,3962 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.preprocessor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +from absl.testing import parameterized +import numpy as np +import six +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.core import preprocessor +from object_detection.core import preprocessor_cache +from object_detection.core import standard_fields as fields +from object_detection.utils import test_case +from object_detection.utils import tf_version + +if six.PY2: + import mock # pylint: disable=g-import-not-at-top +else: + mock = unittest.mock # pylint: disable=g-import-not-at-top + + +class PreprocessorTest(test_case.TestCase, parameterized.TestCase): + + def createColorfulTestImage(self): + ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8)) + ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8)) + ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8)) + imr = tf.concat([ch255, ch0, ch0], 3) + img = tf.concat([ch255, ch255, ch0], 3) + imb = tf.concat([ch255, ch0, ch255], 3) + imw = tf.concat([ch128, ch128, ch128], 3) + imu = tf.concat([imr, img], 2) + imd = tf.concat([imb, imw], 2) + im = tf.concat([imu, imd], 1) + return im + + def createTestImages(self): + images_r = tf.constant([[[128, 128, 128, 128], [0, 0, 128, 128], + [0, 128, 128, 128], [192, 192, 128, 128]]], + dtype=tf.uint8) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[0, 0, 128, 128], [0, 0, 128, 128], + [0, 128, 192, 192], [192, 192, 128, 192]]], + dtype=tf.uint8) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[128, 128, 192, 0], [0, 0, 128, 192], + [0, 128, 128, 0], [192, 
192, 192, 128]]], + dtype=tf.uint8) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def createEmptyTestBoxes(self): + boxes = tf.constant([[]], dtype=tf.float32) + return boxes + + def createTestBoxes(self): + boxes = tf.constant( + [[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) + return boxes + + def createTestGroundtruthWeights(self): + return tf.constant([1.0, 0.5], dtype=tf.float32) + + def createTestMasks(self): + mask = np.array([ + [[255.0, 0.0, 0.0], + [255.0, 0.0, 0.0], + [255.0, 0.0, 0.0]], + [[255.0, 255.0, 0.0], + [255.0, 255.0, 0.0], + [255.0, 255.0, 0.0]]]) + return tf.constant(mask, dtype=tf.float32) + + def createTestKeypoints(self): + keypoints_np = np.array([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], + ]) + keypoints = tf.constant(keypoints_np, dtype=tf.float32) + keypoint_visibilities = tf.constant( + [ + [True, True, False], + [False, True, True] + ]) + return keypoints, keypoint_visibilities + + def createTestKeypointsInsideCrop(self): + keypoints = np.array([ + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], + [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], + ]) + return tf.constant(keypoints, dtype=tf.float32) + + def createTestKeypointsOutsideCrop(self): + keypoints = np.array([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], + ]) + return tf.constant(keypoints, dtype=tf.float32) + + def createTestDensePose(self): + dp_num_points = tf.constant([1, 3], dtype=tf.int32) + dp_part_ids = tf.constant( + [[4, 0, 0], + [1, 0, 5]], dtype=tf.int32) + dp_surface_coords = tf.constant( + [ + # Instance 0. + [[0.1, 0.2, 0.6, 0.7], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]], + # Instance 1. + [[0.8, 0.9, 0.2, 0.4], + [0.1, 0.3, 0.2, 0.8], + [0.6, 1.0, 0.3, 0.4]], + ], dtype=tf.float32) + return dp_num_points, dp_part_ids, dp_surface_coords + + def createKeypointFlipPermutation(self): + return [0, 2, 1] + + def createKeypointRotPermutation(self): + return [0, 2, 1] + + def createTestLabels(self): + labels = tf.constant([1, 2], dtype=tf.int32) + return labels + + def createTestLabelsLong(self): + labels = tf.constant([1, 2, 4], dtype=tf.int32) + return labels + + def createTestBoxesOutOfImage(self): + boxes = tf.constant( + [[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 0.75, 1.1]], dtype=tf.float32) + return boxes + + def createTestMultiClassScores(self): + return tf.constant([[1.0, 0.0], [0.5, 0.5]], dtype=tf.float32) + + def expectedImagesAfterNormalization(self): + images_r = tf.constant([[[0, 0, 0, 0], [-1, -1, 0, 0], + [-1, 0, 0, 0], [0.5, 0.5, 0, 0]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[-1, -1, 0, 0], [-1, -1, 0, 0], + [-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5], + [-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedMaxImageAfterColorScale(self): + images_r = tf.constant([[[0.1, 0.1, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], + [-0.9, 0.1, 0.1, 0.1], [0.6, 0.6, 0.1, 0.1]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[-0.9, -0.9, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], + [-0.9, 0.1, 0.6, 0.6], [0.6, 0.6, 0.1, 0.6]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[0.1, 
0.1, 0.6, -0.9], [-0.9, -0.9, 0.1, 0.6], + [-0.9, 0.1, 0.1, -0.9], [0.6, 0.6, 0.6, 0.1]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedMinImageAfterColorScale(self): + images_r = tf.constant([[[-0.1, -0.1, -0.1, -0.1], [-1, -1, -0.1, -0.1], + [-1, -0.1, -0.1, -0.1], [0.4, 0.4, -0.1, -0.1]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[-1, -1, -0.1, -0.1], [-1, -1, -0.1, -0.1], + [-1, -0.1, 0.4, 0.4], [0.4, 0.4, -0.1, 0.4]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[-0.1, -0.1, 0.4, -1], [-1, -1, -0.1, 0.4], + [-1, -0.1, -0.1, -1], [0.4, 0.4, 0.4, -0.1]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedImagesAfterLeftRightFlip(self): + images_r = tf.constant([[[0, 0, 0, 0], [0, 0, -1, -1], + [0, 0, 0, -1], [0, 0, 0.5, 0.5]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[0, 0, -1, -1], [0, 0, -1, -1], + [0.5, 0.5, 0, -1], [0.5, 0, 0.5, 0.5]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[-1, 0.5, 0, 0], [0.5, 0, -1, -1], + [-1, 0, 0, -1], [0, 0.5, 0.5, 0.5]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedImagesAfterUpDownFlip(self): + images_r = tf.constant([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0], + [-1, -1, 0, 0], [0, 0, 0, 0]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5], + [-1, -1, 0, 0], [-1, -1, 0, 0]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1], + [-1, -1, 0, 0.5], [0, 0, 0.5, -1]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedImagesAfterRot90(self): + images_r = tf.constant([[[0, 0, 0, 0], [0, 0, 0, 0], + [0, -1, 0, 0.5], [0, -1, -1, 0.5]]], + dtype=tf.float32) + images_r = tf.expand_dims(images_r, 3) + images_g = tf.constant([[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0], + [-1, -1, 0, 0.5], [-1, -1, -1, 0.5]]], + dtype=tf.float32) + images_g = tf.expand_dims(images_g, 3) + images_b = tf.constant([[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5], + [0, -1, 0, 0.5], [0, -1, -1, 0.5]]], + dtype=tf.float32) + images_b = tf.expand_dims(images_b, 3) + images = tf.concat([images_r, images_g, images_b], 3) + return images + + def expectedBoxesAfterLeftRightFlip(self): + boxes = tf.constant([[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]], + dtype=tf.float32) + return boxes + + def expectedBoxesAfterUpDownFlip(self): + boxes = tf.constant([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]], + dtype=tf.float32) + return boxes + + def expectedBoxesAfterRot90(self): + boxes = tf.constant( + [[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]], dtype=tf.float32) + return boxes + + def expectedMasksAfterLeftRightFlip(self): + mask = np.array([ + [[0.0, 0.0, 255.0], + [0.0, 0.0, 255.0], + [0.0, 0.0, 255.0]], + [[0.0, 255.0, 255.0], + [0.0, 255.0, 255.0], + [0.0, 255.0, 255.0]]]) + return tf.constant(mask, dtype=tf.float32) + + def expectedMasksAfterUpDownFlip(self): + mask = np.array([ + [[255.0, 0.0, 0.0], + [255.0, 0.0, 0.0], + [255.0, 0.0, 0.0]], + [[255.0, 255.0, 0.0], + [255.0, 
255.0, 0.0], + [255.0, 255.0, 0.0]]]) + return tf.constant(mask, dtype=tf.float32) + + def expectedMasksAfterRot90(self): + mask = np.array([ + [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [255.0, 255.0, 255.0]], + [[0.0, 0.0, 0.0], + [255.0, 255.0, 255.0], + [255.0, 255.0, 255.0]]]) + return tf.constant(mask, dtype=tf.float32) + + def expectedLabelScoresAfterThresholding(self): + return tf.constant([1.0], dtype=tf.float32) + + def expectedBoxesAfterThresholding(self): + return tf.constant([[0.0, 0.25, 0.75, 1.0]], dtype=tf.float32) + + def expectedLabelsAfterThresholding(self): + return tf.constant([1], dtype=tf.float32) + + def expectedMultiClassScoresAfterThresholding(self): + return tf.constant([[1.0, 0.0]], dtype=tf.float32) + + def expectedMasksAfterThresholding(self): + mask = np.array([ + [[255.0, 0.0, 0.0], + [255.0, 0.0, 0.0], + [255.0, 0.0, 0.0]]]) + return tf.constant(mask, dtype=tf.float32) + + def expectedKeypointsAfterThresholding(self): + keypoints = np.array([ + [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]] + ]) + return tf.constant(keypoints, dtype=tf.float32) + + def expectedLabelScoresAfterThresholdingWithMissingScore(self): + return tf.constant([np.nan], dtype=tf.float32) + + def expectedBoxesAfterThresholdingWithMissingScore(self): + return tf.constant([[0.25, 0.5, 0.75, 1]], dtype=tf.float32) + + def expectedLabelsAfterThresholdingWithMissingScore(self): + return tf.constant([2], dtype=tf.float32) + + def expectedLabelScoresAfterDropping(self): + return tf.constant([0.5], dtype=tf.float32) + + def expectedBoxesAfterDropping(self): + return tf.constant([[0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) + + def expectedLabelsAfterDropping(self): + return tf.constant([2], dtype=tf.float32) + + def expectedMultiClassScoresAfterDropping(self): + return tf.constant([[0.5, 0.5]], dtype=tf.float32) + + def expectedMasksAfterDropping(self): + masks = np.array([[[255.0, 255.0, 0.0], [255.0, 255.0, 0.0], + [255.0, 255.0, 0.0]]]) + return tf.constant(masks, dtype=tf.float32) + + def expectedKeypointsAfterDropping(self): + keypoints = np.array([[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]]) + return tf.constant(keypoints, dtype=tf.float32) + + def expectedLabelsAfterRemapping(self): + return tf.constant([3, 3, 4], dtype=tf.float32) + + def testRgbToGrayscale(self): + def graph_fn(): + images = self.createTestImages() + grayscale_images = preprocessor._rgb_to_grayscale(images) + expected_images = tf.image.rgb_to_grayscale(images) + return grayscale_images, expected_images + (grayscale_images, expected_images) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(expected_images, grayscale_images) + + def testNormalizeImage(self): + def graph_fn(): + preprocess_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 256, + 'target_minval': -1, + 'target_maxval': 1 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + images_expected = self.expectedImagesAfterNormalization() + return images, images_expected + images_, images_expected_ = self.execute_cpu(graph_fn, []) + images_shape_ = images_.shape + images_expected_shape_ = images_expected_.shape + expected_shape = [1, 4, 4, 3] + self.assertAllEqual(images_expected_shape_, images_shape_) + self.assertAllEqual(images_shape_, expected_shape) + self.assertAllClose(images_, images_expected_) + + def testRetainBoxesAboveThreshold(self): + def graph_fn(): + boxes = 
self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + (retained_boxes, retained_labels, + retained_weights) = preprocessor.retain_boxes_above_threshold( + boxes, labels, weights, threshold=0.6) + return [ + retained_boxes, retained_labels, retained_weights, + self.expectedBoxesAfterThresholding(), + self.expectedLabelsAfterThresholding(), + self.expectedLabelScoresAfterThresholding() + ] + + (retained_boxes_, retained_labels_, retained_weights_, + expected_retained_boxes_, expected_retained_labels_, + expected_retained_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllClose( + retained_boxes_, expected_retained_boxes_) + self.assertAllClose( + retained_labels_, expected_retained_labels_) + self.assertAllClose( + retained_weights_, expected_retained_weights_) + + def testRetainBoxesAboveThresholdWithMultiClassScores(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + multiclass_scores = self.createTestMultiClassScores() + (_, _, _, + retained_multiclass_scores) = preprocessor.retain_boxes_above_threshold( + boxes, + labels, + weights, + multiclass_scores=multiclass_scores, + threshold=0.6) + return [ + retained_multiclass_scores, + self.expectedMultiClassScoresAfterThresholding() + ] + + (retained_multiclass_scores_, + expected_retained_multiclass_scores_) = self.execute(graph_fn, []) + self.assertAllClose(retained_multiclass_scores_, + expected_retained_multiclass_scores_) + + def testRetainBoxesAboveThresholdWithMasks(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = self.createTestMasks() + _, _, _, retained_masks = preprocessor.retain_boxes_above_threshold( + boxes, labels, weights, masks, threshold=0.6) + return [ + retained_masks, self.expectedMasksAfterThresholding()] + retained_masks_, expected_retained_masks_ = self.execute_cpu(graph_fn, []) + + self.assertAllClose( + retained_masks_, expected_retained_masks_) + + def testRetainBoxesAboveThresholdWithKeypoints(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints, _ = self.createTestKeypoints() + (_, _, _, retained_keypoints) = preprocessor.retain_boxes_above_threshold( + boxes, labels, weights, keypoints=keypoints, threshold=0.6) + return [retained_keypoints, self.expectedKeypointsAfterThresholding()] + + (retained_keypoints_, + expected_retained_keypoints_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_keypoints_, expected_retained_keypoints_) + + def testDropLabelProbabilistically(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + (retained_boxes, retained_labels, + retained_weights) = preprocessor.drop_label_probabilistically( + boxes, labels, weights, dropped_label=1, drop_probability=1.0) + return [ + retained_boxes, retained_labels, retained_weights, + self.expectedBoxesAfterDropping(), + self.expectedLabelsAfterDropping(), + self.expectedLabelScoresAfterDropping() + ] + + (retained_boxes_, retained_labels_, retained_weights_, + expected_retained_boxes_, expected_retained_labels_, + expected_retained_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_boxes_, expected_retained_boxes_) + self.assertAllClose(retained_labels_, 
expected_retained_labels_) + self.assertAllClose(retained_weights_, expected_retained_weights_) + + def testDropLabelProbabilisticallyWithMultiClassScores(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + multiclass_scores = self.createTestMultiClassScores() + (_, _, _, + retained_multiclass_scores) = preprocessor.drop_label_probabilistically( + boxes, + labels, + weights, + multiclass_scores=multiclass_scores, + dropped_label=1, + drop_probability=1.0) + return [retained_multiclass_scores, + self.expectedMultiClassScoresAfterDropping()] + (retained_multiclass_scores_, + expected_retained_multiclass_scores_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_multiclass_scores_, + expected_retained_multiclass_scores_) + + def testDropLabelProbabilisticallyWithMasks(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = self.createTestMasks() + (_, _, _, retained_masks) = preprocessor.drop_label_probabilistically( + boxes, + labels, + weights, + masks=masks, + dropped_label=1, + drop_probability=1.0) + return [retained_masks, self.expectedMasksAfterDropping()] + (retained_masks_, expected_retained_masks_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_masks_, expected_retained_masks_) + + def testDropLabelProbabilisticallyWithKeypoints(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints, _ = self.createTestKeypoints() + (_, _, _, retained_keypoints) = preprocessor.drop_label_probabilistically( + boxes, + labels, + weights, + keypoints=keypoints, + dropped_label=1, + drop_probability=1.0) + return [retained_keypoints, self.expectedKeypointsAfterDropping()] + + (retained_keypoints_, + expected_retained_keypoints_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_keypoints_, expected_retained_keypoints_) + + def testRemapLabels(self): + def graph_fn(): + labels = self.createTestLabelsLong() + remapped_labels = preprocessor.remap_labels(labels, [1, 2], 3) + return [remapped_labels, self.expectedLabelsAfterRemapping()] + + (remapped_labels_, expected_remapped_labels_) = self.execute_cpu(graph_fn, + []) + self.assertAllClose(remapped_labels_, expected_remapped_labels_) + + def testFlipBoxesLeftRight(self): + def graph_fn(): + boxes = self.createTestBoxes() + flipped_boxes = preprocessor._flip_boxes_left_right(boxes) + expected_boxes = self.expectedBoxesAfterLeftRightFlip() + return flipped_boxes, expected_boxes + flipped_boxes, expected_boxes = self.execute_cpu(graph_fn, []) + self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten()) + + def testFlipBoxesUpDown(self): + def graph_fn(): + boxes = self.createTestBoxes() + flipped_boxes = preprocessor._flip_boxes_up_down(boxes) + expected_boxes = self.expectedBoxesAfterUpDownFlip() + return flipped_boxes, expected_boxes + flipped_boxes, expected_boxes = self.execute_cpu(graph_fn, []) + self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten()) + + def testRot90Boxes(self): + def graph_fn(): + boxes = self.createTestBoxes() + rotated_boxes = preprocessor._rot90_boxes(boxes) + expected_boxes = self.expectedBoxesAfterRot90() + return rotated_boxes, expected_boxes + rotated_boxes, expected_boxes = self.execute_cpu(graph_fn, []) + self.assertAllEqual(rotated_boxes.flatten(), expected_boxes.flatten()) + 
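+  # Note on the geometric fixtures used above: in normalized
+  # [ymin, xmin, ymax, xmax] coordinates a left-right flip maps a box to
+  # [ymin, 1 - xmax, ymax, 1 - xmin], an up-down flip maps it to
+  # [1 - ymax, xmin, 1 - ymin, xmax], and a 90-degree (counterclockwise)
+  # rotation maps it to [1 - xmax, ymin, 1 - xmin, ymax]. The mask tests below
+  # check the same transforms applied to each [height, width] mask.
+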
+ def testFlipMasksLeftRight(self): + def graph_fn(): + test_mask = self.createTestMasks() + flipped_mask = preprocessor._flip_masks_left_right(test_mask) + expected_mask = self.expectedMasksAfterLeftRightFlip() + return flipped_mask, expected_mask + flipped_mask, expected_mask = self.execute_cpu(graph_fn, []) + self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten()) + + def testFlipMasksUpDown(self): + def graph_fn(): + test_mask = self.createTestMasks() + flipped_mask = preprocessor._flip_masks_up_down(test_mask) + expected_mask = self.expectedMasksAfterUpDownFlip() + return flipped_mask, expected_mask + flipped_mask, expected_mask = self.execute_cpu(graph_fn, []) + self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten()) + + def testRot90Masks(self): + def graph_fn(): + test_mask = self.createTestMasks() + rotated_mask = preprocessor._rot90_masks(test_mask) + expected_mask = self.expectedMasksAfterRot90() + return [rotated_mask, expected_mask] + rotated_mask, expected_mask = self.execute(graph_fn, []) + self.assertAllEqual(rotated_mask.flatten(), expected_mask.flatten()) + + def _testPreprocessorCache(self, + preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False): + if self.is_tf2(): return + def graph_fn(): + cache = preprocessor_cache.PreprocessorCache() + images = self.createTestImages() + boxes = self.createTestBoxes() + weights = self.createTestGroundtruthWeights() + classes = self.createTestLabels() + masks = self.createTestMasks() + keypoints, _ = self.createTestKeypoints() + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=test_masks, include_keypoints=test_keypoints) + out = [] + for _ in range(2): + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_weights: weights + } + if test_boxes: + tensor_dict[fields.InputDataFields.groundtruth_boxes] = boxes + tensor_dict[fields.InputDataFields.groundtruth_classes] = classes + if test_masks: + tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks + if test_keypoints: + tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints + out.append( + preprocessor.preprocess(tensor_dict, preprocess_options, + preprocessor_arg_map, cache)) + return out + + out1, out2 = self.execute_cpu_tf1(graph_fn, []) + for (_, v1), (_, v2) in zip(out1.items(), out2.items()): + self.assertAllClose(v1, v2) + + def testRandomHorizontalFlip(self): + def graph_fn(): + preprocess_options = [(preprocessor.random_horizontal_flip, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createTestBoxes() + tensor_dict = {fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes} + images_expected1 = self.expectedImagesAfterLeftRightFlip() + boxes_expected1 = self.expectedBoxesAfterLeftRightFlip() + images_expected2 = images + boxes_expected2 = boxes + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) + boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) + boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) + boxes_diff_expected = tf.zeros_like(boxes_diff) + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + 
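+      # The flip is applied with probability 0.5, so the test accepts either
+      # outcome: the elementwise product of the squared differences against
+      # the flipped and unflipped expectations is zero wherever the result
+      # matches at least one of them, and must therefore be all zeros.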
images_diff_expected = tf.zeros_like(images_diff) + return [images_diff, images_diff_expected, boxes_diff, + boxes_diff_expected] + (images_diff_, images_diff_expected_, boxes_diff_, + boxes_diff_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_diff_, boxes_diff_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomHorizontalFlipWithEmptyBoxes(self): + def graph_fn(): + preprocess_options = [(preprocessor.random_horizontal_flip, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createEmptyTestBoxes() + tensor_dict = {fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes} + images_expected1 = self.expectedImagesAfterLeftRightFlip() + boxes_expected = self.createEmptyTestBoxes() + images_expected2 = images + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [images_diff, images_diff_expected, boxes, boxes_expected] + (images_diff_, images_diff_expected_, boxes_, + boxes_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_, boxes_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomHorizontalFlipWithCache(self): + keypoint_flip_permutation = self.createKeypointFlipPermutation() + preprocess_options = [ + (preprocessor.random_horizontal_flip, + {'keypoint_flip_permutation': keypoint_flip_permutation})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + + def testRandomVerticalFlip(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_vertical_flip, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createTestBoxes() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes + } + images_expected1 = self.expectedImagesAfterUpDownFlip() + boxes_expected1 = self.expectedBoxesAfterUpDownFlip() + images_expected2 = images + boxes_expected2 = boxes + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) + boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) + boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) + boxes_diff_expected = tf.zeros_like(boxes_diff) + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [ + images_diff, images_diff_expected, boxes_diff, boxes_diff_expected + ] + + (images_diff_, images_diff_expected_, boxes_diff_, + boxes_diff_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_diff_, boxes_diff_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomVerticalFlipWithEmptyBoxes(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_vertical_flip, {})] + images = self.expectedImagesAfterNormalization() + boxes = 
self.createEmptyTestBoxes() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes + } + images_expected1 = self.expectedImagesAfterUpDownFlip() + boxes_expected = self.createEmptyTestBoxes() + images_expected2 = images + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [images_diff, images_diff_expected, boxes, boxes_expected] + + (images_diff_, images_diff_expected_, boxes_, + boxes_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_, boxes_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomVerticalFlipWithCache(self): + keypoint_flip_permutation = self.createKeypointFlipPermutation() + preprocess_options = [ + (preprocessor.random_vertical_flip, + {'keypoint_flip_permutation': keypoint_flip_permutation})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRunRandomVerticalFlipWithMaskAndKeypoints(self): + preprocess_options = [(preprocessor.random_vertical_flip, {})] + image_height = 3 + image_width = 3 + images = tf.random_uniform([1, image_height, image_width, 3]) + boxes = self.createTestBoxes() + masks = self.createTestMasks() + keypoints, _ = self.createTestKeypoints() + keypoint_flip_permutation = self.createKeypointFlipPermutation() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_instance_masks: masks, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + preprocess_options = [ + (preprocessor.random_vertical_flip, + {'keypoint_flip_permutation': keypoint_flip_permutation})] + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True, include_keypoints=True) + tensor_dict = preprocessor.preprocess( + tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] + keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints] + self.assertIsNotNone(boxes) + self.assertIsNotNone(masks) + self.assertIsNotNone(keypoints) + + def testRandomRotation90(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_rotation90, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createTestBoxes() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes + } + images_expected1 = self.expectedImagesAfterRot90() + boxes_expected1 = self.expectedBoxesAfterRot90() + images_expected2 = images + boxes_expected2 = boxes + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) + boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) + boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) + boxes_diff_expected = tf.zeros_like(boxes_diff) + + images_diff1 = 
tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [ + images_diff, images_diff_expected, boxes_diff, boxes_diff_expected + ] + + (images_diff_, images_diff_expected_, boxes_diff_, + boxes_diff_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_diff_, boxes_diff_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomRotation90WithEmptyBoxes(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_rotation90, {})] + images = self.expectedImagesAfterNormalization() + boxes = self.createEmptyTestBoxes() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes + } + images_expected1 = self.expectedImagesAfterRot90() + boxes_expected = self.createEmptyTestBoxes() + images_expected2 = images + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images = tensor_dict[fields.InputDataFields.image] + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + + images_diff1 = tf.squared_difference(images, images_expected1) + images_diff2 = tf.squared_difference(images, images_expected2) + images_diff = tf.multiply(images_diff1, images_diff2) + images_diff_expected = tf.zeros_like(images_diff) + return [images_diff, images_diff_expected, boxes, boxes_expected] + + (images_diff_, images_diff_expected_, boxes_, + boxes_expected_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(boxes_, boxes_expected_) + self.assertAllClose(images_diff_, images_diff_expected_) + + def testRandomRotation90WithCache(self): + preprocess_options = [(preprocessor.random_rotation90, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRunRandomRotation90WithMaskAndKeypoints(self): + image_height = 3 + image_width = 3 + images = tf.random_uniform([1, image_height, image_width, 3]) + boxes = self.createTestBoxes() + masks = self.createTestMasks() + keypoints, _ = self.createTestKeypoints() + keypoint_rot_permutation = self.createKeypointRotPermutation() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_instance_masks: masks, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + preprocess_options = [(preprocessor.random_rotation90, { + 'keypoint_rot_permutation': keypoint_rot_permutation + })] + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True, include_keypoints=True) + tensor_dict = preprocessor.preprocess( + tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) + boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] + keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints] + self.assertIsNotNone(boxes) + self.assertIsNotNone(masks) + self.assertIsNotNone(keypoints) + + def testRandomPixelValueScale(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_pixel_value_scale, {})) + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: 
images} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images_min = tf.cast(images, dtype=tf.float32) * 0.9 / 255.0 + images_max = tf.cast(images, dtype=tf.float32) * 1.1 / 255.0 + images = tensor_dict[fields.InputDataFields.image] + values_greater = tf.greater_equal(images, images_min) + values_less = tf.less_equal(images, images_max) + values_true = tf.fill([1, 4, 4, 3], True) + return [values_greater, values_less, values_true] + + (values_greater_, values_less_, + values_true_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(values_greater_, values_true_) + self.assertAllClose(values_less_, values_true_) + + def testRandomPixelValueScaleWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_pixel_value_scale, {})) + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=False, + test_keypoints=False) + + def testRandomImageScale(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_image_scale, {})] + images_original = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images_scaled = tensor_dict[fields.InputDataFields.image] + images_original_shape = tf.shape(images_original) + images_scaled_shape = tf.shape(images_scaled) + return [images_original_shape, images_scaled_shape] + + (images_original_shape_, + images_scaled_shape_) = self.execute_cpu(graph_fn, []) + self.assertLessEqual(images_original_shape_[1] * 0.5, + images_scaled_shape_[1]) + self.assertGreaterEqual(images_original_shape_[1] * 2.0, + images_scaled_shape_[1]) + self.assertLessEqual(images_original_shape_[2] * 0.5, + images_scaled_shape_[2]) + self.assertGreaterEqual(images_original_shape_[2] * 2.0, + images_scaled_shape_[2]) + + def testRandomImageScaleWithCache(self): + preprocess_options = [(preprocessor.random_image_scale, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomRGBtoGray(self): + + def graph_fn(): + preprocess_options = [(preprocessor.random_rgb_to_gray, {})] + images_original = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) + images_gray = tensor_dict[fields.InputDataFields.image] + images_gray_r, images_gray_g, images_gray_b = tf.split( + value=images_gray, num_or_size_splits=3, axis=3) + images_r, images_g, images_b = tf.split( + value=images_original, num_or_size_splits=3, axis=3) + images_r_diff1 = tf.squared_difference( + tf.cast(images_r, dtype=tf.float32), + tf.cast(images_gray_r, dtype=tf.float32)) + images_r_diff2 = tf.squared_difference( + tf.cast(images_gray_r, dtype=tf.float32), + tf.cast(images_gray_g, dtype=tf.float32)) + images_r_diff = tf.multiply(images_r_diff1, images_r_diff2) + images_g_diff1 = tf.squared_difference( + tf.cast(images_g, dtype=tf.float32), + tf.cast(images_gray_g, dtype=tf.float32)) + images_g_diff2 = tf.squared_difference( + tf.cast(images_gray_g, dtype=tf.float32), + tf.cast(images_gray_b, dtype=tf.float32)) + images_g_diff = tf.multiply(images_g_diff1, images_g_diff2) + images_b_diff1 = tf.squared_difference( + tf.cast(images_b, dtype=tf.float32), + tf.cast(images_gray_b, 
dtype=tf.float32)) + images_b_diff2 = tf.squared_difference( + tf.cast(images_gray_b, dtype=tf.float32), + tf.cast(images_gray_r, dtype=tf.float32)) + images_b_diff = tf.multiply(images_b_diff1, images_b_diff2) + image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1]) + return [images_r_diff, images_g_diff, images_b_diff, image_zero1] + + (images_r_diff_, images_g_diff_, images_b_diff_, + image_zero1_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(images_r_diff_, image_zero1_) + self.assertAllClose(images_g_diff_, image_zero1_) + self.assertAllClose(images_b_diff_, image_zero1_) + + def testRandomRGBtoGrayWithCache(self): + preprocess_options = [( + preprocessor.random_rgb_to_gray, {'probability': 0.5})] + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomAdjustBrightness(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_adjust_brightness, {})) + images_original = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images_bright = tensor_dict[fields.InputDataFields.image] + image_original_shape = tf.shape(images_original) + image_bright_shape = tf.shape(images_bright) + return [image_original_shape, image_bright_shape] + + (image_original_shape_, + image_bright_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image_original_shape_, image_bright_shape_) + + def testRandomAdjustBrightnessWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_adjust_brightness, {})) + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomAdjustContrast(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_adjust_contrast, {})) + images_original = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images_contrast = tensor_dict[fields.InputDataFields.image] + image_original_shape = tf.shape(images_original) + image_contrast_shape = tf.shape(images_contrast) + return [image_original_shape, image_contrast_shape] + + (image_original_shape_, + image_contrast_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image_original_shape_, image_contrast_shape_) + + def testRandomAdjustContrastWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_adjust_contrast, {})) + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomAdjustHue(self): + + def graph_fn(): + preprocessing_options = [] + 
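+      # As in the other photometric tests above, the image is normalized to
+      # [0, 1] before random_adjust_hue is applied; the test only verifies
+      # that the output shape matches the input shape.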
preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_adjust_hue, {})) + images_original = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images_hue = tensor_dict[fields.InputDataFields.image] + image_original_shape = tf.shape(images_original) + image_hue_shape = tf.shape(images_hue) + return [image_original_shape, image_hue_shape] + + (image_original_shape_, image_hue_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image_original_shape_, image_hue_shape_) + + def testRandomAdjustHueWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_adjust_hue, {})) + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomDistortColor(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_distort_color, {})) + images_original = self.createTestImages() + images_original_shape = tf.shape(images_original) + tensor_dict = {fields.InputDataFields.image: images_original} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images_distorted_color = tensor_dict[fields.InputDataFields.image] + images_distorted_color_shape = tf.shape(images_distorted_color) + return [images_original_shape, images_distorted_color_shape] + + (images_original_shape_, + images_distorted_color_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_original_shape_, images_distorted_color_shape_) + + def testRandomDistortColorWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_distort_color, {})) + self._testPreprocessorCache(preprocess_options, + test_boxes=False, + test_masks=False, + test_keypoints=False) + + def testRandomJitterBoxes(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.random_jitter_boxes, {})) + boxes = self.createTestBoxes() + boxes_shape = tf.shape(boxes) + tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] + distorted_boxes_shape = tf.shape(distorted_boxes) + return [boxes_shape, distorted_boxes_shape] + + (boxes_shape_, distorted_boxes_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_shape_, distorted_boxes_shape_) + + def testRandomCropImage(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_crop_image, {})) + images = self.createTestImages() + boxes = 
self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + return [ + boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank + ] + + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testRandomCropImageWithCache(self): + preprocess_options = [(preprocessor.random_rgb_to_gray, + {'probability': 0.5}), + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1, + }), + (preprocessor.random_crop_image, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=False, + test_keypoints=False) + + def testRandomCropImageGrayscale(self): + + def graph_fn(): + preprocessing_options = [(preprocessor.rgb_to_gray, {}), + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1, + }), (preprocessor.random_crop_image, {})] + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + return [ + boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank + ] + + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testRandomCropImageWithBoxOutOfImage(self): + + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_crop_image, {})) + images = self.createTestImages() + boxes = self.createTestBoxesOutOfImage() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + 
fields.InputDataFields.groundtruth_weights: weights, + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + return [ + boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank + ] + + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testRandomCropImageWithRandomCoefOne(self): + + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights + } + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_crop_image, { + 'random_coef': 1.0 + })] + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_weights = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_weights] + boxes_shape = tf.shape(boxes) + distorted_boxes_shape = tf.shape(distorted_boxes) + images_shape = tf.shape(images) + distorted_images_shape = tf.shape(distorted_images) + return [ + boxes_shape, distorted_boxes_shape, images_shape, + distorted_images_shape, images, distorted_images, boxes, + distorted_boxes, labels, distorted_labels, weights, distorted_weights + ] + + (boxes_shape_, distorted_boxes_shape_, images_shape_, + distorted_images_shape_, images_, distorted_images_, boxes_, + distorted_boxes_, labels_, distorted_labels_, weights_, + distorted_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_shape_, distorted_boxes_shape_) + self.assertAllEqual(images_shape_, distorted_images_shape_) + self.assertAllClose(images_, distorted_images_) + self.assertAllClose(boxes_, distorted_boxes_) + self.assertAllEqual(labels_, distorted_labels_) + self.assertAllEqual(weights_, distorted_weights_) + + def testRandomCropWithMockSampleDistortedBoundingBox(self): + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createColorfulTestImage() + boxes = tf.constant([[0.1, 0.1, 0.8, 0.3], [0.2, 0.4, 0.75, 0.75], + [0.3, 0.1, 0.4, 0.7]], + dtype=tf.float32) + labels = tf.constant([1, 7, 11], dtype=tf.int32) + weights = tf.constant([1.0, 0.5, 0.6], dtype=tf.float32) + + tensor_dict = { + fields.InputDataFields.image: images, + 
fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_crop_image, {})] + + with mock.patch.object(tf.image, 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = (tf.constant( + [6, 143, 0], dtype=tf.int32), tf.constant( + [190, 237, -1], dtype=tf.int32), tf.constant( + [[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_weights = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_weights] + expected_boxes = tf.constant( + [[0.178947, 0.07173, 0.75789469, 0.66244733], + [0.28421, 0.0, 0.38947365, 0.57805908]], + dtype=tf.float32) + expected_labels = tf.constant([7, 11], dtype=tf.int32) + expected_weights = tf.constant([0.5, 0.6], dtype=tf.float32) + return [ + distorted_boxes, distorted_labels, distorted_weights, + expected_boxes, expected_labels, expected_weights + ] + + (distorted_boxes_, distorted_labels_, distorted_weights_, expected_boxes_, + expected_labels_, expected_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(distorted_boxes_, expected_boxes_) + self.assertAllEqual(distorted_labels_, expected_labels_) + self.assertAllEqual(distorted_weights_, expected_weights_) + + def testRandomCropWithoutClipBoxes(self): + + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createColorfulTestImage() + boxes = tf.constant([[0.1, 0.1, 0.8, 0.3], + [0.2, 0.4, 0.75, 0.75], + [0.3, 0.1, 0.4, 0.7]], dtype=tf.float32) + keypoints = tf.constant([ + [[0.1, 0.1], [0.8, 0.3]], + [[0.2, 0.4], [0.75, 0.75]], + [[0.3, 0.1], [0.4, 0.7]], + ], dtype=tf.float32) + labels = tf.constant([1, 7, 11], dtype=tf.int32) + weights = tf.constant([1.0, 0.5, 0.6], dtype=tf.float32) + + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_keypoints: keypoints, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + + preprocessing_options = [(preprocessor.random_crop_image, { + 'clip_boxes': False, + })] + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + with mock.patch.object(tf.image, 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = (tf.constant( + [6, 143, 0], dtype=tf.int32), tf.constant( + [190, 237, -1], dtype=tf.int32), tf.constant( + [[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_keypoints = distorted_tensor_dict[ + 
fields.InputDataFields.groundtruth_keypoints] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_weights = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_weights] + expected_boxes = tf.constant( + [[0.178947, 0.07173, 0.75789469, 0.66244733], + [0.28421, -0.434599, 0.38947365, 0.57805908]], + dtype=tf.float32) + expected_keypoints = tf.constant( + [[[0.178947, 0.07173], [0.75789469, 0.66244733]], + [[0.28421, -0.434599], [0.38947365, 0.57805908]]], + dtype=tf.float32) + expected_labels = tf.constant([7, 11], dtype=tf.int32) + expected_weights = tf.constant([0.5, 0.6], dtype=tf.float32) + return [distorted_boxes, distorted_keypoints, distorted_labels, + distorted_weights, expected_boxes, expected_keypoints, + expected_labels, expected_weights] + + (distorted_boxes_, distorted_keypoints_, distorted_labels_, + distorted_weights_, expected_boxes_, expected_keypoints_, expected_labels_, + expected_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(distorted_boxes_, expected_boxes_) + self.assertAllClose(distorted_keypoints_, expected_keypoints_) + self.assertAllEqual(distorted_labels_, expected_labels_) + self.assertAllEqual(distorted_weights_, expected_weights_) + + def testRandomCropImageWithMultiClassScores(self): + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_crop_image, {})) + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + multiclass_scores = self.createTestMultiClassScores() + + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.multiclass_scores: multiclass_scores + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_multiclass_scores = distorted_tensor_dict[ + fields.InputDataFields.multiclass_scores] + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + multiclass_scores_rank = tf.rank(multiclass_scores) + distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores) + return [ + boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank, + distorted_images_rank, multiclass_scores_rank, + distorted_multiclass_scores_rank, distorted_multiclass_scores + ] + + (boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_, multiclass_scores_rank_, + distorted_multiclass_scores_rank_, + distorted_multiclass_scores_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + self.assertAllEqual(multiclass_scores_rank_, + distorted_multiclass_scores_rank_) + self.assertAllEqual(distorted_boxes_.shape[0], + distorted_multiclass_scores_.shape[0]) + + def testStrictRandomCropImageWithGroundtruthWeights(self): + def graph_fn(): + image = 
self.createColorfulTestImage()[0] + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + new_image, new_boxes, new_labels, new_groundtruth_weights = ( + preprocessor._strict_random_crop_image( + image, boxes, labels, weights)) + return [new_image, new_boxes, new_labels, new_groundtruth_weights] + (new_image, new_boxes, _, + new_groundtruth_weights) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array( + [[0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32) + self.assertAllEqual(new_image.shape, [190, 237, 3]) + self.assertAllEqual(new_groundtruth_weights, [1.0, 0.5]) + self.assertAllClose( + new_boxes.flatten(), expected_boxes.flatten()) + + def testStrictRandomCropImageWithMasks(self): + def graph_fn(): + image = self.createColorfulTestImage()[0] + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + new_image, new_boxes, new_labels, new_weights, new_masks = ( + preprocessor._strict_random_crop_image( + image, boxes, labels, weights, masks=masks)) + return [new_image, new_boxes, new_labels, new_weights, new_masks] + (new_image, new_boxes, _, _, + new_masks) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array( + [[0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32) + self.assertAllEqual(new_image.shape, [190, 237, 3]) + self.assertAllEqual(new_masks.shape, [2, 190, 237]) + self.assertAllClose( + new_boxes.flatten(), expected_boxes.flatten()) + + def testStrictRandomCropImageWithKeypoints(self): + def graph_fn(): + image = self.createColorfulTestImage()[0] + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints, keypoint_visibilities = self.createTestKeypoints() + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + (new_image, new_boxes, new_labels, new_weights, new_keypoints, + new_keypoint_visibilities) = preprocessor._strict_random_crop_image( + image, boxes, labels, weights, keypoints=keypoints, + keypoint_visibilities=keypoint_visibilities) + return [new_image, new_boxes, new_labels, new_weights, new_keypoints, + new_keypoint_visibilities] + (new_image, new_boxes, _, _, new_keypoints, + new_keypoint_visibilities) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array([ + [0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0],], dtype=np.float32) + expected_keypoints = np.array([ + [[np.nan, np.nan], + 
[np.nan, np.nan], + [np.nan, np.nan]], + [[0.38947368, 0.07173], + [0.49473682, 0.24050637], + [0.60000002, 0.40928277]] + ], dtype=np.float32) + expected_keypoint_visibilities = [ + [False, False, False], + [False, True, True] + ] + self.assertAllEqual(new_image.shape, [190, 237, 3]) + self.assertAllClose( + new_boxes, expected_boxes) + self.assertAllClose( + new_keypoints, expected_keypoints) + self.assertAllEqual( + new_keypoint_visibilities, expected_keypoint_visibilities) + + def testRunRandomCropImageWithMasks(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_instance_masks: masks, + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True) + + preprocessing_options = [(preprocessor.random_crop_image, {})] + + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_masks = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] + return [distorted_image, distorted_boxes, distorted_labels, + distorted_masks] + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_masks_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array([ + [0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0], + ], dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3]) + self.assertAllEqual(distorted_masks_.shape, [2, 190, 237]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose( + distorted_boxes_.flatten(), expected_boxes.flatten()) + + def testRunRandomCropImageWithKeypointsInsideCrop(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints = self.createTestKeypointsInsideCrop() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_keypoints: keypoints, + fields.InputDataFields.groundtruth_weights: weights + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + + preprocessing_options = [(preprocessor.random_crop_image, {})] + + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, 
-1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_keypoints = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return [distorted_image, distorted_boxes, distorted_labels, + distorted_keypoints] + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_keypoints_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array([ + [0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0], + ], dtype=np.float32) + expected_keypoints = np.array([ + [[0.38947368, 0.07173], + [0.49473682, 0.24050637], + [0.60000002, 0.40928277]], + [[0.38947368, 0.07173], + [0.49473682, 0.24050637], + [0.60000002, 0.40928277]] + ]) + self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose( + distorted_boxes_.flatten(), expected_boxes.flatten()) + self.assertAllClose( + distorted_keypoints_.flatten(), expected_keypoints.flatten()) + + def testRunRandomCropImageWithKeypointsOutsideCrop(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints = self.createTestKeypointsOutsideCrop() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + + preprocessing_options = [(preprocessor.random_crop_image, {})] + + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 143, 0], dtype=tf.int32), + tf.constant([190, 237, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_keypoints = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return [distorted_image, distorted_boxes, distorted_labels, + distorted_keypoints] + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_keypoints_) = self.execute_cpu(graph_fn, []) + + expected_boxes = np.array([ + [0.0, 0.0, 0.75789469, 1.0], + [0.23157893, 0.24050637, 0.75789469, 1.0], + ], dtype=np.float32) + expected_keypoints = np.array([ + [[np.nan, np.nan], + [np.nan, np.nan], + [np.nan, np.nan]], + [[np.nan, np.nan], + [np.nan, np.nan], + [np.nan, np.nan]], + ]) + self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose( + 
distorted_boxes_.flatten(), expected_boxes.flatten()) + self.assertAllClose( + distorted_keypoints_.flatten(), expected_keypoints.flatten()) + + def testRunRandomCropImageWithDensePose(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + dp_num_points, dp_part_ids, dp_surface_coords = self.createTestDensePose() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_dp_num_points: dp_num_points, + fields.InputDataFields.groundtruth_dp_part_ids: dp_part_ids, + fields.InputDataFields.groundtruth_dp_surface_coords: + dp_surface_coords + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_dense_pose=True) + + preprocessing_options = [(preprocessor.random_crop_image, {})] + + with mock.patch.object( + tf.image, + 'sample_distorted_bounding_box' + ) as mock_sample_distorted_bounding_box: + mock_sample_distorted_bounding_box.return_value = ( + tf.constant([6, 40, 0], dtype=tf.int32), + tf.constant([134, 340, -1], dtype=tf.int32), + tf.constant([[[0.03, 0.1, 0.7, 0.95]]], dtype=tf.float32)) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_dp_num_points = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_dp_num_points] + distorted_dp_part_ids = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_dp_part_ids] + distorted_dp_surface_coords = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_dp_surface_coords] + return [distorted_image, distorted_dp_num_points, distorted_dp_part_ids, + distorted_dp_surface_coords] + (distorted_image_, distorted_dp_num_points_, distorted_dp_part_ids_, + distorted_dp_surface_coords_) = self.execute_cpu(graph_fn, []) + expected_dp_num_points = np.array([1, 1]) + expected_dp_part_ids = np.array([[4], [0]]) + expected_dp_surface_coords = np.array([ + [[0.10447761, 0.1176470, 0.6, 0.7]], + [[0.10447761, 0.2352941, 0.2, 0.8]], + ]) + self.assertAllEqual(distorted_image_.shape, [1, 134, 340, 3]) + self.assertAllEqual(distorted_dp_num_points_, expected_dp_num_points) + self.assertAllEqual(distorted_dp_part_ids_, expected_dp_part_ids) + self.assertAllClose(distorted_dp_surface_coords_, + expected_dp_surface_coords) + + def testRunRetainBoxesAboveThreshold(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + + tensor_dict = { + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + + preprocessing_options = [ + (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6}) + ] + preprocessor_arg_map = preprocessor.get_default_func_arg_map() + retained_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + retained_boxes = retained_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + retained_labels = retained_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + retained_weights = retained_tensor_dict[ + fields.InputDataFields.groundtruth_weights] + return [retained_boxes, 
retained_labels, retained_weights, + self.expectedBoxesAfterThresholding(), + self.expectedLabelsAfterThresholding(), + self.expectedLabelScoresAfterThresholding()] + + (retained_boxes_, retained_labels_, retained_weights_, + expected_retained_boxes_, expected_retained_labels_, + expected_retained_weights_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_boxes_, expected_retained_boxes_) + self.assertAllClose(retained_labels_, expected_retained_labels_) + self.assertAllClose( + retained_weights_, expected_retained_weights_) + + def testRunRetainBoxesAboveThresholdWithMasks(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = self.createTestMasks() + + tensor_dict = { + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_instance_masks: masks + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_label_weights=True, + include_instance_masks=True) + + preprocessing_options = [ + (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6}) + ] + + retained_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + retained_masks = retained_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] + return [retained_masks, self.expectedMasksAfterThresholding()] + (retained_masks_, expected_masks_) = self.execute(graph_fn, []) + self.assertAllClose(retained_masks_, expected_masks_) + + def testRunRetainBoxesAboveThresholdWithKeypoints(self): + def graph_fn(): + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints, _ = self.createTestKeypoints() + + tensor_dict = { + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + + preprocessing_options = [ + (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6}) + ] + + retained_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + retained_keypoints = retained_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return [retained_keypoints, self.expectedKeypointsAfterThresholding()] + (retained_keypoints_, expected_keypoints_) = self.execute_cpu(graph_fn, []) + self.assertAllClose(retained_keypoints_, expected_keypoints_) + + def testRandomCropToAspectRatioWithCache(self): + preprocess_options = [(preprocessor.random_crop_to_aspect_ratio, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=False, + test_keypoints=False) + + def testRunRandomCropToAspectRatioWithMasks(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_instance_masks: 
masks + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True) + + preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})] + + with mock.patch.object(preprocessor, + '_random_integer') as mock_random_integer: + mock_random_integer.return_value = tf.constant(0, dtype=tf.int32) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_masks = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] + return [ + distorted_image, distorted_boxes, distorted_labels, distorted_masks + ] + + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_masks_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3]) + self.assertAllEqual(distorted_labels_, [1]) + self.assertAllClose(distorted_boxes_.flatten(), + expected_boxes.flatten()) + self.assertAllEqual(distorted_masks_.shape, [1, 200, 200]) + + def testRunRandomCropToAspectRatioWithKeypoints(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + keypoints, _ = self.createTestKeypoints() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_keypoints=True) + + preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})] + + with mock.patch.object(preprocessor, + '_random_integer') as mock_random_integer: + mock_random_integer.return_value = tf.constant(0, dtype=tf.int32) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, + preprocessing_options, + func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_keypoints = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return [distorted_image, distorted_boxes, distorted_labels, + distorted_keypoints] + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_keypoints_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32) + expected_keypoints = np.array( + [[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]], dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3]) + self.assertAllEqual(distorted_labels_, [1]) + self.assertAllClose(distorted_boxes_.flatten(), + expected_boxes.flatten()) + self.assertAllClose(distorted_keypoints_.flatten(), + expected_keypoints.flatten()) + + def testRandomPadToAspectRatioWithCache(self): + preprocess_options = [(preprocessor.random_pad_to_aspect_ratio, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + 
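+  # The random_pad_to_aspect_ratio tests below share one pattern: build a
+  # tensor_dict, pick an arg map via preprocessor.get_default_func_arg_map()
+  # (with include_instance_masks / include_keypoints set as needed), and run
+  #
+  #   preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {...})]
+  #   distorted_tensor_dict = preprocessor.preprocess(
+  #       tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
+  #
+  # Setting min_padded_size_ratio equal to max_padded_size_ratio (first test)
+  # makes the padded size deterministic, so the expected boxes can be asserted
+  # exactly; the mask and keypoint variants check that every annotation is
+  # rescaled consistently with the padded image.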
def testRunRandomPadToAspectRatioWithMinMaxPaddedSizeRatios(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map() + preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, + {'min_padded_size_ratio': (4.0, 4.0), + 'max_padded_size_ratio': (4.0, 4.0)})] + + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + return [distorted_image, distorted_boxes, distorted_labels] + + distorted_image_, distorted_boxes_, distorted_labels_ = self.execute_cpu( + graph_fn, []) + expected_boxes = np.array( + [[0.0, 0.125, 0.1875, 0.5], [0.0625, 0.25, 0.1875, 0.5]], + dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 800, 800, 3]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose(distorted_boxes_.flatten(), + expected_boxes.flatten()) + + def testRunRandomPadToAspectRatioWithMasks(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_instance_masks: masks + } + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True) + + preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})] + + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_masks = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] + return [ + distorted_image, distorted_boxes, distorted_labels, distorted_masks + ] + + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_masks_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array( + [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose(distorted_boxes_.flatten(), + expected_boxes.flatten()) + self.assertAllEqual(distorted_masks_.shape, [2, 400, 400]) + + def testRunRandomPadToAspectRatioWithKeypoints(self): + def graph_fn(): + image = self.createColorfulTestImage() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + keypoints, _ = self.createTestKeypoints() + + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_keypoints: keypoints + } + + preprocessor_arg_map 
= preprocessor.get_default_func_arg_map( + include_keypoints=True) + + preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})] + + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + distorted_image = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_labels = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_classes] + distorted_keypoints = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return [ + distorted_image, distorted_boxes, distorted_labels, + distorted_keypoints + ] + + (distorted_image_, distorted_boxes_, distorted_labels_, + distorted_keypoints_) = self.execute_cpu(graph_fn, []) + expected_boxes = np.array( + [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32) + expected_keypoints = np.array([ + [[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]], + [[0.2, 0.4], [0.25, 0.5], [0.3, 0.6]], + ], dtype=np.float32) + self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3]) + self.assertAllEqual(distorted_labels_, [1, 2]) + self.assertAllClose(distorted_boxes_.flatten(), + expected_boxes.flatten()) + self.assertAllClose(distorted_keypoints_.flatten(), + expected_keypoints.flatten()) + + def testRandomPadImageWithCache(self): + preprocess_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1,}), (preprocessor.random_pad_image, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRandomPadImage(self): + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + } + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_pad_image, {})] + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + padded_images = padded_tensor_dict[fields.InputDataFields.image] + padded_boxes = padded_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_shape = tf.shape(boxes) + padded_boxes_shape = tf.shape(padded_boxes) + images_shape = tf.shape(images) + padded_images_shape = tf.shape(padded_images) + return [boxes_shape, padded_boxes_shape, images_shape, + padded_images_shape, boxes, padded_boxes] + (boxes_shape_, padded_boxes_shape_, images_shape_, + padded_images_shape_, boxes_, padded_boxes_) = self.execute_cpu(graph_fn, + []) + self.assertAllEqual(boxes_shape_, padded_boxes_shape_) + self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) + self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) + self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) + self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all) + self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( + padded_boxes_[:, 2] - padded_boxes_[:, 0]))) + self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( + padded_boxes_[:, 3] - padded_boxes_[:, 1]))) + 
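+  # testRandomPadImageWithKeypointsAndMasks runs random_pad_image with masks,
+  # keypoints, keypoint visibilities and (optionally) DensePose surface
+  # coordinates routed through get_default_func_arg_map, and checks that the
+  # padded annotations stay consistent with the padded image: the mask spatial
+  # dimensions track the padded image, relative box and keypoint extents never
+  # grow, and the trailing (non-coordinate) channels of the DensePose surface
+  # coordinates are unchanged by padding.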
+ @parameterized.parameters( + {'include_dense_pose': False}, + ) + def testRandomPadImageWithKeypointsAndMasks(self, include_dense_pose): + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + masks = self.createTestMasks() + keypoints, _ = self.createTestKeypoints() + _, _, dp_surface_coords = self.createTestDensePose() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_instance_masks: masks, + fields.InputDataFields.groundtruth_keypoints: keypoints, + fields.InputDataFields.groundtruth_dp_surface_coords: + dp_surface_coords + } + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_pad_image, {})] + func_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True, + include_keypoints=True, + include_keypoint_visibilities=True, + include_dense_pose=include_dense_pose) + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options, + func_arg_map=func_arg_map) + + padded_images = padded_tensor_dict[fields.InputDataFields.image] + padded_boxes = padded_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + padded_masks = padded_tensor_dict[ + fields.InputDataFields.groundtruth_instance_masks] + padded_keypoints = padded_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + boxes_shape = tf.shape(boxes) + padded_boxes_shape = tf.shape(padded_boxes) + padded_masks_shape = tf.shape(padded_masks) + keypoints_shape = tf.shape(keypoints) + padded_keypoints_shape = tf.shape(padded_keypoints) + images_shape = tf.shape(images) + padded_images_shape = tf.shape(padded_images) + outputs = [boxes_shape, padded_boxes_shape, padded_masks_shape, + keypoints_shape, padded_keypoints_shape, images_shape, + padded_images_shape, boxes, padded_boxes, keypoints, + padded_keypoints] + if include_dense_pose: + padded_dp_surface_coords = padded_tensor_dict[ + fields.InputDataFields.groundtruth_dp_surface_coords] + outputs.extend([dp_surface_coords, padded_dp_surface_coords]) + return outputs + + outputs = self.execute_cpu(graph_fn, []) + boxes_shape_ = outputs[0] + padded_boxes_shape_ = outputs[1] + padded_masks_shape_ = outputs[2] + keypoints_shape_ = outputs[3] + padded_keypoints_shape_ = outputs[4] + images_shape_ = outputs[5] + padded_images_shape_ = outputs[6] + boxes_ = outputs[7] + padded_boxes_ = outputs[8] + keypoints_ = outputs[9] + padded_keypoints_ = outputs[10] + + self.assertAllEqual(boxes_shape_, padded_boxes_shape_) + self.assertAllEqual(keypoints_shape_, padded_keypoints_shape_) + self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) + self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) + self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) + self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all) + self.assertAllEqual(padded_masks_shape_[1:3], padded_images_shape_[1:3]) + self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( + padded_boxes_[:, 2] - padded_boxes_[:, 0]))) + self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( + padded_boxes_[:, 3] - padded_boxes_[:, 1]))) + 
self.assertTrue(np.all((keypoints_[1, :, 0] - keypoints_[0, :, 0]) >= ( + padded_keypoints_[1, :, 0] - padded_keypoints_[0, :, 0]))) + self.assertTrue(np.all((keypoints_[1, :, 1] - keypoints_[0, :, 1]) >= ( + padded_keypoints_[1, :, 1] - padded_keypoints_[0, :, 1]))) + if include_dense_pose: + dp_surface_coords = outputs[11] + padded_dp_surface_coords = outputs[12] + self.assertAllClose(padded_dp_surface_coords[:, :, 2:], + dp_surface_coords[:, :, 2:]) + + def testRandomAbsolutePadImage(self): + height_padding = 10 + width_padding = 20 + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + tensor_dict = { + fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + } + preprocessing_options = [(preprocessor.random_absolute_pad_image, { + 'max_height_padding': height_padding, + 'max_width_padding': width_padding})] + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + original_shape = tf.shape(images) + final_shape = tf.shape(padded_tensor_dict[fields.InputDataFields.image]) + return original_shape, final_shape + for _ in range(100): + original_shape, output_shape = self.execute_cpu(graph_fn, []) + _, height, width, _ = original_shape + self.assertGreaterEqual(output_shape[1], height) + self.assertLess(output_shape[1], height + height_padding) + self.assertGreaterEqual(output_shape[2], width) + self.assertLess(output_shape[2], width + width_padding) + + def testRandomAbsolutePadImageWithKeypoints(self): + height_padding = 10 + width_padding = 20 + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + keypoints, _ = self.createTestKeypoints() + tensor_dict = { + fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_keypoints: keypoints, + } + + preprocessing_options = [(preprocessor.random_absolute_pad_image, { + 'max_height_padding': height_padding, + 'max_width_padding': width_padding + })] + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + original_shape = tf.shape(images) + final_shape = tf.shape(padded_tensor_dict[fields.InputDataFields.image]) + padded_keypoints = padded_tensor_dict[ + fields.InputDataFields.groundtruth_keypoints] + return (original_shape, final_shape, padded_keypoints) + for _ in range(100): + original_shape, output_shape, padded_keypoints_ = self.execute_cpu( + graph_fn, []) + _, height, width, _ = original_shape + self.assertGreaterEqual(output_shape[1], height) + self.assertLess(output_shape[1], height + height_padding) + self.assertGreaterEqual(output_shape[2], width) + self.assertLess(output_shape[2], width + width_padding) + # Verify the keypoints are populated. The correctness of the keypoint + # coordinates are already tested in random_pad_image function. 
+ self.assertEqual(padded_keypoints_.shape, (2, 3, 2)) + + def testRandomCropPadImageWithCache(self): + preprocess_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1,}), (preprocessor.random_crop_pad_image, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRandomCropPadImageWithRandomCoefOne(self): + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })] + + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_crop_pad_image, { + 'random_coef': 1.0 + })] + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + padded_images = padded_tensor_dict[fields.InputDataFields.image] + padded_boxes = padded_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_shape = tf.shape(boxes) + padded_boxes_shape = tf.shape(padded_boxes) + images_shape = tf.shape(images) + padded_images_shape = tf.shape(padded_images) + return [boxes_shape, padded_boxes_shape, images_shape, + padded_images_shape, boxes, padded_boxes] + (boxes_shape_, padded_boxes_shape_, images_shape_, + padded_images_shape_, boxes_, padded_boxes_) = self.execute_cpu(graph_fn, + []) + self.assertAllEqual(boxes_shape_, padded_boxes_shape_) + self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) + self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) + self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) + self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all) + self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( + padded_boxes_[:, 2] - padded_boxes_[:, 0]))) + self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( + padded_boxes_[:, 3] - padded_boxes_[:, 1]))) + + def testRandomCropToAspectRatio(self): + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + tensor_dict = preprocessor.preprocess(tensor_dict, []) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, { + 'aspect_ratio': 2.0 + })] + cropped_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + cropped_images = cropped_tensor_dict[fields.InputDataFields.image] + cropped_boxes = cropped_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_shape = tf.shape(boxes) + cropped_boxes_shape = tf.shape(cropped_boxes) + images_shape = tf.shape(images) + cropped_images_shape = tf.shape(cropped_images) + return [ + boxes_shape, cropped_boxes_shape, images_shape, 
cropped_images_shape + ] + + (boxes_shape_, cropped_boxes_shape_, images_shape_, + cropped_images_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_shape_, cropped_boxes_shape_) + self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2) + self.assertEqual(images_shape_[2], cropped_images_shape_[2]) + + def testRandomPadToAspectRatio(self): + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + } + tensor_dict = preprocessor.preprocess(tensor_dict, []) + images = tensor_dict[fields.InputDataFields.image] + + preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, { + 'aspect_ratio': 2.0 + })] + padded_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + + padded_images = padded_tensor_dict[fields.InputDataFields.image] + padded_boxes = padded_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + boxes_shape = tf.shape(boxes) + padded_boxes_shape = tf.shape(padded_boxes) + images_shape = tf.shape(images) + padded_images_shape = tf.shape(padded_images) + return [ + boxes_shape, padded_boxes_shape, images_shape, padded_images_shape + ] + + (boxes_shape_, padded_boxes_shape_, images_shape_, + padded_images_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_shape_, padded_boxes_shape_) + self.assertEqual(images_shape_[1], padded_images_shape_[1]) + self.assertEqual(2 * images_shape_[2], padded_images_shape_[2]) + + def testRandomBlackPatchesWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocess_options.append((preprocessor.random_black_patches, { + 'size_to_image_ratio': 0.5 + })) + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRandomBlackPatches(self): + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_black_patches, { + 'size_to_image_ratio': 0.5 + })) + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + blacked_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + blacked_images = blacked_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + blacked_images_shape = tf.shape(blacked_images) + return [images_shape, blacked_images_shape] + (images_shape_, blacked_images_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_, blacked_images_shape_) + + def testRandomJpegQuality(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_jpeg_quality, { + 'min_jpeg_quality': 0, + 'max_jpeg_quality': 100 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + encoded_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + encoded_images_shape = tf.shape(encoded_images) + return [images_shape, encoded_images_shape] + images_shape_out, encoded_images_shape_out = 
self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_out, encoded_images_shape_out) + + def testRandomJpegQualityKeepsStaticChannelShape(self): + # Set at least three weeks past the forward compatibility horizon for + # tf 1.14 of 2019/11/01. + # https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/compat/compat.py#L30 + if not tf.compat.forward_compatible(year=2019, month=12, day=1): + self.skipTest('Skipping test for future functionality.') + preprocessing_options = [(preprocessor.random_jpeg_quality, { + 'min_jpeg_quality': 0, + 'max_jpeg_quality': 100 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + encoded_images = processed_tensor_dict[fields.InputDataFields.image] + images_static_channels = images.shape[-1] + encoded_images_static_channels = encoded_images.shape[-1] + self.assertEqual(images_static_channels, encoded_images_static_channels) + + def testRandomJpegQualityWithCache(self): + preprocessing_options = [(preprocessor.random_jpeg_quality, { + 'min_jpeg_quality': 0, + 'max_jpeg_quality': 100 + })] + self._testPreprocessorCache(preprocessing_options) + + def testRandomJpegQualityWithRandomCoefOne(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_jpeg_quality, { + 'random_coef': 1.0 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + encoded_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + encoded_images_shape = tf.shape(encoded_images) + return [images, encoded_images, images_shape, encoded_images_shape] + + (images_out, encoded_images_out, images_shape_out, + encoded_images_shape_out) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_out, encoded_images_shape_out) + self.assertAllEqual(images_out, encoded_images_out) + + def testRandomDownscaleToTargetPixels(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, + { + 'min_target_pixels': 100, + 'max_target_pixels': 101 + })] + images = tf.random_uniform([1, 25, 100, 3]) + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + downscaled_images = processed_tensor_dict[fields.InputDataFields.image] + downscaled_shape = tf.shape(downscaled_images) + return downscaled_shape + expected_shape = [1, 5, 20, 3] + downscaled_shape_out = self.execute_cpu(graph_fn, []) + self.assertAllEqual(downscaled_shape_out, expected_shape) + + def testRandomDownscaleToTargetPixelsWithMasks(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, + { + 'min_target_pixels': 100, + 'max_target_pixels': 101 + })] + images = tf.random_uniform([1, 25, 100, 3]) + masks = tf.random_uniform([10, 25, 100]) + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_instance_masks: masks + } + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_instance_masks=True) + processed_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + downscaled_images = processed_tensor_dict[fields.InputDataFields.image] + downscaled_masks = processed_tensor_dict[ + 
fields.InputDataFields.groundtruth_instance_masks] + downscaled_images_shape = tf.shape(downscaled_images) + downscaled_masks_shape = tf.shape(downscaled_masks) + return [downscaled_images_shape, downscaled_masks_shape] + expected_images_shape = [1, 5, 20, 3] + expected_masks_shape = [10, 5, 20] + (downscaled_images_shape_out, + downscaled_masks_shape_out) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(downscaled_images_shape_out, expected_images_shape) + self.assertAllEqual(downscaled_masks_shape_out, expected_masks_shape) + + @parameterized.parameters( + {'test_masks': False}, + {'test_masks': True} + ) + def testRandomDownscaleToTargetPixelsWithCache(self, test_masks): + preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, { + 'min_target_pixels': 100, + 'max_target_pixels': 999 + })] + self._testPreprocessorCache(preprocessing_options, test_masks=test_masks) + + def testRandomDownscaleToTargetPixelsWithRandomCoefOne(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, + { + 'random_coef': 1.0, + 'min_target_pixels': 10, + 'max_target_pixels': 20, + })] + images = tf.random_uniform([1, 25, 100, 3]) + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + downscaled_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + downscaled_images_shape = tf.shape(downscaled_images) + return [images, downscaled_images, images_shape, downscaled_images_shape] + (images_out, downscaled_images_out, images_shape_out, + downscaled_images_shape_out) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_out, downscaled_images_shape_out) + self.assertAllEqual(images_out, downscaled_images_out) + + def testRandomDownscaleToTargetPixelsIgnoresSmallImages(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, + { + 'min_target_pixels': 1000, + 'max_target_pixels': 1001 + })] + images = tf.random_uniform([1, 10, 10, 3]) + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + downscaled_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + downscaled_images_shape = tf.shape(downscaled_images) + return [images, downscaled_images, images_shape, downscaled_images_shape] + (images_out, downscaled_images_out, images_shape_out, + downscaled_images_shape_out) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_out, downscaled_images_shape_out) + self.assertAllEqual(images_out, downscaled_images_out) + + def testRandomPatchGaussianShape(self): + preprocessing_options = [(preprocessor.random_patch_gaussian, { + 'min_patch_size': 1, + 'max_patch_size': 200, + 'min_gaussian_stddev': 0.0, + 'max_gaussian_stddev': 2.0 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + patched_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + patched_images_shape = tf.shape(patched_images) + self.assertAllEqual(images_shape, patched_images_shape) + + def testRandomPatchGaussianClippedToLowerBound(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_patch_gaussian, { + 'min_patch_size': 20, + 'max_patch_size': 40, + 
'min_gaussian_stddev': 50, + 'max_gaussian_stddev': 100 + })] + images = tf.zeros([1, 5, 4, 3]) + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + patched_images = processed_tensor_dict[fields.InputDataFields.image] + return patched_images + patched_images = self.execute_cpu(graph_fn, []) + self.assertAllGreaterEqual(patched_images, 0.0) + + def testRandomPatchGaussianClippedToUpperBound(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_patch_gaussian, { + 'min_patch_size': 20, + 'max_patch_size': 40, + 'min_gaussian_stddev': 50, + 'max_gaussian_stddev': 100 + })] + images = tf.constant(255.0, shape=[1, 5, 4, 3]) + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + patched_images = processed_tensor_dict[fields.InputDataFields.image] + return patched_images + patched_images = self.execute_cpu(graph_fn, []) + self.assertAllLessEqual(patched_images, 255.0) + + def testRandomPatchGaussianWithCache(self): + preprocessing_options = [(preprocessor.random_patch_gaussian, { + 'min_patch_size': 1, + 'max_patch_size': 200, + 'min_gaussian_stddev': 0.0, + 'max_gaussian_stddev': 2.0 + })] + self._testPreprocessorCache(preprocessing_options) + + def testRandomPatchGaussianWithRandomCoefOne(self): + def graph_fn(): + preprocessing_options = [(preprocessor.random_patch_gaussian, { + 'random_coef': 1.0 + })] + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + processed_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + patched_images = processed_tensor_dict[fields.InputDataFields.image] + images_shape = tf.shape(images) + patched_images_shape = tf.shape(patched_images) + return patched_images_shape, patched_images, images_shape, images + (patched_images_shape, patched_images, images_shape, + images) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape, patched_images_shape) + self.assertAllEqual(images, patched_images) + + @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') + def testAutoAugmentImage(self): + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.autoaugment_image, { + 'policy_name': 'v1' + })) + images = self.createTestImages() + boxes = self.createTestBoxes() + tensor_dict = {fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes} + autoaugment_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options) + augmented_images = autoaugment_tensor_dict[fields.InputDataFields.image] + augmented_boxes = autoaugment_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + images_shape = tf.shape(images) + boxes_shape = tf.shape(boxes) + augmented_images_shape = tf.shape(augmented_images) + augmented_boxes_shape = tf.shape(augmented_boxes) + return [images_shape, boxes_shape, augmented_images_shape, + augmented_boxes_shape] + (images_shape_, boxes_shape_, augmented_images_shape_, + augmented_boxes_shape_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(images_shape_, augmented_images_shape_) + self.assertAllEqual(boxes_shape_, augmented_boxes_shape_) + + def testRandomResizeMethodWithCache(self): + preprocess_options = [] + preprocess_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + 
preprocess_options.append((preprocessor.random_resize_method, { + 'target_size': (75, 150) + })) + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=True, + test_keypoints=True) + + def testRandomResizeMethod(self): + def graph_fn(): + preprocessing_options = [] + preprocessing_options.append((preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + })) + preprocessing_options.append((preprocessor.random_resize_method, { + 'target_size': (75, 150) + })) + images = self.createTestImages() + tensor_dict = {fields.InputDataFields.image: images} + resized_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + resized_images = resized_tensor_dict[fields.InputDataFields.image] + resized_images_shape = tf.shape(resized_images) + expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32) + return [expected_images_shape, resized_images_shape] + (expected_images_shape_, resized_images_shape_) = self.execute_cpu(graph_fn, + []) + self.assertAllEqual(expected_images_shape_, + resized_images_shape_) + + def testResizeImageWithMasks(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] + height = 50 + width = 100 + expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] + expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]] + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_image( + in_image, in_masks, new_height=height, new_width=width) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return out_image_shape, out_masks_shape + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + (out_image_shape, + out_masks_shape) = self.execute_cpu(graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeImageWithMasksTensorInputHeightAndWidth(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] + expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] + expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]] + def graph_fn(in_image_shape, in_masks_shape): + height = tf.constant(50, dtype=tf.int32) + width = tf.constant(100, dtype=tf.int32) + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_image( + in_image, in_masks, new_height=height, new_width=width) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return out_image_shape, out_masks_shape + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + (out_image_shape, + out_masks_shape) = self.execute_cpu(graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + 
self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeImageWithNoInstanceMask(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] + height = 50 + width = 100 + expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] + expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]] + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_image( + in_image, in_masks, new_height=height, new_width=width) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return out_image_shape, out_masks_shape + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + (out_image_shape, + out_masks_shape) = self.execute_cpu(graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToRangePreservesStaticSpatialShape(self): + """Tests image resizing, checking output sizes.""" + in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] + min_dim = 50 + max_dim = 100 + expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]] + + for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): + in_image = tf.random_uniform(in_shape) + out_image, _ = preprocessor.resize_to_range( + in_image, min_dimension=min_dim, max_dimension=max_dim) + self.assertAllEqual(out_image.get_shape().as_list(), expected_shape) + + def testResizeToRangeWithDynamicSpatialShape(self): + """Tests image resizing, checking output sizes.""" + in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] + min_dim = 50 + max_dim = 100 + expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]] + def graph_fn(in_image_shape): + in_image = tf.random_uniform(in_image_shape) + out_image, _ = preprocessor.resize_to_range( + in_image, min_dimension=min_dim, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + return out_image_shape + for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): + out_image_shape = self.execute_cpu(graph_fn, [np.array(in_shape, + np.int32)]) + self.assertAllEqual(out_image_shape, expected_shape) + + def testResizeToRangeWithPadToMaxDimensionReturnsCorrectShapes(self): + in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] + min_dim = 50 + max_dim = 100 + expected_shape_list = [[100, 100, 3], [100, 100, 3], [100, 100, 3]] + def graph_fn(in_image): + out_image, _ = preprocessor.resize_to_range( + in_image, + min_dimension=min_dim, + max_dimension=max_dim, + pad_to_max_dimension=True) + return tf.shape(out_image) + for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): + out_image_shape = self.execute_cpu( + graph_fn, [np.random.rand(*in_shape).astype('f')]) + self.assertAllEqual(out_image_shape, expected_shape) + + def testResizeToRangeWithPadToMaxDimensionReturnsCorrectTensor(self): + in_image_np = np.array([[[0, 1, 2]]], np.float32) + ex_image_np = np.array( + [[[0, 1, 2], [123.68, 116.779, 103.939]], + [[123.68, 116.779, 103.939], [123.68, 116.779, 103.939]]], np.float32) + min_dim = 1 + max_dim = 2 + def graph_fn(in_image): + out_image, _ = preprocessor.resize_to_range( + 
in_image, + min_dimension=min_dim, + max_dimension=max_dim, + pad_to_max_dimension=True, + per_channel_pad_value=(123.68, 116.779, 103.939)) + return out_image + out_image_np = self.execute_cpu(graph_fn, [in_image_np]) + self.assertAllClose(ex_image_np, out_image_np) + + def testResizeToRangeWithMasksPreservesStaticSpatialShape(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] + min_dim = 50 + max_dim = 100 + expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] + expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]] + + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_to_range( + in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) + self.assertAllEqual(out_masks.get_shape().as_list(), expected_mask_shape) + self.assertAllEqual(out_image.get_shape().as_list(), expected_image_shape) + + def testResizeToRangeWithMasksAndPadToMaxDimension(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] + min_dim = 50 + max_dim = 100 + expected_image_shape_list = [[100, 100, 3], [100, 100, 3]] + expected_masks_shape_list = [[15, 100, 100], [10, 100, 100]] + def graph_fn(in_image, in_masks): + out_image, out_masks, _ = preprocessor.resize_to_range( + in_image, in_masks, min_dimension=min_dim, + max_dimension=max_dim, pad_to_max_dimension=True) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.random.rand(*in_image_shape).astype('f'), + np.random.rand(*in_masks_shape).astype('f'), + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToRangeWithMasksAndDynamicSpatialShape(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] + min_dim = 50 + max_dim = 100 + expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] + expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]] + def graph_fn(in_image, in_masks): + out_image, out_masks, _ = preprocessor.resize_to_range( + in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.random.rand(*in_image_shape).astype('f'), + np.random.rand(*in_masks_shape).astype('f'), + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def 
testResizeToRangeWithInstanceMasksTensorOfSizeZero(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] + min_dim = 50 + max_dim = 100 + expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] + expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]] + def graph_fn(in_image, in_masks): + out_image, out_masks, _ = preprocessor.resize_to_range( + in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.random.rand(*in_image_shape).astype('f'), + np.random.rand(*in_masks_shape).astype('f'), + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToRange4DImageTensor(self): + image = tf.random_uniform([1, 200, 300, 3]) + with self.assertRaises(ValueError): + preprocessor.resize_to_range(image, 500, 600) + + def testResizeToRangeSameMinMax(self): + """Tests image resizing, checking output sizes.""" + in_shape_list = [[312, 312, 3], [299, 299, 3]] + min_dim = 320 + max_dim = 320 + expected_shape_list = [[320, 320, 3], [320, 320, 3]] + def graph_fn(in_shape): + in_image = tf.random_uniform(in_shape) + out_image, _ = preprocessor.resize_to_range( + in_image, min_dimension=min_dim, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + return out_image_shape + for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): + out_image_shape = self.execute_cpu(graph_fn, [np.array(in_shape, + np.int32)]) + self.assertAllEqual(out_image_shape, expected_shape) + + def testResizeToMaxDimensionTensorShapes(self): + """Tests both cases where image should and shouldn't be resized.""" + in_image_shape_list = [[100, 50, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 100, 50], [10, 15, 30]] + max_dim = 50 + expected_image_shape_list = [[50, 25, 3], [15, 30, 3]] + expected_masks_shape_list = [[15, 50, 25], [10, 15, 30]] + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_to_max_dimension( + in_image, in_masks, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToMaxDimensionWithInstanceMasksTensorOfSizeZero(self): + """Tests both cases where image should and shouldn't be resized.""" + in_image_shape_list = [[100, 50, 3], [15, 30, 3]] + in_masks_shape_list = [[0, 100, 50], [0, 15, 30]] + max_dim = 50 + expected_image_shape_list = [[50, 25, 3], [15, 30, 3]] + expected_masks_shape_list = [[0, 50, 25], [0, 
15, 30]] + + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_to_max_dimension( + in_image, in_masks, max_dimension=max_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToMaxDimensionRaisesErrorOn4DImage(self): + image = tf.random_uniform([1, 200, 300, 3]) + with self.assertRaises(ValueError): + preprocessor.resize_to_max_dimension(image, 500) + + def testResizeToMinDimensionTensorShapes(self): + in_image_shape_list = [[60, 55, 3], [15, 30, 3]] + in_masks_shape_list = [[15, 60, 55], [10, 15, 30]] + min_dim = 50 + expected_image_shape_list = [[60, 55, 3], [50, 100, 3]] + expected_masks_shape_list = [[15, 60, 55], [10, 50, 100]] + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_to_min_dimension( + in_image, in_masks, min_dimension=min_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToMinDimensionWithInstanceMasksTensorOfSizeZero(self): + """Tests image resizing, checking output sizes.""" + in_image_shape_list = [[60, 40, 3], [15, 30, 3]] + in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] + min_dim = 50 + expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] + expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]] + def graph_fn(in_image_shape, in_masks_shape): + in_image = tf.random_uniform(in_image_shape) + in_masks = tf.random_uniform(in_masks_shape) + out_image, out_masks, _ = preprocessor.resize_to_min_dimension( + in_image, in_masks, min_dimension=min_dim) + out_image_shape = tf.shape(out_image) + out_masks_shape = tf.shape(out_masks) + return [out_image_shape, out_masks_shape] + for (in_image_shape, expected_image_shape, in_masks_shape, + expected_mask_shape) in zip(in_image_shape_list, + expected_image_shape_list, + in_masks_shape_list, + expected_masks_shape_list): + out_image_shape, out_masks_shape = self.execute_cpu( + graph_fn, [ + np.array(in_image_shape, np.int32), + np.array(in_masks_shape, np.int32) + ]) + self.assertAllEqual(out_image_shape, expected_image_shape) + self.assertAllEqual(out_masks_shape, expected_mask_shape) + + def testResizeToMinDimensionRaisesErrorOn4DImage(self): + image = tf.random_uniform([1, 200, 300, 3]) + with self.assertRaises(ValueError): + 
preprocessor.resize_to_min_dimension(image, 500) + + def testResizePadToMultipleNoMasks(self): + """Tests resizing when padding to multiple without masks.""" + def graph_fn(): + image = tf.ones((200, 100, 3), dtype=tf.float32) + out_image, out_shape = preprocessor.resize_pad_to_multiple( + image, multiple=32) + return out_image, out_shape + + out_image, out_shape = self.execute_cpu(graph_fn, []) + self.assertAllClose(out_image.sum(), 200 * 100 * 3) + self.assertAllEqual(out_shape, (200, 100, 3)) + self.assertAllEqual(out_image.shape, (224, 128, 3)) + + def testResizePadToMultipleWithMasks(self): + """Tests resizing when padding to multiple with masks.""" + def graph_fn(): + image = tf.ones((200, 100, 3), dtype=tf.float32) + masks = tf.ones((10, 200, 100), dtype=tf.float32) + + _, out_masks, out_shape = preprocessor.resize_pad_to_multiple( + image, multiple=32, masks=masks) + return [out_masks, out_shape] + + out_masks, out_shape = self.execute_cpu(graph_fn, []) + self.assertAllClose(out_masks.sum(), 200 * 100 * 10) + self.assertAllEqual(out_shape, (200, 100, 3)) + self.assertAllEqual(out_masks.shape, (10, 224, 128)) + + def testResizePadToMultipleEmptyMasks(self): + """Tests resizing when padding to multiple with an empty mask.""" + def graph_fn(): + image = tf.ones((200, 100, 3), dtype=tf.float32) + masks = tf.ones((0, 200, 100), dtype=tf.float32) + _, out_masks, out_shape = preprocessor.resize_pad_to_multiple( + image, multiple=32, masks=masks) + return [out_masks, out_shape] + out_masks, out_shape = self.execute_cpu(graph_fn, []) + self.assertAllEqual(out_shape, (200, 100, 3)) + self.assertAllEqual(out_masks.shape, (0, 224, 128)) + + def testScaleBoxesToPixelCoordinates(self): + """Tests box scaling, checking scaled values.""" + def graph_fn(): + in_shape = [60, 40, 3] + in_boxes = [[0.1, 0.2, 0.4, 0.6], + [0.5, 0.3, 0.9, 0.7]] + in_image = tf.random_uniform(in_shape) + in_boxes = tf.constant(in_boxes) + _, out_boxes = preprocessor.scale_boxes_to_pixel_coordinates( + in_image, boxes=in_boxes) + return out_boxes + expected_boxes = [[6., 8., 24., 24.], + [30., 12., 54., 28.]] + out_boxes = self.execute_cpu(graph_fn, []) + self.assertAllClose(out_boxes, expected_boxes) + + def testScaleBoxesToPixelCoordinatesWithKeypoints(self): + """Tests box and keypoint scaling, checking scaled values.""" + def graph_fn(): + in_shape = [60, 40, 3] + in_boxes = self.createTestBoxes() + in_keypoints, _ = self.createTestKeypoints() + in_image = tf.random_uniform(in_shape) + (_, out_boxes, + out_keypoints) = preprocessor.scale_boxes_to_pixel_coordinates( + in_image, boxes=in_boxes, keypoints=in_keypoints) + return out_boxes, out_keypoints + expected_boxes = [[0., 10., 45., 40.], + [15., 20., 45., 40.]] + expected_keypoints = [ + [[6., 4.], [12., 8.], [18., 12.]], + [[24., 16.], [30., 20.], [36., 24.]], + ] + out_boxes_, out_keypoints_ = self.execute_cpu(graph_fn, []) + self.assertAllClose(out_boxes_, expected_boxes) + self.assertAllClose(out_keypoints_, expected_keypoints) + + def testSubtractChannelMean(self): + """Tests whether channel means have been subtracted.""" + def graph_fn(): + image = tf.zeros((240, 320, 3)) + means = [1, 2, 3] + actual = preprocessor.subtract_channel_mean(image, means=means) + return actual + actual = self.execute_cpu(graph_fn, []) + self.assertTrue((actual[:, :, 0], -1)) + self.assertTrue((actual[:, :, 1], -2)) + self.assertTrue((actual[:, :, 2], -3)) + + def testOneHotEncoding(self): + """Tests one hot encoding of multiclass labels.""" + def graph_fn(): + labels = 
tf.constant([1, 4, 2], dtype=tf.int32) + one_hot = preprocessor.one_hot_encoding(labels, num_classes=5) + return one_hot + one_hot = self.execute_cpu(graph_fn, []) + self.assertAllEqual([0, 1, 1, 0, 1], one_hot) + + def testRandomSelfConcatImageVertically(self): + + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + confidences = weights + scores = self.createTestMultiClassScores() + + tensor_dict = { + fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_confidences: confidences, + fields.InputDataFields.multiclass_scores: scores, + } + + preprocessing_options = [(preprocessor.random_self_concat_image, { + 'concat_vertical_probability': 1.0, + 'concat_horizontal_probability': 0.0, + })] + func_arg_map = preprocessor.get_default_func_arg_map( + True, True, True) + output_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=func_arg_map) + + original_shape = tf.shape(images)[1:3] + final_shape = tf.shape(output_tensor_dict[fields.InputDataFields.image])[ + 1:3] + return [ + original_shape, + boxes, + labels, + confidences, + scores, + final_shape, + output_tensor_dict[fields.InputDataFields.groundtruth_boxes], + output_tensor_dict[fields.InputDataFields.groundtruth_classes], + output_tensor_dict[fields.InputDataFields.groundtruth_confidences], + output_tensor_dict[fields.InputDataFields.multiclass_scores], + ] + (original_shape, boxes, labels, confidences, scores, final_shape, new_boxes, + new_labels, new_confidences, new_scores) = self.execute(graph_fn, []) + self.assertAllEqual(final_shape, original_shape * np.array([2, 1])) + self.assertAllEqual(2 * boxes.size, new_boxes.size) + self.assertAllEqual(2 * labels.size, new_labels.size) + self.assertAllEqual(2 * confidences.size, new_confidences.size) + self.assertAllEqual(2 * scores.size, new_scores.size) + + def testRandomSelfConcatImageHorizontally(self): + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + confidences = weights + scores = self.createTestMultiClassScores() + + tensor_dict = { + fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + fields.InputDataFields.groundtruth_confidences: confidences, + fields.InputDataFields.multiclass_scores: scores, + } + + preprocessing_options = [(preprocessor.random_self_concat_image, { + 'concat_vertical_probability': 0.0, + 'concat_horizontal_probability': 1.0, + })] + func_arg_map = preprocessor.get_default_func_arg_map( + True, True, True) + output_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=func_arg_map) + + original_shape = tf.shape(images)[1:3] + final_shape = tf.shape(output_tensor_dict[fields.InputDataFields.image])[ + 1:3] + return [ + original_shape, + boxes, + labels, + confidences, + scores, + final_shape, + output_tensor_dict[fields.InputDataFields.groundtruth_boxes], + output_tensor_dict[fields.InputDataFields.groundtruth_classes], + 
output_tensor_dict[fields.InputDataFields.groundtruth_confidences], + output_tensor_dict[fields.InputDataFields.multiclass_scores], + ] + (original_shape, boxes, labels, confidences, scores, final_shape, new_boxes, + new_labels, new_confidences, new_scores) = self.execute(graph_fn, []) + self.assertAllEqual(final_shape, original_shape * np.array([1, 2])) + self.assertAllEqual(2 * boxes.size, new_boxes.size) + self.assertAllEqual(2 * labels.size, new_labels.size) + self.assertAllEqual(2 * confidences.size, new_confidences.size) + self.assertAllEqual(2 * scores.size, new_scores.size) + + def testSSDRandomCropWithCache(self): + preprocess_options = [ + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), + (preprocessor.ssd_random_crop, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=False, + test_keypoints=False) + + def testSSDRandomCrop(self): + def graph_fn(): + preprocessing_options = [ + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), + (preprocessor.ssd_random_crop, {})] + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + return [boxes_rank, distorted_boxes_rank, images_rank, + distorted_images_rank] + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testSSDRandomCropWithMultiClassScores(self): + def graph_fn(): + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), (preprocessor.ssd_random_crop, {})] + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + multiclass_scores = self.createTestMultiClassScores() + + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.multiclass_scores: multiclass_scores, + fields.InputDataFields.groundtruth_weights: weights, + } + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_multiclass_scores=True) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + distorted_multiclass_scores = distorted_tensor_dict[ + fields.InputDataFields.multiclass_scores] + + images_rank = 
tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + multiclass_scores_rank = tf.rank(multiclass_scores) + distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores) + return [ + boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank, + distorted_images_rank, multiclass_scores_rank, + distorted_multiclass_scores, distorted_multiclass_scores_rank + ] + + (boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_, multiclass_scores_rank_, + distorted_multiclass_scores_, + distorted_multiclass_scores_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + self.assertAllEqual(multiclass_scores_rank_, + distorted_multiclass_scores_rank_) + self.assertAllEqual(distorted_boxes_.shape[0], + distorted_multiclass_scores_.shape[0]) + + def testSSDRandomCropPad(self): + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + preprocessing_options = [ + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), + (preprocessor.ssd_random_crop_pad, {})] + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights, + } + distorted_tensor_dict = preprocessor.preprocess(tensor_dict, + preprocessing_options) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + return [ + boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank + ] + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testSSDRandomCropFixedAspectRatioWithCache(self): + preprocess_options = [ + (preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), + (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})] + self._testPreprocessorCache(preprocess_options, + test_boxes=True, + test_masks=False, + test_keypoints=False) + + def _testSSDRandomCropFixedAspectRatio(self, + include_multiclass_scores, + include_instance_masks, + include_keypoints): + def graph_fn(): + images = self.createTestImages() + boxes = self.createTestBoxes() + labels = self.createTestLabels() + weights = self.createTestGroundtruthWeights() + preprocessing_options = [(preprocessor.normalize_image, { + 'original_minval': 0, + 'original_maxval': 255, + 'target_minval': 0, + 'target_maxval': 1 + }), (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})] + tensor_dict = { + fields.InputDataFields.image: images, + fields.InputDataFields.groundtruth_boxes: boxes, + fields.InputDataFields.groundtruth_classes: labels, + fields.InputDataFields.groundtruth_weights: weights + } + if include_multiclass_scores: + multiclass_scores = self.createTestMultiClassScores() 
+ tensor_dict[fields.InputDataFields.multiclass_scores] = ( + multiclass_scores) + if include_instance_masks: + masks = self.createTestMasks() + tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks + if include_keypoints: + keypoints, _ = self.createTestKeypoints() + tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints + + preprocessor_arg_map = preprocessor.get_default_func_arg_map( + include_multiclass_scores=include_multiclass_scores, + include_instance_masks=include_instance_masks, + include_keypoints=include_keypoints) + distorted_tensor_dict = preprocessor.preprocess( + tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) + distorted_images = distorted_tensor_dict[fields.InputDataFields.image] + distorted_boxes = distorted_tensor_dict[ + fields.InputDataFields.groundtruth_boxes] + images_rank = tf.rank(images) + distorted_images_rank = tf.rank(distorted_images) + boxes_rank = tf.rank(boxes) + distorted_boxes_rank = tf.rank(distorted_boxes) + return [boxes_rank, distorted_boxes_rank, images_rank, + distorted_images_rank] + + (boxes_rank_, distorted_boxes_rank_, images_rank_, + distorted_images_rank_) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) + self.assertAllEqual(images_rank_, distorted_images_rank_) + + def testSSDRandomCropFixedAspectRatio(self): + self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, + include_instance_masks=False, + include_keypoints=False) + + def testSSDRandomCropFixedAspectRatioWithMultiClassScores(self): + self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=True, + include_instance_masks=False, + include_keypoints=False) + + def testSSDRandomCropFixedAspectRatioWithMasksAndKeypoints(self): + self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, + include_instance_masks=True, + include_keypoints=True) + + def testSSDRandomCropFixedAspectRatioWithLabelScoresMasksAndKeypoints(self): + self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, + include_instance_masks=True, + include_keypoints=True) + + def testConvertClassLogitsToSoftmax(self): + def graph_fn(): + multiclass_scores = tf.constant( + [[1.0, 0.0], [0.5, 0.5], [1000, 1]], dtype=tf.float32) + temperature = 2.0 + + converted_multiclass_scores = ( + preprocessor.convert_class_logits_to_softmax( + multiclass_scores=multiclass_scores, temperature=temperature)) + return converted_multiclass_scores + converted_multiclass_scores_ = self.execute_cpu(graph_fn, []) + expected_converted_multiclass_scores = [[0.62245935, 0.37754068], + [0.5, 0.5], + [1, 0]] + self.assertAllClose(converted_multiclass_scores_, + expected_converted_multiclass_scores) + + @parameterized.named_parameters( + ('scale_1', 1.0), + ('scale_1.5', 1.5), + ('scale_0.5', 0.5) + ) + def test_square_crop_by_scale(self, scale): + def graph_fn(): + image = np.random.randn(256, 256, 1) + + masks = tf.constant(image[:, :, 0].reshape(1, 256, 256)) + image = tf.constant(image) + keypoints = tf.constant([[[0.25, 0.25], [0.75, 0.75]]]) + + boxes = tf.constant([[0.25, .25, .75, .75]]) + labels = tf.constant([[1]]) + label_confidences = tf.constant([0.75]) + label_weights = tf.constant([[1.]]) + + (new_image, new_boxes, _, _, new_confidences, new_masks, + new_keypoints) = preprocessor.random_square_crop_by_scale( + image, + boxes, + labels, + label_weights, + label_confidences, + masks=masks, + keypoints=keypoints, + max_border=256, + scale_min=scale, + scale_max=scale) + return new_image, 
new_boxes, new_confidences, new_masks, new_keypoints + image, boxes, confidences, masks, keypoints = self.execute_cpu(graph_fn, []) + ymin, xmin, ymax, xmax = boxes[0] + self.assertAlmostEqual(ymax - ymin, 0.5 / scale) + self.assertAlmostEqual(xmax - xmin, 0.5 / scale) + + k1 = keypoints[0, 0] + k2 = keypoints[0, 1] + self.assertAlmostEqual(k2[0] - k1[0], 0.5 / scale) + self.assertAlmostEqual(k2[1] - k1[1], 0.5 / scale) + + size = max(image.shape) + self.assertAlmostEqual(scale * 256.0, size) + + self.assertAllClose(image[:, :, 0], masks[0, :, :]) + self.assertAllClose(confidences, [0.75]) + + @parameterized.named_parameters(('scale_0_1', 0.1), ('scale_1_0', 1.0), + ('scale_2_0', 2.0)) + def test_random_scale_crop_and_pad_to_square(self, scale): + + def graph_fn(): + image = np.random.randn(512, 256, 1) + box_centers = [0.25, 0.5, 0.75] + box_size = 0.1 + box_corners = [] + box_labels = [] + box_label_weights = [] + keypoints = [] + masks = [] + for center_y in box_centers: + for center_x in box_centers: + box_corners.append( + [center_y - box_size / 2.0, center_x - box_size / 2.0, + center_y + box_size / 2.0, center_x + box_size / 2.0]) + box_labels.append([1]) + box_label_weights.append([1.]) + keypoints.append( + [[center_y - box_size / 2.0, center_x - box_size / 2.0], + [center_y + box_size / 2.0, center_x + box_size / 2.0]]) + masks.append(image[:, :, 0].reshape(512, 256)) + + image = tf.constant(image) + boxes = tf.constant(box_corners) + labels = tf.constant(box_labels) + label_weights = tf.constant(box_label_weights) + keypoints = tf.constant(keypoints) + masks = tf.constant(np.stack(masks)) + + (new_image, new_boxes, _, _, new_masks, + new_keypoints) = preprocessor.random_scale_crop_and_pad_to_square( + image, + boxes, + labels, + label_weights, + masks=masks, + keypoints=keypoints, + scale_min=scale, + scale_max=scale, + output_size=512) + return new_image, new_boxes, new_masks, new_keypoints + + image, boxes, masks, keypoints = self.execute_cpu(graph_fn, []) + + # Since random_scale_crop_and_pad_to_square may prune and clip boxes, + # we only need to find one of the boxes that was not clipped and check + # that it matches the expected dimensions. Note, assertAlmostEqual(a, b) + # is equivalent to round(a-b, 7) == 0. + any_box_has_correct_size = False + effective_scale_y = int(scale * 512) / 512.0 + effective_scale_x = int(scale * 256) / 512.0 + expected_size_y = 0.1 * effective_scale_y + expected_size_x = 0.1 * effective_scale_x + for box in boxes: + ymin, xmin, ymax, xmax = box + any_box_has_correct_size |= ( + (round(ymin, 7) != 0.0) and (round(xmin, 7) != 0.0) and + (round(ymax, 7) != 1.0) and (round(xmax, 7) != 1.0) and + (round((ymax - ymin) - expected_size_y, 7) == 0.0) and + (round((xmax - xmin) - expected_size_x, 7) == 0.0)) + self.assertTrue(any_box_has_correct_size) + + # Similar to the approach above where we check for at least one box with the + # expected dimensions, we check for at least one pair of keypoints whose + # distance matches the expected dimensions. 
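The expected_size_y / expected_size_x values computed just above are plain rounding arithmetic: the 512x256 input is scaled by `scale`, truncated to whole pixels, and placed on a 512x512 output canvas, so a box spanning 0.1 of each original dimension shrinks by the corresponding effective scale. A standalone sanity check of that arithmetic (plain Python, assuming the same truncation the test uses; the matching keypoint-distance check continues below):

```python
# Sanity check for the expected box sizes asserted in the test above.
for scale in (0.1, 1.0, 2.0):
    effective_scale_y = int(scale * 512) / 512.0   # 512-pixel side, truncated to whole pixels
    effective_scale_x = int(scale * 256) / 512.0   # 256-pixel side placed on a 512-pixel canvas
    expected_size_y = 0.1 * effective_scale_y      # original boxes are 0.1 tall (normalized)
    expected_size_x = 0.1 * effective_scale_x      # and 0.1 wide (normalized)
    print(scale, round(expected_size_y, 7), round(expected_size_x, 7))
```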
+ any_keypoint_pair_has_correct_dist = False + for keypoint_pair in keypoints: + ymin, xmin = keypoint_pair[0] + ymax, xmax = keypoint_pair[1] + any_keypoint_pair_has_correct_dist |= ( + (round(ymin, 7) != 0.0) and (round(xmin, 7) != 0.0) and + (round(ymax, 7) != 1.0) and (round(xmax, 7) != 1.0) and + (round((ymax - ymin) - expected_size_y, 7) == 0.0) and + (round((xmax - xmin) - expected_size_x, 7) == 0.0)) + self.assertTrue(any_keypoint_pair_has_correct_dist) + + self.assertAlmostEqual(512.0, image.shape[0]) + self.assertAlmostEqual(512.0, image.shape[1]) + + self.assertAllClose(image[:, :, 0], + masks[0, :, :]) + + def test_random_scale_crop_and_pad_to_square_handles_confidences(self): + + def graph_fn(): + image = tf.zeros([10, 10, 1]) + boxes = tf.constant([[0, 0, 0.5, 0.5], [0.5, 0.5, 0.75, 0.75]]) + label_weights = tf.constant([1.0, 1.0]) + box_labels = tf.constant([0, 1]) + box_confidences = tf.constant([-1.0, 1.0]) + + (_, new_boxes, _, _, + new_confidences) = preprocessor.random_scale_crop_and_pad_to_square( + image, + boxes, + box_labels, + label_weights, + label_confidences=box_confidences, + scale_min=0.8, + scale_max=0.9, + output_size=10) + return new_boxes, new_confidences + + boxes, confidences = self.execute_cpu(graph_fn, []) + + self.assertLen(boxes, 2) + self.assertAllEqual(confidences, [-1.0, 1.0]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/region_similarity_calculator.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/region_similarity_calculator.py new file mode 100644 index 0000000000000000000000000000000000000000..fcaba76104fbf6d706c838aa93a1c2b8db9886fe --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/region_similarity_calculator.py @@ -0,0 +1,193 @@ +# Lint as: python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Region Similarity Calculators for BoxLists. + +Region Similarity Calculators compare a pairwise measure of similarity +between the boxes in two BoxLists. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import ABCMeta +from abc import abstractmethod + +import six +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list_ops +from object_detection.core import standard_fields as fields + + +class RegionSimilarityCalculator(six.with_metaclass(ABCMeta, object)): + """Abstract base class for region similarity calculator.""" + + def compare(self, boxlist1, boxlist2, scope=None): + """Computes matrix of pairwise similarity between BoxLists. + + This op (to be overridden) computes a measure of pairwise similarity between + the boxes in the given BoxLists. Higher values indicate more similarity. 
+ + Note that this method simply measures similarity and does not explicitly + perform a matching. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + scope: Op scope name. Defaults to 'Compare' if None. + + Returns: + a (float32) tensor of shape [N, M] with pairwise similarity score. + """ + with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope: + return self._compare(boxlist1, boxlist2) + + @abstractmethod + def _compare(self, boxlist1, boxlist2): + pass + + +class IouSimilarity(RegionSimilarityCalculator): + """Class to compute similarity based on Intersection over Union (IOU) metric. + + This class computes pairwise similarity between two BoxLists based on IOU. + """ + + def _compare(self, boxlist1, boxlist2): + """Compute pairwise IOU similarity between the two BoxLists. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + + Returns: + A tensor with shape [N, M] representing pairwise iou scores. + """ + return box_list_ops.iou(boxlist1, boxlist2) + + +class DETRSimilarity(RegionSimilarityCalculator): + """Class to compute similarity for the Detection Transformer model. + + This class computes pairwise DETR similarity between two BoxLists using a + weighted combination of GIOU, classification scores, and the L1 loss. + """ + + def __init__(self, l1_weight=5, giou_weight=2): + super().__init__() + self.l1_weight = l1_weight + self.giou_weight = giou_weight + + def _compare(self, boxlist1, boxlist2): + """Compute pairwise DETR similarity between the two BoxLists. + + Args: + boxlist1: BoxList holding N groundtruth boxes. + boxlist2: BoxList holding M predicted boxes. + + Returns: + A tensor with shape [N, M] representing pairwise DETR similarity scores. + """ + groundtruth_labels = boxlist1.get_field(fields.BoxListFields.classes) + predicted_labels = boxlist2.get_field(fields.BoxListFields.classes) + classification_scores = tf.matmul(groundtruth_labels, + predicted_labels, + transpose_b=True) + loss = self.l1_weight * box_list_ops.l1( + boxlist1, boxlist2) + self.giou_weight * (1 - box_list_ops.giou( + boxlist1, boxlist2)) - classification_scores + return -loss + + +class NegSqDistSimilarity(RegionSimilarityCalculator): + """Class to compute similarity based on the squared distance metric. + + This class computes pairwise similarity between two BoxLists based on the + negative squared distance metric. + """ + + def _compare(self, boxlist1, boxlist2): + """Compute matrix of (negated) sq distances. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + + Returns: + A tensor with shape [N, M] representing negated pairwise squared distance. + """ + return -1 * box_list_ops.sq_dist(boxlist1, boxlist2) + + +class IoaSimilarity(RegionSimilarityCalculator): + """Class to compute similarity based on Intersection over Area (IOA) metric. + + This class computes pairwise similarity between two BoxLists based on their + pairwise intersections divided by the areas of second BoxLists. + """ + + def _compare(self, boxlist1, boxlist2): + """Compute pairwise IOA similarity between the two BoxLists. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + + Returns: + A tensor with shape [N, M] representing pairwise IOA scores. + """ + return box_list_ops.ioa(boxlist1, boxlist2) + + +class ThresholdedIouSimilarity(RegionSimilarityCalculator): + """Class to compute similarity based on thresholded IOU and score. 
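The DETRSimilarity cost defined above is l1_weight * L1 + giou_weight * (1 - GIoU) - classification_score, negated so that higher values mean more similar. The expected value in the unit test added later in this patch can be reproduced by hand; the sketch below assumes (inferred from that expected value, not stated here) that box_list_ops.l1 measures distance between [ycenter, xcenter, height, width] encodings:

```python
# Hand check for the non-overlapping pair in the DETR unit test:
# groundtruth [5, 7, 7, 9] vs. prediction [5, 11, 7, 13] (ymin, xmin, ymax, xmax).
l1 = abs(8.0 - 12.0)                   # only the x-centers differ; heights and widths match
union = 4.0 + 4.0                      # two disjoint 2 x 2 boxes
enclosing = 2.0 * 6.0                  # smallest box covering both: y in [5, 7], x in [7, 13]
giou = 0.0 - (enclosing - union) / enclosing   # IoU is 0, so GIoU = -1/3
class_score = 1.0 * 1000.0 + 0.0 * 0.0         # [1, 0] . [1000, 0]
similarity = -(5 * l1 + 2 * (1 - giou) - class_score)
print(similarity)                      # -20 - 8/3 + 1000, roughly 977.33
```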
+ + This class computes pairwise similarity between two BoxLists based on IOU and + a 'score' present in boxlist1. If IOU > threshold, then the entry in the + output pairwise tensor will contain `score`, otherwise 0. + """ + + def __init__(self, iou_threshold=0): + """Initialize the ThresholdedIouSimilarity. + + Args: + iou_threshold: For a given pair of boxes, if the IOU is > iou_threshold, + then the comparison result will be the foreground probability of + the first box, otherwise it will be zero. + """ + super(ThresholdedIouSimilarity, self).__init__() + self._iou_threshold = iou_threshold + + def _compare(self, boxlist1, boxlist2): + """Compute pairwise IOU similarity between the two BoxLists and score. + + Args: + boxlist1: BoxList holding N boxes. Must have a score field. + boxlist2: BoxList holding M boxes. + + Returns: + A tensor with shape [N, M] representing scores threholded by pairwise + iou scores. + """ + ious = box_list_ops.iou(boxlist1, boxlist2) + scores = boxlist1.get_field(fields.BoxListFields.scores) + scores = tf.expand_dims(scores, axis=1) + row_replicated_scores = tf.tile(scores, [1, tf.shape(ious)[-1]]) + thresholded_ious = tf.where(ious > self._iou_threshold, + row_replicated_scores, tf.zeros_like(ious)) + + return thresholded_ious diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/region_similarity_calculator.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/region_similarity_calculator.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e32149a84c6125e27f3bd80a67002c2aad94700c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/region_similarity_calculator.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/region_similarity_calculator_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/region_similarity_calculator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ec1de45be14772a9f56e32eec67632095c629235 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/region_similarity_calculator_test.py @@ -0,0 +1,117 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
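ThresholdedIouSimilarity above tiles the per-box scores of boxlist1 across the columns and keeps them only where the pairwise IOU clears the threshold. A minimal NumPy mirror of that gating, using the same IOU values and scores as the unit test that follows (illustrative only, no BoxList involved):

```python
import numpy as np

ious = np.array([[2.0 / 16.0, 0.0, 6.0 / 400.0],   # rows: boxes in boxlist1 (carry the scores)
                 [1.0 / 16.0, 0.0, 5.0 / 400.0]])  # columns: boxes in boxlist2
scores = np.array([0.3, 0.6]).reshape(-1, 1)
iou_threshold = 0.013

thresholded = np.where(ious > iou_threshold, scores, 0.0)
# -> [[0.3, 0.0, 0.3],
#     [0.6, 0.0, 0.0]]
```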
+# ============================================================================== + +"""Tests for region_similarity_calculator.""" +import tensorflow.compat.v1 as tf + +from object_detection.core import box_list +from object_detection.core import region_similarity_calculator +from object_detection.core import standard_fields as fields +from object_detection.utils import test_case + + +class RegionSimilarityCalculatorTest(test_case.TestCase): + + def test_get_correct_pairwise_similarity_based_on_iou(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + iou_similarity_calculator = region_similarity_calculator.IouSimilarity() + iou_similarity = iou_similarity_calculator.compare(boxes1, boxes2) + return iou_similarity + exp_output = [[2.0 / 16.0, 0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]] + iou_output = self.execute(graph_fn, []) + self.assertAllClose(iou_output, exp_output) + + def test_get_correct_pairwise_similarity_based_on_squared_distances(self): + def graph_fn(): + corners1 = tf.constant([[0.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 0.0, 2.0]]) + corners2 = tf.constant([[3.0, 4.0, 1.0, 0.0], + [-4.0, 0.0, 0.0, 3.0], + [0.0, 0.0, 0.0, 0.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + dist_similarity_calc = region_similarity_calculator.NegSqDistSimilarity() + dist_similarity = dist_similarity_calc.compare(boxes1, boxes2) + return dist_similarity + exp_output = [[-26, -25, 0], [-18, -27, -6]] + iou_output = self.execute(graph_fn, []) + self.assertAllClose(iou_output, exp_output) + + def test_get_correct_pairwise_similarity_based_on_ioa(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + ioa_similarity_calculator = region_similarity_calculator.IoaSimilarity() + ioa_similarity_1 = ioa_similarity_calculator.compare(boxes1, boxes2) + ioa_similarity_2 = ioa_similarity_calculator.compare(boxes2, boxes1) + return ioa_similarity_1, ioa_similarity_2 + exp_output_1 = [[2.0 / 12.0, 0, 6.0 / 400.0], + [1.0 / 12.0, 0.0, 5.0 / 400.0]] + exp_output_2 = [[2.0 / 6.0, 1.0 / 5.0], + [0, 0], + [6.0 / 6.0, 5.0 / 5.0]] + iou_output_1, iou_output_2 = self.execute(graph_fn, []) + self.assertAllClose(iou_output_1, exp_output_1) + self.assertAllClose(iou_output_2, exp_output_2) + + def test_get_correct_pairwise_similarity_based_on_thresholded_iou(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + scores = tf.constant([.3, .6]) + iou_threshold = .013 + boxes1 = box_list.BoxList(corners1) + boxes1.add_field(fields.BoxListFields.scores, scores) + boxes2 = box_list.BoxList(corners2) + iou_similarity_calculator = ( + region_similarity_calculator.ThresholdedIouSimilarity( + iou_threshold=iou_threshold)) + iou_similarity = iou_similarity_calculator.compare(boxes1, boxes2) + return iou_similarity + exp_output = tf.constant([[0.3, 0., 0.3], [0.6, 0., 0.]]) + iou_output = self.execute(graph_fn, []) + self.assertAllClose(iou_output, exp_output) + + def test_detr_similarity(self): + def graph_fn(): + corners1 = 
tf.constant([[5.0, 7.0, 7.0, 9.0]]) + corners2 = tf.constant([[5.0, 7.0, 7.0, 9.0], [5.0, 11.0, 7.0, 13.0]]) + groundtruth_labels = tf.constant([[1.0, 0.0]]) + predicted_labels = tf.constant([[0.0, 1000.0], [1000.0, 0.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + boxes1.add_field(fields.BoxListFields.classes, groundtruth_labels) + boxes2.add_field(fields.BoxListFields.classes, predicted_labels) + detr_similarity_calculator = \ + region_similarity_calculator.DETRSimilarity() + detr_similarity = detr_similarity_calculator.compare( + boxes1, boxes2, None) + return detr_similarity + exp_output = [[0.0, -20 - 8.0/3.0 + 1000.0]] + sim_output = self.execute(graph_fn, []) + self.assertAllClose(sim_output, exp_output) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/standard_fields.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/standard_fields.py new file mode 100644 index 0000000000000000000000000000000000000000..789296c38bb3186f008803d282d73e3bded1c965 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/standard_fields.py @@ -0,0 +1,338 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contains classes specifying naming conventions used for object detection. + + +Specifies: + InputDataFields: standard fields used by reader/preprocessor/batcher. + DetectionResultFields: standard fields returned by object detector. + BoxListFields: standard field used by BoxList + TfExampleFields: standard fields for tf-example data format (go/tf-example). +""" + + +class InputDataFields(object): + """Names for the input tensors. + + Holds the standard data field names to use for identifying input tensors. This + should be used by the decoder to identify keys for the returned tensor_dict + containing input tensors. And it should be used by the model to identify the + tensors it needs. + + Attributes: + image: image. + image_additional_channels: additional channels. + original_image: image in the original input size. + original_image_spatial_shape: image in the original input size. + key: unique key corresponding to image. + source_id: source of the original image. + filename: original filename of the dataset (without common path). + groundtruth_image_classes: image-level class labels. + groundtruth_image_confidences: image-level class confidences. + groundtruth_labeled_classes: image-level annotation that indicates the + classes for which an image has been labeled. + groundtruth_boxes: coordinates of the ground truth boxes in the image. + groundtruth_classes: box-level class labels. + groundtruth_track_ids: box-level track ID labels. + groundtruth_temporal_offset: box-level temporal offsets, i.e., + movement of the box center in adjacent frames. 
+ groundtruth_track_match_flags: box-level flags indicating if objects + exist in the previous frame. + groundtruth_confidences: box-level class confidences. The shape should be + the same as the shape of groundtruth_classes. + groundtruth_label_types: box-level label types (e.g. explicit negative). + groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead] + is the groundtruth a single object or a crowd. + groundtruth_area: area of a groundtruth segment. + groundtruth_difficult: is a `difficult` object + groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the + same class, forming a connected group, where instances are heavily + occluding each other. + proposal_boxes: coordinates of object proposal boxes. + proposal_objectness: objectness score of each proposal. + groundtruth_instance_masks: ground truth instance masks. + groundtruth_instance_boundaries: ground truth instance boundaries. + groundtruth_instance_classes: instance mask-level class labels. + groundtruth_keypoints: ground truth keypoints. + groundtruth_keypoint_visibilities: ground truth keypoint visibilities. + groundtruth_keypoint_weights: groundtruth weight factor for keypoints. + groundtruth_label_weights: groundtruth label weights. + groundtruth_verified_negative_classes: groundtruth verified negative classes + groundtruth_not_exhaustive_classes: groundtruth not-exhaustively labeled + classes. + groundtruth_weights: groundtruth weight factor for bounding boxes. + groundtruth_dp_num_points: The number of DensePose sampled points for each + instance. + groundtruth_dp_part_ids: Part indices for DensePose points. + groundtruth_dp_surface_coords: Image locations and UV coordinates for + DensePose points. + num_groundtruth_boxes: number of groundtruth boxes. + is_annotated: whether an image has been labeled or not. + true_image_shapes: true shapes of images in the resized images, as resized + images can be padded with zeros. + multiclass_scores: the label score per class for each box. + context_features: a flattened list of contextual features. + context_feature_length: the fixed length of each feature in + context_features, used for reshaping. + valid_context_size: the valid context size, used in filtering the padded + context features. 
+ image_format: format for the images, used to decode + image_height: height of images, used to decode + image_width: width of images, used to decode + """ + image = 'image' + image_additional_channels = 'image_additional_channels' + original_image = 'original_image' + original_image_spatial_shape = 'original_image_spatial_shape' + key = 'key' + source_id = 'source_id' + filename = 'filename' + groundtruth_image_classes = 'groundtruth_image_classes' + groundtruth_image_confidences = 'groundtruth_image_confidences' + groundtruth_labeled_classes = 'groundtruth_labeled_classes' + groundtruth_boxes = 'groundtruth_boxes' + groundtruth_classes = 'groundtruth_classes' + groundtruth_track_ids = 'groundtruth_track_ids' + groundtruth_temporal_offset = 'groundtruth_temporal_offset' + groundtruth_track_match_flags = 'groundtruth_track_match_flags' + groundtruth_confidences = 'groundtruth_confidences' + groundtruth_label_types = 'groundtruth_label_types' + groundtruth_is_crowd = 'groundtruth_is_crowd' + groundtruth_area = 'groundtruth_area' + groundtruth_difficult = 'groundtruth_difficult' + groundtruth_group_of = 'groundtruth_group_of' + proposal_boxes = 'proposal_boxes' + proposal_objectness = 'proposal_objectness' + groundtruth_instance_masks = 'groundtruth_instance_masks' + groundtruth_instance_boundaries = 'groundtruth_instance_boundaries' + groundtruth_instance_classes = 'groundtruth_instance_classes' + groundtruth_keypoints = 'groundtruth_keypoints' + groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities' + groundtruth_keypoint_weights = 'groundtruth_keypoint_weights' + groundtruth_label_weights = 'groundtruth_label_weights' + groundtruth_verified_neg_classes = 'groundtruth_verified_neg_classes' + groundtruth_not_exhaustive_classes = 'groundtruth_not_exhaustive_classes' + groundtruth_weights = 'groundtruth_weights' + groundtruth_dp_num_points = 'groundtruth_dp_num_points' + groundtruth_dp_part_ids = 'groundtruth_dp_part_ids' + groundtruth_dp_surface_coords = 'groundtruth_dp_surface_coords' + num_groundtruth_boxes = 'num_groundtruth_boxes' + is_annotated = 'is_annotated' + true_image_shape = 'true_image_shape' + multiclass_scores = 'multiclass_scores' + context_features = 'context_features' + context_feature_length = 'context_feature_length' + valid_context_size = 'valid_context_size' + image_timestamps = 'image_timestamps' + image_format = 'image_format' + image_height = 'image_height' + image_width = 'image_width' + + +class DetectionResultFields(object): + """Naming conventions for storing the output of the detector. + + Attributes: + source_id: source of the original image. + key: unique key corresponding to image. + detection_boxes: coordinates of the detection boxes in the image. + detection_scores: detection scores for the detection boxes in the image. + detection_multiclass_scores: class score distribution (including background) + for detection boxes in the image including background class. + detection_classes: detection-level class labels. + detection_masks: contains a segmentation mask for each detection box. + detection_surface_coords: contains DensePose surface coordinates for each + box. + detection_boundaries: contains an object boundary for each detection box. + detection_keypoints: contains detection keypoints for each detection box. + detection_keypoint_scores: contains detection keypoint scores. + num_detections: number of detections in the batch. + raw_detection_boxes: contains decoded detection boxes without Non-Max + suppression. 
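In practice the InputDataFields constants above serve as dictionary keys for the tensors flowing through the input pipeline, exactly as the preprocessor tests earlier in this patch use them. A small hypothetical sketch (placeholder values):

```python
import tensorflow.compat.v1 as tf

from object_detection.core import standard_fields as fields

tensor_dict = {
    fields.InputDataFields.image:
        tf.zeros([480, 640, 3], dtype=tf.float32),
    fields.InputDataFields.groundtruth_boxes:
        tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32),
    fields.InputDataFields.groundtruth_classes:
        tf.constant([1], dtype=tf.int64),
    fields.InputDataFields.groundtruth_weights:
        tf.constant([1.0], dtype=tf.float32),
}
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
```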
+ raw_detection_scores: contains class score logits for raw detection boxes. + detection_anchor_indices: The anchor indices of the detections after NMS. + detection_features: contains extracted features for each detected box + after NMS. + """ + + source_id = 'source_id' + key = 'key' + detection_boxes = 'detection_boxes' + detection_scores = 'detection_scores' + detection_multiclass_scores = 'detection_multiclass_scores' + detection_features = 'detection_features' + detection_classes = 'detection_classes' + detection_masks = 'detection_masks' + detection_surface_coords = 'detection_surface_coords' + detection_boundaries = 'detection_boundaries' + detection_keypoints = 'detection_keypoints' + detection_keypoint_scores = 'detection_keypoint_scores' + detection_embeddings = 'detection_embeddings' + detection_offsets = 'detection_temporal_offsets' + num_detections = 'num_detections' + raw_detection_boxes = 'raw_detection_boxes' + raw_detection_scores = 'raw_detection_scores' + detection_anchor_indices = 'detection_anchor_indices' + + +class BoxListFields(object): + """Naming conventions for BoxLists. + + Attributes: + boxes: bounding box coordinates. + classes: classes per bounding box. + scores: scores per bounding box. + weights: sample weights per bounding box. + objectness: objectness score per bounding box. + masks: masks per bounding box. + boundaries: boundaries per bounding box. + keypoints: keypoints per bounding box. + keypoint_visibilities: keypoint visibilities per bounding box. + keypoint_heatmaps: keypoint heatmaps per bounding box. + densepose_num_points: number of DensePose points per bounding box. + densepose_part_ids: DensePose part ids per bounding box. + densepose_surface_coords: DensePose surface coordinates per bounding box. + is_crowd: is_crowd annotation per bounding box. + temporal_offsets: temporal center offsets per bounding box. + track_match_flags: match flags per bounding box. + """ + boxes = 'boxes' + classes = 'classes' + scores = 'scores' + weights = 'weights' + confidences = 'confidences' + objectness = 'objectness' + masks = 'masks' + boundaries = 'boundaries' + keypoints = 'keypoints' + keypoint_visibilities = 'keypoint_visibilities' + keypoint_heatmaps = 'keypoint_heatmaps' + densepose_num_points = 'densepose_num_points' + densepose_part_ids = 'densepose_part_ids' + densepose_surface_coords = 'densepose_surface_coords' + is_crowd = 'is_crowd' + group_of = 'group_of' + track_ids = 'track_ids' + temporal_offsets = 'temporal_offsets' + track_match_flags = 'track_match_flags' + + +class PredictionFields(object): + """Naming conventions for standardized prediction outputs. + + Attributes: + feature_maps: List of feature maps for prediction. + anchors: Generated anchors. + raw_detection_boxes: Decoded detection boxes without NMS. + raw_detection_feature_map_indices: Feature map indices from which each raw + detection box was produced. + """ + feature_maps = 'feature_maps' + anchors = 'anchors' + raw_detection_boxes = 'raw_detection_boxes' + raw_detection_feature_map_indices = 'raw_detection_feature_map_indices' + + +class TfExampleFields(object): + """TF-example proto feature names for object detection. + + Holds the standard feature names to load from an Example proto for object + detection. + + Attributes: + image_encoded: JPEG encoded string + image_format: image format, e.g. "JPEG" + filename: filename + channels: number of channels of image + colorspace: colorspace, e.g. "RGB" + height: height of image in pixels, e.g. 
462 + width: width of image in pixels, e.g. 581 + source_id: original source of the image + image_class_text: image-level label in text format + image_class_label: image-level label in numerical format + image_class_confidence: image-level confidence of the label + object_class_text: labels in text format, e.g. ["person", "cat"] + object_class_label: labels in numbers, e.g. [16, 8] + object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30 + object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40 + object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50 + object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70 + object_view: viewpoint of object, e.g. ["frontal", "left"] + object_truncated: is object truncated, e.g. [true, false] + object_occluded: is object occluded, e.g. [true, false] + object_difficult: is object difficult, e.g. [true, false] + object_group_of: is object a single object or a group of objects + object_depiction: is object a depiction + object_is_crowd: [DEPRECATED, use object_group_of instead] + is the object a single object or a crowd + object_segment_area: the area of the segment. + object_weight: a weight factor for the object's bounding box. + instance_masks: instance segmentation masks. + instance_boundaries: instance boundaries. + instance_classes: Classes for each instance segmentation mask. + detection_class_label: class label in numbers. + detection_bbox_ymin: ymin coordinates of a detection box. + detection_bbox_xmin: xmin coordinates of a detection box. + detection_bbox_ymax: ymax coordinates of a detection box. + detection_bbox_xmax: xmax coordinates of a detection box. + detection_score: detection score for the class label and box. + """ + image_encoded = 'image/encoded' + image_format = 'image/format' # format is reserved keyword + filename = 'image/filename' + channels = 'image/channels' + colorspace = 'image/colorspace' + height = 'image/height' + width = 'image/width' + source_id = 'image/source_id' + image_class_text = 'image/class/text' + image_class_label = 'image/class/label' + image_class_confidence = 'image/class/confidence' + object_class_text = 'image/object/class/text' + object_class_label = 'image/object/class/label' + object_bbox_ymin = 'image/object/bbox/ymin' + object_bbox_xmin = 'image/object/bbox/xmin' + object_bbox_ymax = 'image/object/bbox/ymax' + object_bbox_xmax = 'image/object/bbox/xmax' + object_view = 'image/object/view' + object_truncated = 'image/object/truncated' + object_occluded = 'image/object/occluded' + object_difficult = 'image/object/difficult' + object_group_of = 'image/object/group_of' + object_depiction = 'image/object/depiction' + object_is_crowd = 'image/object/is_crowd' + object_segment_area = 'image/object/segment/area' + object_weight = 'image/object/weight' + instance_masks = 'image/segmentation/object' + instance_boundaries = 'image/boundaries/object' + instance_classes = 'image/segmentation/object/class' + detection_class_label = 'image/detection/label' + detection_bbox_ymin = 'image/detection/bbox/ymin' + detection_bbox_xmin = 'image/detection/bbox/xmin' + detection_bbox_ymax = 'image/detection/bbox/ymax' + detection_bbox_xmax = 'image/detection/bbox/xmax' + detection_score = 'image/detection/score' + +# Sequence fields for SequenceExample inputs. +# All others are considered context fields. 
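The TfExampleFields names above are the feature keys used when serializing detection data as tf.train.Example protos. A hedged sketch of how one annotated image might be encoded (all values are placeholders; a real pipeline would read them from an annotation source):

```python
import tensorflow.compat.v1 as tf

from object_detection.core import standard_fields as fields


def _bytes(value):
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def _floats(values):
  return tf.train.Feature(float_list=tf.train.FloatList(value=values))


example = tf.train.Example(features=tf.train.Features(feature={
    fields.TfExampleFields.image_encoded: _bytes(b'placeholder-jpeg-bytes'),
    fields.TfExampleFields.image_format: _bytes(b'JPEG'),
    fields.TfExampleFields.object_class_text: _bytes(b'robot'),
    fields.TfExampleFields.object_bbox_ymin: _floats([0.1]),
    fields.TfExampleFields.object_bbox_xmin: _floats([0.2]),
    fields.TfExampleFields.object_bbox_ymax: _floats([0.6]),
    fields.TfExampleFields.object_bbox_xmax: _floats([0.7]),
}))
serialized = example.SerializeToString()
```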
+SEQUENCE_FIELDS = [InputDataFields.image, + InputDataFields.source_id, + InputDataFields.groundtruth_boxes, + InputDataFields.num_groundtruth_boxes, + InputDataFields.groundtruth_classes, + InputDataFields.groundtruth_weights, + InputDataFields.source_id, + InputDataFields.is_annotated] diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/standard_fields.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/standard_fields.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e18c5c1ad87b8a4dbea8ae8bbbe53a290eb94c9f Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/standard_fields.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/target_assigner.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/target_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..aec2f5c697963c5138e9c5964fc8711d64cea7fe --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/target_assigner.py @@ -0,0 +1,2284 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base target assigner module. + +The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and +groundtruth detections (bounding boxes), to assign classification and regression +targets to each anchor as well as weights to each anchor (specifying, e.g., +which anchors should not contribute to training loss). + +It assigns classification/regression targets by performing the following steps: +1) Computing pairwise similarity between anchors and groundtruth boxes using a + provided RegionSimilarity Calculator +2) Computing a matching based on the similarity matrix using a provided Matcher +3) Assigning regression targets based on the matching and a provided BoxCoder +4) Assigning classification targets based on the matching and groundtruth labels + +Note that TargetAssigners only operate on detections from a single +image at a time, so any logic for applying a TargetAssigner to multiple +images must be handled externally. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf +import tensorflow.compat.v2 as tf2 + +from object_detection.box_coders import faster_rcnn_box_coder +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.core import box_coder +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import densepose_ops +from object_detection.core import keypoint_ops +from object_detection.core import matcher as mat +from object_detection.core import region_similarity_calculator as sim_calc +from object_detection.core import standard_fields as fields +from object_detection.matchers import argmax_matcher +from object_detection.matchers import hungarian_matcher +from object_detection.utils import shape_utils +from object_detection.utils import target_assigner_utils as ta_utils +from object_detection.utils import tf_version + +if tf_version.is_tf1(): + from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top + +ResizeMethod = tf2.image.ResizeMethod + +_DEFAULT_KEYPOINT_OFFSET_STD_DEV = 1.0 + + +class TargetAssigner(object): + """Target assigner to compute classification and regression targets.""" + + def __init__(self, + similarity_calc, + matcher, + box_coder_instance, + negative_class_weight=1.0): + """Construct Object Detection Target Assigner. + + Args: + similarity_calc: a RegionSimilarityCalculator + matcher: an object_detection.core.Matcher used to match groundtruth to + anchors. + box_coder_instance: an object_detection.core.BoxCoder used to encode + matching groundtruth boxes with respect to anchors. + negative_class_weight: classification weight to be associated to negative + anchors (default: 1.0). The weight must be in [0., 1.]. + + Raises: + ValueError: if similarity_calc is not a RegionSimilarityCalculator or + if matcher is not a Matcher or if box_coder is not a BoxCoder + """ + if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator): + raise ValueError('similarity_calc must be a RegionSimilarityCalculator') + if not isinstance(matcher, mat.Matcher): + raise ValueError('matcher must be a Matcher') + if not isinstance(box_coder_instance, box_coder.BoxCoder): + raise ValueError('box_coder must be a BoxCoder') + self._similarity_calc = similarity_calc + self._matcher = matcher + self._box_coder = box_coder_instance + self._negative_class_weight = negative_class_weight + + @property + def box_coder(self): + return self._box_coder + + # TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields. + def assign(self, + anchors, + groundtruth_boxes, + groundtruth_labels=None, + unmatched_class_label=None, + groundtruth_weights=None): + """Assign classification and regression targets to each anchor. + + For a given set of anchors and groundtruth detections, match anchors + to groundtruth_boxes and assign classification and regression targets to + each anchor as well as weights based on the resulting match (specifying, + e.g., which anchors should not contribute to training loss). + + Anchors that are not matched to anything are given a classification target + of self._unmatched_cls_target which can be specified via the constructor. 
+ + Args: + anchors: a BoxList representing N anchors + groundtruth_boxes: a BoxList representing M groundtruth boxes + groundtruth_labels: a tensor of shape [M, d_1, ... d_k] + with labels for each of the ground_truth boxes. The subshape + [d_1, ... d_k] can be empty (corresponding to scalar inputs). When set + to None, groundtruth_labels assumes a binary problem where all + ground_truth boxes get a positive label (of 1). + unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] + which is consistent with the classification target for each + anchor (and can be empty for scalar targets). This shape must thus be + compatible with the groundtruth labels that are passed to the "assign" + function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). + If set to None, unmatched_cls_target is set to be [0] for each anchor. + groundtruth_weights: a float tensor of shape [M] indicating the weight to + assign to all anchors match to a particular groundtruth box. The weights + must be in [0., 1.]. If None, all weights are set to 1. Generally no + groundtruth boxes with zero weight match to any anchors as matchers are + aware of groundtruth weights. Additionally, `cls_weights` and + `reg_weights` are calculated using groundtruth weights as an added + safety. + + Returns: + cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], + where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels + which has shape [num_gt_boxes, d_1, d_2, ... d_k]. + cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], + representing weights for each element in cls_targets. + reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension] + reg_weights: a float32 tensor with shape [num_anchors] + match: an int32 tensor of shape [num_anchors] containing result of anchor + groundtruth matching. Each position in the tensor indicates an anchor + and holds the following meaning: + (1) if match[i] >= 0, anchor i is matched with groundtruth match[i]. + (2) if match[i]=-1, anchor i is marked to be background . + (3) if match[i]=-2, anchor i is ignored since it is not background and + does not have sufficient overlap to call it a foreground. 
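The three match codes enumerated above are easiest to read against a concrete vector; a purely illustrative decoding in plain Python:

```python
match_results = [0, 2, -1, -2]   # one entry per anchor (toy values)

for anchor_idx, m in enumerate(match_results):
  if m >= 0:
    print('anchor %d: matched to groundtruth box %d' % (anchor_idx, m))
  elif m == -1:
    print('anchor %d: background' % anchor_idx)
  else:  # m == -2
    print('anchor %d: ignored (insufficient overlap)' % anchor_idx)
```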
+ + Raises: + ValueError: if anchors or groundtruth_boxes are not of type + box_list.BoxList + """ + if not isinstance(anchors, box_list.BoxList): + raise ValueError('anchors must be an BoxList') + if not isinstance(groundtruth_boxes, box_list.BoxList): + raise ValueError('groundtruth_boxes must be an BoxList') + + if unmatched_class_label is None: + unmatched_class_label = tf.constant([0], tf.float32) + + if groundtruth_labels is None: + groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), + 0)) + groundtruth_labels = tf.expand_dims(groundtruth_labels, -1) + + unmatched_shape_assert = shape_utils.assert_shape_equal( + shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:], + shape_utils.combined_static_and_dynamic_shape(unmatched_class_label)) + labels_and_box_shapes_assert = shape_utils.assert_shape_equal( + shape_utils.combined_static_and_dynamic_shape( + groundtruth_labels)[:1], + shape_utils.combined_static_and_dynamic_shape( + groundtruth_boxes.get())[:1]) + + if groundtruth_weights is None: + num_gt_boxes = groundtruth_boxes.num_boxes_static() + if not num_gt_boxes: + num_gt_boxes = groundtruth_boxes.num_boxes() + groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32) + + # set scores on the gt boxes + scores = 1 - groundtruth_labels[:, 0] + groundtruth_boxes.add_field(fields.BoxListFields.scores, scores) + + with tf.control_dependencies( + [unmatched_shape_assert, labels_and_box_shapes_assert]): + match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, + anchors) + match = self._matcher.match(match_quality_matrix, + valid_rows=tf.greater(groundtruth_weights, 0)) + reg_targets = self._create_regression_targets(anchors, + groundtruth_boxes, + match) + cls_targets = self._create_classification_targets(groundtruth_labels, + unmatched_class_label, + match) + reg_weights = self._create_regression_weights(match, groundtruth_weights) + + cls_weights = self._create_classification_weights(match, + groundtruth_weights) + # convert cls_weights from per-anchor to per-class. + class_label_shape = tf.shape(cls_targets)[1:] + weights_shape = tf.shape(cls_weights) + weights_multiple = tf.concat( + [tf.ones_like(weights_shape), class_label_shape], + axis=0) + for _ in range(len(cls_targets.get_shape()[1:])): + cls_weights = tf.expand_dims(cls_weights, -1) + cls_weights = tf.tile(cls_weights, weights_multiple) + + num_anchors = anchors.num_boxes_static() + if num_anchors is not None: + reg_targets = self._reset_target_shape(reg_targets, num_anchors) + cls_targets = self._reset_target_shape(cls_targets, num_anchors) + reg_weights = self._reset_target_shape(reg_weights, num_anchors) + cls_weights = self._reset_target_shape(cls_weights, num_anchors) + + return (cls_targets, cls_weights, reg_targets, reg_weights, + match.match_results) + + def _reset_target_shape(self, target, num_anchors): + """Sets the static shape of the target. + + Args: + target: the target tensor. Its first dimension will be overwritten. + num_anchors: the number of anchors, which is used to override the target's + first dimension. + + Returns: + A tensor with the shape info filled in. + """ + target_shape = target.get_shape().as_list() + target_shape[0] = num_anchors + target.set_shape(target_shape) + return target + + def _create_regression_targets(self, anchors, groundtruth_boxes, match): + """Returns a regression target for each anchor. 
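Using assign() above requires wiring up a similarity calculator, a matcher, and a box coder. The sketch below mirrors the 'FasterRCNN' proposal-stage combination built by the create_target_assigner factory further down in this file; it is a toy illustration, not a fragment of any training pipeline:

```python
import tensorflow.compat.v1 as tf

from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.core import box_list
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import target_assigner
from object_detection.matchers import argmax_matcher

assigner = target_assigner.TargetAssigner(
    similarity_calc=sim_calc.IouSimilarity(),
    matcher=argmax_matcher.ArgMaxMatcher(matched_threshold=0.7,
                                         unmatched_threshold=0.3,
                                         force_match_for_each_row=True),
    box_coder_instance=faster_rcnn_box_coder.FasterRcnnBoxCoder(
        scale_factors=[10.0, 10.0, 5.0, 5.0]))

# Toy anchors and a single groundtruth box in normalized [ymin, xmin, ymax, xmax].
anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 1.0, 1.0]]))
groundtruth = box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4]]))

# With groundtruth_labels=None every groundtruth box gets a positive label of 1.
(cls_targets, cls_weights, reg_targets, reg_weights,
 match_results) = assigner.assign(anchors, groundtruth)
```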
+ + Args: + anchors: a BoxList representing N anchors + groundtruth_boxes: a BoxList representing M groundtruth_boxes + match: a matcher.Match object + + Returns: + reg_targets: a float32 tensor with shape [N, box_code_dimension] + """ + matched_gt_boxes = match.gather_based_on_match( + groundtruth_boxes.get(), + unmatched_value=tf.zeros(4), + ignored_value=tf.zeros(4)) + matched_gt_boxlist = box_list.BoxList(matched_gt_boxes) + if groundtruth_boxes.has_field(fields.BoxListFields.keypoints): + groundtruth_keypoints = groundtruth_boxes.get_field( + fields.BoxListFields.keypoints) + matched_keypoints = match.gather_based_on_match( + groundtruth_keypoints, + unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]), + ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:])) + matched_gt_boxlist.add_field(fields.BoxListFields.keypoints, + matched_keypoints) + matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors) + match_results_shape = shape_utils.combined_static_and_dynamic_shape( + match.match_results) + + # Zero out the unmatched and ignored regression targets. + unmatched_ignored_reg_targets = tf.tile( + self._default_regression_target(), [match_results_shape[0], 1]) + matched_anchors_mask = match.matched_column_indicator() + reg_targets = tf.where(matched_anchors_mask, + matched_reg_targets, + unmatched_ignored_reg_targets) + return reg_targets + + def _default_regression_target(self): + """Returns the default target for anchors to regress to. + + Default regression targets are set to zero (though in + this implementation what these targets are set to should + not matter as the regression weight of any box set to + regress to the default target is zero). + + Returns: + default_target: a float32 tensor with shape [1, box_code_dimension] + """ + return tf.constant([self._box_coder.code_size*[0]], tf.float32) + + def _create_classification_targets(self, groundtruth_labels, + unmatched_class_label, match): + """Create classification targets for each anchor. + + Assign a classification target of for each anchor to the matching + groundtruth label that is provided by match. Anchors that are not matched + to anything are given the target self._unmatched_cls_target + + Args: + groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k] + with labels for each of the ground_truth boxes. The subshape + [d_1, ... d_k] can be empty (corresponding to scalar labels). + unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] + which is consistent with the classification target for each + anchor (and can be empty for scalar targets). This shape must thus be + compatible with the groundtruth labels that are passed to the "assign" + function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). + match: a matcher.Match object that provides a matching between anchors + and groundtruth boxes. + + Returns: + a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the + subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has + shape [num_gt_boxes, d_1, d_2, ... d_k]. + """ + return match.gather_based_on_match( + groundtruth_labels, + unmatched_value=unmatched_class_label, + ignored_value=unmatched_class_label) + + def _create_regression_weights(self, match, groundtruth_weights): + """Set regression weight for each anchor. + + Only positive anchors are set to contribute to the regression loss, so this + method returns a weight of 1 for every positive anchor and 0 for every + negative anchor. 
+ + Args: + match: a matcher.Match object that provides a matching between anchors + and groundtruth boxes. + groundtruth_weights: a float tensor of shape [M] indicating the weight to + assign to all anchors match to a particular groundtruth box. + + Returns: + a float32 tensor with shape [num_anchors] representing regression weights. + """ + return match.gather_based_on_match( + groundtruth_weights, ignored_value=0., unmatched_value=0.) + + def _create_classification_weights(self, + match, + groundtruth_weights): + """Create classification weights for each anchor. + + Positive (matched) anchors are associated with a weight of + positive_class_weight and negative (unmatched) anchors are associated with + a weight of negative_class_weight. When anchors are ignored, weights are set + to zero. By default, both positive/negative weights are set to 1.0, + but they can be adjusted to handle class imbalance (which is almost always + the case in object detection). + + Args: + match: a matcher.Match object that provides a matching between anchors + and groundtruth boxes. + groundtruth_weights: a float tensor of shape [M] indicating the weight to + assign to all anchors match to a particular groundtruth box. + + Returns: + a float32 tensor with shape [num_anchors] representing classification + weights. + """ + return match.gather_based_on_match( + groundtruth_weights, + ignored_value=0., + unmatched_value=self._negative_class_weight) + + def get_box_coder(self): + """Get BoxCoder of this TargetAssigner. + + Returns: + BoxCoder object. + """ + return self._box_coder + + +# TODO(rathodv): This method pulls in all the implementation dependencies into +# core. Therefore its best to have this factory method outside of core. +def create_target_assigner(reference, stage=None, + negative_class_weight=1.0, use_matmul_gather=False): + """Factory function for creating standard target assigners. + + Args: + reference: string referencing the type of TargetAssigner. + stage: string denoting stage: {proposal, detection}. + negative_class_weight: classification weight to be associated to negative + anchors (default: 1.0) + use_matmul_gather: whether to use matrix multiplication based gather which + are better suited for TPUs. + + Returns: + TargetAssigner: desired target assigner. + + Raises: + ValueError: if combination reference+stage is invalid. + """ + if reference == 'Multibox' and stage == 'proposal': + if tf_version.is_tf2(): + raise ValueError('GreedyBipartiteMatcher is not supported in TF 2.X.') + similarity_calc = sim_calc.NegSqDistSimilarity() + matcher = bipartite_matcher.GreedyBipartiteMatcher() + box_coder_instance = mean_stddev_box_coder.MeanStddevBoxCoder() + + elif reference == 'FasterRCNN' and stage == 'proposal': + similarity_calc = sim_calc.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7, + unmatched_threshold=0.3, + force_match_for_each_row=True, + use_matmul_gather=use_matmul_gather) + box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder( + scale_factors=[10.0, 10.0, 5.0, 5.0]) + + elif reference == 'FasterRCNN' and stage == 'detection': + similarity_calc = sim_calc.IouSimilarity() + # Uses all proposals with IOU < 0.5 as candidate negatives. 
+ matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + negatives_lower_than_unmatched=True, + use_matmul_gather=use_matmul_gather) + box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder( + scale_factors=[10.0, 10.0, 5.0, 5.0]) + + elif reference == 'FastRCNN': + similarity_calc = sim_calc.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.1, + force_match_for_each_row=False, + negatives_lower_than_unmatched=False, + use_matmul_gather=use_matmul_gather) + box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder() + + else: + raise ValueError('No valid combination of reference and stage.') + + return TargetAssigner(similarity_calc, matcher, box_coder_instance, + negative_class_weight=negative_class_weight) + + +def batch_assign(target_assigner, + anchors_batch, + gt_box_batch, + gt_class_targets_batch, + unmatched_class_label=None, + gt_weights_batch=None): + """Batched assignment of classification and regression targets. + + Args: + target_assigner: a target assigner. + anchors_batch: BoxList representing N box anchors or list of BoxList objects + with length batch_size representing anchor sets. + gt_box_batch: a list of BoxList objects with length batch_size + representing groundtruth boxes for each image in the batch + gt_class_targets_batch: a list of tensors with length batch_size, where + each tensor has shape [num_gt_boxes_i, classification_target_size] and + num_gt_boxes_i is the number of boxes in the ith boxlist of + gt_box_batch. + unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] + which is consistent with the classification target for each + anchor (and can be empty for scalar targets). This shape must thus be + compatible with the groundtruth labels that are passed to the "assign" + function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). + gt_weights_batch: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + + Returns: + batch_cls_targets: a tensor with shape [batch_size, num_anchors, + num_classes], + batch_cls_weights: a tensor with shape [batch_size, num_anchors, + num_classes], + batch_reg_targets: a tensor with shape [batch_size, num_anchors, + box_code_dimension] + batch_reg_weights: a tensor with shape [batch_size, num_anchors], + match: an int32 tensor of shape [batch_size, num_anchors] containing result + of anchor groundtruth matching. Each position in the tensor indicates an + anchor and holds the following meaning: + (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i]. + (2) if match[x, i]=-1, anchor i is marked to be background . + (3) if match[x, i]=-2, anchor i is ignored since it is not background and + does not have sufficient overlap to call it a foreground. + + Raises: + ValueError: if input list lengths are inconsistent, i.e., + batch_size == len(gt_box_batch) == len(gt_class_targets_batch) + and batch_size == len(anchors_batch) unless anchors_batch is a single + BoxList. 
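batch_assign, whose body follows, simply runs the single-image assigner over per-image BoxLists and stacks the results. A hedged end-to-end sketch using the create_target_assigner factory defined above (toy boxes and labels):

```python
import tensorflow.compat.v1 as tf

from object_detection.core import box_list
from object_detection.core import target_assigner

# 'FasterRCNN' + 'proposal' -> IoU similarity, 0.7/0.3 argmax matcher,
# FasterRCNN box coder (see create_target_assigner above).
assigner = target_assigner.create_target_assigner('FasterRCNN', 'proposal')

anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 1.0, 1.0]]))
gt_box_batch = [
    box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4]])),    # image 0
    box_list.BoxList(tf.constant([[0.55, 0.55, 0.9, 0.9]])),  # image 1
]
# One-hot class targets: background plus two foreground classes.
gt_class_targets_batch = [tf.constant([[0.0, 1.0, 0.0]]),
                          tf.constant([[0.0, 0.0, 1.0]])]

(batch_cls_targets, batch_cls_weights, batch_reg_targets,
 batch_reg_weights, batch_match) = target_assigner.batch_assign(
     assigner, anchors, gt_box_batch, gt_class_targets_batch,
     unmatched_class_label=tf.constant([1.0, 0.0, 0.0]))
```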
+ """ + if not isinstance(anchors_batch, list): + anchors_batch = len(gt_box_batch) * [anchors_batch] + if not all( + isinstance(anchors, box_list.BoxList) for anchors in anchors_batch): + raise ValueError('anchors_batch must be a BoxList or list of BoxLists.') + if not (len(anchors_batch) + == len(gt_box_batch) + == len(gt_class_targets_batch)): + raise ValueError('batch size incompatible with lengths of anchors_batch, ' + 'gt_box_batch and gt_class_targets_batch.') + cls_targets_list = [] + cls_weights_list = [] + reg_targets_list = [] + reg_weights_list = [] + match_list = [] + if gt_weights_batch is None: + gt_weights_batch = [None] * len(gt_class_targets_batch) + for anchors, gt_boxes, gt_class_targets, gt_weights in zip( + anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch): + (cls_targets, cls_weights, + reg_targets, reg_weights, match) = target_assigner.assign( + anchors, gt_boxes, gt_class_targets, unmatched_class_label, + gt_weights) + cls_targets_list.append(cls_targets) + cls_weights_list.append(cls_weights) + reg_targets_list.append(reg_targets) + reg_weights_list.append(reg_weights) + match_list.append(match) + batch_cls_targets = tf.stack(cls_targets_list) + batch_cls_weights = tf.stack(cls_weights_list) + batch_reg_targets = tf.stack(reg_targets_list) + batch_reg_weights = tf.stack(reg_weights_list) + batch_match = tf.stack(match_list) + return (batch_cls_targets, batch_cls_weights, batch_reg_targets, + batch_reg_weights, batch_match) + + +# Assign an alias to avoid large refactor of existing users. +batch_assign_targets = batch_assign + + +def batch_get_targets(batch_match, groundtruth_tensor_list, + groundtruth_weights_list, unmatched_value, + unmatched_weight): + """Returns targets based on anchor-groundtruth box matching results. + + Args: + batch_match: An int32 tensor of shape [batch, num_anchors] containing the + result of target assignment returned by TargetAssigner.assign(..). + groundtruth_tensor_list: A list of groundtruth tensors of shape + [num_groundtruth, d_1, d_2, ..., d_k]. The tensors can be of any type. + groundtruth_weights_list: A list of weights, one per groundtruth tensor, of + shape [num_groundtruth]. + unmatched_value: A tensor of shape [d_1, d_2, ..., d_k] of the same type as + groundtruth tensor containing target value for anchors that remain + unmatched. + unmatched_weight: Scalar weight to assign to anchors that remain unmatched. + + Returns: + targets: A tensor of shape [batch, num_anchors, d_1, d_2, ..., d_k] + containing targets for anchors. + weights: A float tensor of shape [batch, num_anchors] containing the weights + to assign to each target. 
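batch_get_targets above reuses a [batch, num_anchors] match tensor to pull any per-groundtruth attribute onto the anchor grid. A small hypothetical sketch with a hand-written match tensor (the class ids 7 and 3 are arbitrary):

```python
import tensorflow.compat.v1 as tf

from object_detection.core import target_assigner

# Toy match tensor: >= 0 matched groundtruth index, -1 background, -2 ignored.
batch_match = tf.constant([[0, -1],
                           [-2, 0]], dtype=tf.int32)

groundtruth_classes_list = [tf.constant([7]), tf.constant([3])]   # one class id per gt box
groundtruth_weights_list = [tf.constant([1.0]), tf.constant([1.0])]

per_anchor_classes, per_anchor_weights = target_assigner.batch_get_targets(
    batch_match,
    groundtruth_tensor_list=groundtruth_classes_list,
    groundtruth_weights_list=groundtruth_weights_list,
    unmatched_value=tf.constant(0),      # class id assigned to unmatched/ignored anchors
    unmatched_weight=tf.constant(0.0))
```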
+  """
+  match_list = tf.unstack(batch_match)
+  targets_list = []
+  weights_list = []
+  for match_tensor, groundtruth_tensor, groundtruth_weight in zip(
+      match_list, groundtruth_tensor_list, groundtruth_weights_list):
+    match_object = mat.Match(match_tensor)
+    targets = match_object.gather_based_on_match(
+        groundtruth_tensor,
+        unmatched_value=unmatched_value,
+        ignored_value=unmatched_value)
+    targets_list.append(targets)
+    weights = match_object.gather_based_on_match(
+        groundtruth_weight,
+        unmatched_value=unmatched_weight,
+        ignored_value=tf.zeros_like(unmatched_weight))
+    weights_list.append(weights)
+  return tf.stack(targets_list), tf.stack(weights_list)
+
+
+def batch_assign_confidences(target_assigner,
+                             anchors_batch,
+                             gt_box_batch,
+                             gt_class_confidences_batch,
+                             gt_weights_batch=None,
+                             unmatched_class_label=None,
+                             include_background_class=True,
+                             implicit_class_weight=1.0):
+  """Batched assignment of classification and regression targets.
+
+  The differences between batch_assign_confidences and batch_assign_targets
+  are:
+   - 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and
+     tensor (high-dimensional) targets. 'batch_assign_confidences' only
+     supports scalar (agnostic) and vector (multiclass) targets.
+   - 'batch_assign_targets' assumes the input class tensor uses a binary
+     one/K-hot encoding. 'batch_assign_confidences' takes class confidence
+     scores as input, where 1 means explicit positive class, 0 means implicit
+     negative class, and -1 means explicit negative class.
+   - 'batch_assign_confidences' assigns the targets in a similar way as
+     'batch_assign_targets' except that it gives different weights to implicit
+     and explicit classes. This allows the user to control how strongly
+     negative gradients are pushed for implicit versus explicit examples
+     during training.
+
+  Args:
+    target_assigner: a target assigner.
+    anchors_batch: BoxList representing N box anchors or list of BoxList
+      objects with length batch_size representing anchor sets.
+    gt_box_batch: a list of BoxList objects with length batch_size
+      representing groundtruth boxes for each image in the batch.
+    gt_class_confidences_batch: a list of tensors with length batch_size, where
+      each tensor has shape [num_gt_boxes_i, classification_target_size] and
+      num_gt_boxes_i is the number of boxes in the ith boxlist of
+      gt_box_batch. Note that in this tensor, 1 means explicit positive class,
+      -1 means explicit negative class, and 0 means implicit negative class.
+    gt_weights_batch: A list of 1-D tf.float32 tensors of shape
+      [num_gt_boxes_i] containing weights for groundtruth boxes.
+    unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
+      which is consistent with the classification target for each
+      anchor (and can be empty for scalar targets). This shape must thus be
+      compatible with the groundtruth labels that are passed to the "assign"
+      function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
+    include_background_class: whether or not gt_class_confidences_batch
+      includes the background class.
+    implicit_class_weight: the weight assigned to implicit examples.
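+
+  For example (illustrative only), with three non-background classes a
+  confidence row of [1, 0, -1] marks class 0 as an explicit positive, class 1
+  as an implicit negative (weighted by implicit_class_weight) and class 2 as
+  an explicit negative for that groundtruth box.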
+ + Returns: + batch_cls_targets: a tensor with shape [batch_size, num_anchors, + num_classes], + batch_cls_weights: a tensor with shape [batch_size, num_anchors, + num_classes], + batch_reg_targets: a tensor with shape [batch_size, num_anchors, + box_code_dimension] + batch_reg_weights: a tensor with shape [batch_size, num_anchors], + match: an int32 tensor of shape [batch_size, num_anchors] containing result + of anchor groundtruth matching. Each position in the tensor indicates an + anchor and holds the following meaning: + (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i]. + (2) if match[x, i]=-1, anchor i is marked to be background . + (3) if match[x, i]=-2, anchor i is ignored since it is not background and + does not have sufficient overlap to call it a foreground. + + Raises: + ValueError: if input list lengths are inconsistent, i.e., + batch_size == len(gt_box_batch) == len(gt_class_targets_batch) + and batch_size == len(anchors_batch) unless anchors_batch is a single + BoxList, or if any element in gt_class_confidences_batch has rank > 2. + """ + if not isinstance(anchors_batch, list): + anchors_batch = len(gt_box_batch) * [anchors_batch] + if not all( + isinstance(anchors, box_list.BoxList) for anchors in anchors_batch): + raise ValueError('anchors_batch must be a BoxList or list of BoxLists.') + if not (len(anchors_batch) + == len(gt_box_batch) + == len(gt_class_confidences_batch)): + raise ValueError('batch size incompatible with lengths of anchors_batch, ' + 'gt_box_batch and gt_class_confidences_batch.') + + cls_targets_list = [] + cls_weights_list = [] + reg_targets_list = [] + reg_weights_list = [] + match_list = [] + if gt_weights_batch is None: + gt_weights_batch = [None] * len(gt_class_confidences_batch) + for anchors, gt_boxes, gt_class_confidences, gt_weights in zip( + anchors_batch, gt_box_batch, gt_class_confidences_batch, + gt_weights_batch): + + if (gt_class_confidences is not None and + len(gt_class_confidences.get_shape().as_list()) > 2): + raise ValueError('The shape of the class target is not supported. 
', + gt_class_confidences.get_shape()) + + cls_targets, _, reg_targets, _, match = target_assigner.assign( + anchors, gt_boxes, gt_class_confidences, unmatched_class_label, + groundtruth_weights=gt_weights) + + if include_background_class: + cls_targets_without_background = tf.slice( + cls_targets, [0, 1], [-1, -1]) + else: + cls_targets_without_background = cls_targets + + positive_mask = tf.greater(cls_targets_without_background, 0.0) + negative_mask = tf.less(cls_targets_without_background, 0.0) + explicit_example_mask = tf.logical_or(positive_mask, negative_mask) + positive_anchors = tf.reduce_any(positive_mask, axis=-1) + + regression_weights = tf.cast(positive_anchors, dtype=tf.float32) + regression_targets = ( + reg_targets * tf.expand_dims(regression_weights, axis=-1)) + regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1) + + cls_targets_without_background = ( + cls_targets_without_background * + (1 - tf.cast(negative_mask, dtype=tf.float32))) + cls_weights_without_background = ((1 - implicit_class_weight) * tf.cast( + explicit_example_mask, dtype=tf.float32) + implicit_class_weight) + + if include_background_class: + cls_weights_background = ( + (1 - implicit_class_weight) * regression_weights_expanded + + implicit_class_weight) + classification_weights = tf.concat( + [cls_weights_background, cls_weights_without_background], axis=-1) + cls_targets_background = 1 - regression_weights_expanded + classification_targets = tf.concat( + [cls_targets_background, cls_targets_without_background], axis=-1) + else: + classification_targets = cls_targets_without_background + classification_weights = cls_weights_without_background + + cls_targets_list.append(classification_targets) + cls_weights_list.append(classification_weights) + reg_targets_list.append(regression_targets) + reg_weights_list.append(regression_weights) + match_list.append(match) + batch_cls_targets = tf.stack(cls_targets_list) + batch_cls_weights = tf.stack(cls_weights_list) + batch_reg_targets = tf.stack(reg_targets_list) + batch_reg_weights = tf.stack(reg_weights_list) + batch_match = tf.stack(match_list) + return (batch_cls_targets, batch_cls_weights, batch_reg_targets, + batch_reg_weights, batch_match) + + +def _smallest_positive_root(a, b, c): + """Returns the smallest positive root of a quadratic equation.""" + + discriminant = tf.sqrt(b ** 2 - 4 * a * c) + + # TODO(vighneshb) We are currently using the slightly incorrect + # CenterNet implementation. The commented lines implement the fixed version + # in https://github.com/princeton-vl/CornerNet. Change the implementation + # after verifying it has no negative impact. + # root1 = (-b - discriminant) / (2 * a) + # root2 = (-b + discriminant) / (2 * a) + + # return tf.where(tf.less(root1, 0), root2, root1) + + return (-b + discriminant) / (2.0) + + +def max_distance_for_overlap(height, width, min_iou): + """Computes how far apart bbox corners can lie while maintaining the iou. + + Given a bounding box size, this function returns a lower bound on how far + apart the corners of another box can lie while still maintaining the given + IoU. The implementation is based on the `gaussian_radius` function in the + Objects as Points github repo: https://github.com/xingyizhou/CenterNet + + Args: + height: A 1-D float Tensor representing height of the ground truth boxes. + width: A 1-D float Tensor representing width of the ground truth boxes. + min_iou: A float representing the minimum IoU desired. 
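+
+  As a sketch of the quadratics solved in the implementation (under the
+  simplifying assumption described there): for the case where the detected
+  box is only offset from the groundtruth, requiring
+  (height - d) * (width - d) /
+  (2 * height * width - (height - d) * (width - d)) >= min_iou rearranges to
+  d**2 - (height + width) * d +
+  height * width * (1 - min_iou) / (1 + min_iou) >= 0, whose smallest positive
+  root is the distance used for that case. The two containment cases follow
+  the same pattern.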
+ + Returns: + distance: A 1-D Tensor of distances, of the same length as the input + height and width tensors. + """ + + # Given that the detected box is displaced at a distance `d`, the exact + # IoU value will depend on the angle at which each corner is displaced. + # We simplify our computation by assuming that each corner is displaced by + # a distance `d` in both x and y direction. This gives us a lower IoU than + # what is actually realizable and ensures that any box with corners less + # than `d` distance apart will always have an IoU greater than or equal + # to `min_iou` + + # The following 3 cases can be worked on geometrically and come down to + # solving a quadratic inequality. In each case, to ensure `min_iou` we use + # the smallest positive root of the equation. + + # Case where detected box is offset from ground truth and no box completely + # contains the other. + + distance_detection_offset = _smallest_positive_root( + a=1, b=-(height + width), + c=width * height * ((1 - min_iou) / (1 + min_iou)) + ) + + # Case where detection is smaller than ground truth and completely contained + # in it. + distance_detection_in_gt = _smallest_positive_root( + a=4, b=-2 * (height + width), + c=(1 - min_iou) * width * height + ) + + # Case where ground truth is smaller than detection and completely contained + # in it. + distance_gt_in_detection = _smallest_positive_root( + a=4 * min_iou, b=(2 * min_iou) * (width + height), + c=(min_iou - 1) * width * height + ) + + return tf.reduce_min([distance_detection_offset, + distance_gt_in_detection, + distance_detection_in_gt], axis=0) + + +def get_batch_predictions_from_indices(batch_predictions, indices): + """Gets the values of predictions in a batch at the given indices. + + The indices are expected to come from the offset targets generation functions + in this library. The returned value is intended to be used inside a loss + function. + + Args: + batch_predictions: A tensor of shape [batch_size, height, width, channels] + or [batch_size, height, width, class, channels] for class-specific + features (e.g. keypoint joint offsets). + indices: A tensor of shape [num_instances, 3] for single class features or + [num_instances, 4] for multiple classes features. + + Returns: + values: A tensor of shape [num_instances, channels] holding the predicted + values at the given indices. + """ + return tf.gather_nd(batch_predictions, indices) + + +def _compute_std_dev_from_box_size(boxes_height, boxes_width, min_overlap): + """Computes the standard deviation of the Gaussian kernel from box size. + + Args: + boxes_height: A 1D tensor with shape [num_instances] representing the height + of each box. + boxes_width: A 1D tensor with shape [num_instances] representing the width + of each box. + min_overlap: The minimum IOU overlap that boxes need to have to not be + penalized. + + Returns: + A 1D tensor with shape [num_instances] representing the computed Gaussian + sigma for each of the box. + """ + # We are dividing by 3 so that points closer than the computed + # distance have a >99% CDF. + sigma = max_distance_for_overlap(boxes_height, boxes_width, min_overlap) + sigma = (2 * tf.math.maximum(tf.math.floor(sigma), 0.0) + 1) / 6.0 + return sigma + + +class CenterNetCenterHeatmapTargetAssigner(object): + """Wrapper to compute the object center heatmap.""" + + def __init__(self, stride, min_overlap=0.7, compute_heatmap_sparse=False): + """Initializes the target assigner. + + Args: + stride: int, the stride of the network in output pixels. 
+ min_overlap: The minimum IOU overlap that boxes need to have to not be + penalized. + compute_heatmap_sparse: bool, indicating whether or not to use the sparse + version of the Op that computes the heatmap. The sparse version scales + better with number of classes, but in some cases is known to cause + OOM error. See (b/170989061). + """ + + self._stride = stride + self._min_overlap = min_overlap + self._compute_heatmap_sparse = compute_heatmap_sparse + + def assign_center_targets_from_boxes(self, + height, + width, + gt_boxes_list, + gt_classes_list, + gt_weights_list=None): + """Computes the object center heatmap target. + + Args: + height: int, height of input to the model. This is used to + determine the height of the output. + width: int, width of the input to the model. This is used to + determine the width of the output. + gt_boxes_list: A list of float tensors with shape [num_boxes, 4] + representing the groundtruth detection bounding boxes for each sample in + the batch. The box coordinates are expected in normalized coordinates. + gt_classes_list: A list of float tensors with shape [num_boxes, + num_classes] representing the one-hot encoded class labels for each box + in the gt_boxes_list. + gt_weights_list: A list of float tensors with shape [num_boxes] + representing the weight of each groundtruth detection box. + + Returns: + heatmap: A Tensor of size [batch_size, output_height, output_width, + num_classes] representing the per class center heatmap. output_height + and output_width are computed by dividing the input height and width by + the stride specified during initialization. + """ + + out_height = tf.cast(height // self._stride, tf.float32) + out_width = tf.cast(width // self._stride, tf.float32) + # Compute the yx-grid to be used to generate the heatmap. Each returned + # tensor has shape of [out_height, out_width] + (y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width) + + heatmaps = [] + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_boxes_list) + # TODO(vighneshb) Replace the for loop with a batch version. + for boxes, class_targets, weights in zip(gt_boxes_list, gt_classes_list, + gt_weights_list): + boxes = box_list.BoxList(boxes) + # Convert the box coordinates to absolute output image dimension space. + boxes = box_list_ops.to_absolute_coordinates(boxes, + height // self._stride, + width // self._stride) + # Get the box center coordinates. Each returned tensors have the shape of + # [num_instances] + (y_center, x_center, boxes_height, + boxes_width) = boxes.get_center_coordinates_and_sizes() + + # Compute the sigma from box size. The tensor shape: [num_instances]. + sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, + self._min_overlap) + # Apply the Gaussian kernel to the center coordinates. Returned heatmap + # has shape of [out_height, out_width, num_classes] + heatmap = ta_utils.coordinates_to_heatmap( + y_grid=y_grid, + x_grid=x_grid, + y_coordinates=y_center, + x_coordinates=x_center, + sigma=sigma, + channel_onehot=class_targets, + channel_weights=weights, + sparse=self._compute_heatmap_sparse) + heatmaps.append(heatmap) + + # Return the stacked heatmaps over the batch. + return tf.stack(heatmaps, axis=0) + + +class CenterNetBoxTargetAssigner(object): + """Wrapper to compute target tensors for the object detection task. + + This class has methods that take as input a batch of ground truth tensors + (in the form of a list) and return the targets required to train the object + detection task. 
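+
+  Example (an illustrative sketch; the stride, input size and box values are
+  assumed purely for demonstration):
+
+    box_assigner = CenterNetBoxTargetAssigner(stride=4)
+    gt_boxes_list = [tf.constant([[0.1, 0.1, 0.6, 0.6]])]
+    (indices, height_width, offsets, weights) = (
+        box_assigner.assign_size_and_offset_targets(
+            height=128, width=128, gt_boxes_list=gt_boxes_list))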
+ """ + + def __init__(self, stride): + """Initializes the target assigner. + + Args: + stride: int, the stride of the network in output pixels. + """ + + self._stride = stride + + def assign_size_and_offset_targets(self, + height, + width, + gt_boxes_list, + gt_weights_list=None): + """Returns the box height/width and center offset targets and their indices. + + The returned values are expected to be used with predicted tensors + of size (batch_size, height//self._stride, width//self._stride, 2). The + predicted values at the relevant indices can be retrieved with the + get_batch_predictions_from_indices function. + + Args: + height: int, height of input to the model. This is used to determine the + height of the output. + width: int, width of the input to the model. This is used to determine the + width of the output. + gt_boxes_list: A list of float tensors with shape [num_boxes, 4] + representing the groundtruth detection bounding boxes for each sample in + the batch. The coordinates are expected in normalized coordinates. + gt_weights_list: A list of tensors with shape [num_boxes] corresponding to + the weight of each groundtruth detection box. + + Returns: + batch_indices: an integer tensor of shape [num_boxes, 3] holding the + indices inside the predicted tensor which should be penalized. The + first column indicates the index along the batch dimension and the + second and third columns indicate the index along the y and x + dimensions respectively. + batch_box_height_width: a float tensor of shape [num_boxes, 2] holding + expected height and width of each box in the output space. + batch_offsets: a float tensor of shape [num_boxes, 2] holding the + expected y and x offset of each box in the output space. + batch_weights: a float tensor of shape [num_boxes] indicating the + weight of each prediction. + """ + + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_boxes_list) + + batch_indices = [] + batch_box_height_width = [] + batch_weights = [] + batch_offsets = [] + + for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)): + boxes = box_list.BoxList(boxes) + boxes = box_list_ops.to_absolute_coordinates(boxes, + height // self._stride, + width // self._stride) + # Get the box center coordinates. Each returned tensors have the shape of + # [num_boxes] + (y_center, x_center, boxes_height, + boxes_width) = boxes.get_center_coordinates_and_sizes() + num_boxes = tf.shape(x_center) + + # Compute the offsets and indices of the box centers. Shape: + # offsets: [num_boxes, 2] + # indices: [num_boxes, 2] + (offsets, indices) = ta_utils.compute_floor_offsets_with_indices( + y_source=y_center, x_source=x_center) + + # Assign ones if weights are not provided. + if weights is None: + weights = tf.ones(num_boxes, dtype=tf.float32) + + # Shape of [num_boxes, 1] integer tensor filled with current batch index. + batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32) + batch_indices.append(tf.concat([batch_index, indices], axis=1)) + batch_box_height_width.append( + tf.stack([boxes_height, boxes_width], axis=1)) + batch_weights.append(weights) + batch_offsets.append(offsets) + + batch_indices = tf.concat(batch_indices, axis=0) + batch_box_height_width = tf.concat(batch_box_height_width, axis=0) + batch_weights = tf.concat(batch_weights, axis=0) + batch_offsets = tf.concat(batch_offsets, axis=0) + return (batch_indices, batch_box_height_width, batch_offsets, batch_weights) + + +# TODO(yuhuic): Update this class to handle the instance/keypoint weights. 
+# Currently those weights are used as "mask" to indicate whether an
+# instance/keypoint should be considered or not (expecting only either 0 or 1
+# value). In reality, the weights can be any value and this class should handle
+# those values properly.
+class CenterNetKeypointTargetAssigner(object):
+  """Wrapper to compute target tensors for the CenterNet keypoint estimation.
+
+  This class has methods that take as input a batch of groundtruth tensors
+  (in the form of a list) and return the targets required to train the
+  CenterNet model for keypoint estimation. Specifically, the class methods
+  expect the groundtruth in the following formats (consistent with the
+  standard Object Detection API). Note that usually the groundtruth tensors are
+  packed in a list which represents the batch dimension:
+
+  gt_classes_list: [Required] a list of 2D tf.float32 one-hot
+    (or k-hot) tensors of shape [num_instances, num_classes] containing the
+    class targets with the 0th index assumed to map to the first non-background
+    class.
+  gt_keypoints_list: [Required] a list of 3D tf.float32 tensors of
+    shape [num_instances, num_total_keypoints, 2] containing keypoint
+    coordinates. Note that the "num_total_keypoints" should be the sum of the
+    num_keypoints over all possible keypoint types, e.g. human pose, face.
+    For example, if a dataset contains both 17 human pose keypoints and 5 face
+    keypoints, then num_total_keypoints = 17 + 5 = 22.
+    If an instance contains only a subset of keypoints (e.g. human pose
+    keypoints but not face keypoints), the face keypoints will be filled with
+    zeros. Also note that keypoints are assumed to be provided in normalized
+    coordinates and missing keypoints should be encoded as NaN.
+  gt_keypoints_weights_list: [Optional] a list of 3D tf.float32 tensors of
+    shape [num_instances, num_total_keypoints] representing the weight of each
+    keypoint. If not provided, then all non-NaN keypoints will be equally
+    weighted.
+  gt_boxes_list: [Optional] a list of 2D tf.float32 tensors of shape
+    [num_instances, 4] containing coordinates of the groundtruth boxes.
+    Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and
+    assumed to be normalized and clipped relative to the image window with
+    y_min <= y_max and x_min <= x_max.
+    Note that the boxes are only used to compute the center targets but are not
+    considered as required output of the keypoint task. If the boxes were not
+    provided, the center targets will be inferred from the keypoints
+    [not implemented yet].
+  gt_weights_list: [Optional] A list of 1D tf.float32 tensors of shape
+    [num_instances] containing weights for groundtruth boxes. Only useful when
+    gt_boxes_list is also provided.
+  """
+
+  def __init__(self,
+               stride,
+               class_id,
+               keypoint_indices,
+               keypoint_std_dev=None,
+               per_keypoint_offset=False,
+               peak_radius=0,
+               compute_heatmap_sparse=False):
+    """Initializes a CenterNet keypoints target assigner.
+
+    Args:
+      stride: int, the stride of the network in output pixels.
+      class_id: int, the ID of the class (0-indexed) that contains the target
+        keypoints to consider in this task. For example, if the task is human
+        pose estimation, the class id should correspond to the "human" class.
+      keypoint_indices: A list of integers representing the indices of the
+        keypoints to be considered in this task. This is used to retrieve the
+        subset of the keypoints from gt_keypoints that should be considered in
+        this task.
+      keypoint_std_dev: A list of floats representing the standard deviation of
+        the Gaussian kernel used to generate the keypoint heatmap (in units of
+        output pixels). This provides the flexibility of using a different
+        Gaussian kernel size for each keypoint type. If not provided, all
+        standard deviations will be the same as the default value (10.0 in the
+        output pixel space). If provided, the length of keypoint_std_dev needs
+        to be the same as the length of keypoint_indices, indicating the
+        standard deviation of each keypoint type.
+      per_keypoint_offset: boolean, indicating whether to assign offsets for
+        each keypoint channel. If set to False, the output offset target will
+        have the shape [batch_size, out_height, out_width, 2]. If set to True,
+        the output offset target will have the shape [batch_size, out_height,
+        out_width, 2 * num_keypoints].
+      peak_radius: int, the radius (in units of output pixels) around a heatmap
+        peak within which to assign the offset targets.
+      compute_heatmap_sparse: bool, indicating whether or not to use the sparse
+        version of the Op that computes the heatmap. The sparse version scales
+        better with the number of keypoint types, but in some cases is known to
+        cause an OOM error. See (b/170989061).
+    """
+
+    self._stride = stride
+    self._class_id = class_id
+    self._keypoint_indices = keypoint_indices
+    self._per_keypoint_offset = per_keypoint_offset
+    self._peak_radius = peak_radius
+    self._compute_heatmap_sparse = compute_heatmap_sparse
+    if keypoint_std_dev is None:
+      self._keypoint_std_dev = ([_DEFAULT_KEYPOINT_OFFSET_STD_DEV] *
+                                len(keypoint_indices))
+    else:
+      assert len(keypoint_indices) == len(keypoint_std_dev)
+      self._keypoint_std_dev = keypoint_std_dev
+
+  def _preprocess_keypoints_and_weights(self, out_height, out_width, keypoints,
+                                        class_onehot, class_weights,
+                                        keypoint_weights):
+    """Preprocesses the keypoints and the corresponding keypoint weights.
+
+    This function performs several common steps to preprocess the keypoints and
+    keypoint weights features, including:
+    1) Select the subset of keypoints based on the keypoint indices, fill the
+       keypoint NaN values with zeros and convert to absolute coordinates.
+    2) Generate the weights of the keypoints using the following information:
+       a. The class of the instance.
+       b. The NaN value of the keypoint coordinates.
+       c. The provided keypoint weights.
+
+    Args:
+      out_height: An integer or an integer tensor indicating the output height
+        of the model.
+      out_width: An integer or an integer tensor indicating the output width of
+        the model.
+      keypoints: A float tensor of shape [num_instances, num_total_keypoints, 2]
+        representing the original keypoint groundtruth coordinates.
+      class_onehot: A float tensor of shape [num_instances, num_classes]
+        containing the class targets with the 0th index assumed to map to the
+        first non-background class.
+      class_weights: A float tensor of shape [num_instances] containing weights
+        for groundtruth instances.
+      keypoint_weights: A float tensor of shape
+        [num_instances, num_total_keypoints] representing the weight of each
+        keypoint.
+
+    Returns:
+      A tuple of two tensors:
+      keypoints_absolute: A float tensor of shape
+        [num_instances, num_keypoints, 2] which is the selected and updated
+        keypoint coordinates.
+      keypoint_weights: A float tensor of shape [num_instances, num_keypoints]
+        representing the updated weight of each keypoint.
+    """
+    # Select the target keypoints by their type ids and generate the mask
+    # of valid elements.
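+    # As described in the docstring above, the returned valid_mask is expected
+    # to have shape [num_instances, num_keypoints] and zero out keypoints
+    # whose instance is not of the target class, whose class weight is zero,
+    # or whose coordinates are NaN.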
+ valid_mask, keypoints = ta_utils.get_valid_keypoint_mask_for_class( + keypoint_coordinates=keypoints, + class_id=self._class_id, + class_onehot=class_onehot, + class_weights=class_weights, + keypoint_indices=self._keypoint_indices) + # Keypoint coordinates in absolute coordinate system. + # The shape of the tensors: [num_instances, num_keypoints, 2]. + keypoints_absolute = keypoint_ops.to_absolute_coordinates( + keypoints, out_height, out_width) + # Assign default weights for the keypoints. + if keypoint_weights is None: + keypoint_weights = tf.ones_like(keypoints[:, :, 0]) + else: + keypoint_weights = tf.gather( + keypoint_weights, indices=self._keypoint_indices, axis=1) + keypoint_weights = keypoint_weights * valid_mask + return keypoints_absolute, keypoint_weights + + def assign_keypoint_heatmap_targets(self, + height, + width, + gt_keypoints_list, + gt_classes_list, + gt_keypoints_weights_list=None, + gt_weights_list=None, + gt_boxes_list=None): + """Returns the keypoint heatmap targets for the CenterNet model. + + Args: + height: int, height of input to the CenterNet model. This is used to + determine the height of the output. + width: int, width of the input to the CenterNet model. This is used to + determine the width of the output. + gt_keypoints_list: A list of float tensors with shape [num_instances, + num_total_keypoints, 2]. See class-level description for more detail. + gt_classes_list: A list of float tensors with shape [num_instances, + num_classes]. See class-level description for more detail. + gt_keypoints_weights_list: A list of tensors with shape [num_instances, + num_total_keypoints] corresponding to the weight of each keypoint. + gt_weights_list: A list of float tensors with shape [num_instances]. See + class-level description for more detail. + gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See + class-level description for more detail. If provided, the keypoint + standard deviations will be scaled based on the box sizes. + + Returns: + heatmap: A float tensor of shape [batch_size, output_height, output_width, + num_keypoints] representing the per keypoint type center heatmap. + output_height and output_width are computed by dividing the input height + and width by the stride specified during initialization. Note that the + "num_keypoints" is defined by the length of keypoint_indices, which is + not necessarily equal to "num_total_keypoints". + num_instances_batch: A 2D int tensor of shape + [batch_size, num_keypoints] representing number of instances for each + keypoint type. + valid_mask: A float tensor with shape [batch_size, output_height, + output_width] where all values within the regions of the blackout boxes + are 0.0 and 1.0 else where. + """ + out_width = tf.cast(width // self._stride, tf.float32) + out_height = tf.cast(height // self._stride, tf.float32) + # Compute the yx-grid to be used to generate the heatmap. 
Each returned + # tensor has shape of [out_height, out_width] + y_grid, x_grid = ta_utils.image_shape_to_grids(out_height, out_width) + + if gt_keypoints_weights_list is None: + gt_keypoints_weights_list = [None] * len(gt_keypoints_list) + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_classes_list) + if gt_boxes_list is None: + gt_boxes_list = [None] * len(gt_keypoints_list) + + heatmaps = [] + num_instances_list = [] + valid_mask_list = [] + for keypoints, classes, kp_weights, weights, boxes in zip( + gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, + gt_weights_list, gt_boxes_list): + keypoints_absolute, kp_weights = self._preprocess_keypoints_and_weights( + out_height=out_height, + out_width=out_width, + keypoints=keypoints, + class_onehot=classes, + class_weights=weights, + keypoint_weights=kp_weights) + num_instances, num_keypoints, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) + + # A tensor of shape [num_instances, num_keypoints] with + # each element representing the type dimension for each corresponding + # keypoint: + # [[0, 1, ..., k-1], + # [0, 1, ..., k-1], + # : + # [0, 1, ..., k-1]] + keypoint_types = tf.tile( + input=tf.expand_dims(tf.range(num_keypoints), axis=0), + multiples=[num_instances, 1]) + + # A tensor of shape [num_instances, num_keypoints] with + # each element representing the sigma of the Gaussian kernel for each + # keypoint. + keypoint_std_dev = tf.tile( + input=tf.expand_dims(tf.constant(self._keypoint_std_dev), axis=0), + multiples=[num_instances, 1]) + + # If boxes is not None, then scale the standard deviation based on the + # size of the object bounding boxes similar to object center heatmap. + if boxes is not None: + boxes = box_list.BoxList(boxes) + # Convert the box coordinates to absolute output image dimension space. + boxes = box_list_ops.to_absolute_coordinates(boxes, + height // self._stride, + width // self._stride) + # Get the box height and width. Each returned tensors have the shape + # of [num_instances] + (_, _, boxes_height, + boxes_width) = boxes.get_center_coordinates_and_sizes() + + # Compute the sigma from box size. The tensor shape: [num_instances]. + sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, 0.7) + keypoint_std_dev = keypoint_std_dev * tf.stack( + [sigma] * num_keypoints, axis=1) + + # Generate the valid region mask to ignore regions with target class but + # no corresponding keypoints. + # Shape: [num_instances]. + blackout = tf.logical_and(classes[:, self._class_id] > 0, + tf.reduce_max(kp_weights, axis=1) < 1e-3) + valid_mask = ta_utils.blackout_pixel_weights_by_box_regions( + out_height, out_width, boxes.get(), blackout) + valid_mask_list.append(valid_mask) + + # Apply the Gaussian kernel to the keypoint coordinates. Returned heatmap + # has shape of [out_height, out_width, num_keypoints]. 
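+      # The keypoints of all instances are flattened into a single list of
+      # num_instances * num_keypoints coordinates; the one-hot keypoint type
+      # of each coordinate routes its Gaussian to the matching heatmap
+      # channel.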
+ heatmap = ta_utils.coordinates_to_heatmap( + y_grid=y_grid, + x_grid=x_grid, + y_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]), + x_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]), + sigma=tf.keras.backend.flatten(keypoint_std_dev), + channel_onehot=tf.one_hot( + tf.keras.backend.flatten(keypoint_types), depth=num_keypoints), + channel_weights=tf.keras.backend.flatten(kp_weights)) + num_instances_list.append( + tf.cast(tf.reduce_sum(kp_weights, axis=0), dtype=tf.int32)) + heatmaps.append(heatmap) + return (tf.stack(heatmaps, axis=0), tf.stack(num_instances_list, axis=0), + tf.stack(valid_mask_list, axis=0)) + + def _get_keypoint_types(self, num_instances, num_keypoints, num_neighbors): + """Gets keypoint type index tensor. + + The function prepares the tensor of keypoint indices with shape + [num_instances, num_keypoints, num_neighbors]. Each element represents the + keypoint type index for each corresponding keypoint and tiled along the 3rd + axis: + [[0, 1, ..., num_keypoints - 1], + [0, 1, ..., num_keypoints - 1], + : + [0, 1, ..., num_keypoints - 1]] + + Args: + num_instances: int, the number of instances, used to define the 1st + dimension. + num_keypoints: int, the number of keypoint types, used to define the 2nd + dimension. + num_neighbors: int, the number of neighborhood pixels to consider for each + keypoint, used to define the 3rd dimension. + + Returns: + A integer tensor of shape [num_instances, num_keypoints, num_neighbors]. + """ + keypoint_types = tf.range(num_keypoints)[tf.newaxis, :, tf.newaxis] + tiled_keypoint_types = tf.tile(keypoint_types, + multiples=[num_instances, 1, num_neighbors]) + return tiled_keypoint_types + + def assign_keypoints_offset_targets(self, + height, + width, + gt_keypoints_list, + gt_classes_list, + gt_keypoints_weights_list=None, + gt_weights_list=None): + """Returns the offsets and indices of the keypoints for location refinement. + + The returned values are used to refine the location of each keypoints in the + heatmap. The predicted values at the relevant indices can be retrieved with + the get_batch_predictions_from_indices function. + + Args: + height: int, height of input to the CenterNet model. This is used to + determine the height of the output. + width: int, width of the input to the CenterNet model. This is used to + determine the width of the output. + gt_keypoints_list: A list of tensors with shape [num_instances, + num_total_keypoints]. See class-level description for more detail. + gt_classes_list: A list of tensors with shape [num_instances, + num_classes]. See class-level description for more detail. + gt_keypoints_weights_list: A list of tensors with shape [num_instances, + num_total_keypoints] corresponding to the weight of each keypoint. + gt_weights_list: A list of float tensors with shape [num_instances]. See + class-level description for more detail. + + Returns: + batch_indices: an integer tensor of shape [num_total_instances, 3] (or + [num_total_instances, 4] if 'per_keypoint_offset' is set True) holding + the indices inside the predicted tensor which should be penalized. The + first column indicates the index along the batch dimension and the + second and third columns indicate the index along the y and x + dimensions respectively. The fourth column corresponds to the channel + dimension (if 'per_keypoint_offset' is set True). + batch_offsets: a float tensor of shape [num_total_instances, 2] holding + the expected y and x offset of each box in the output space. 
+ batch_weights: a float tensor of shape [num_total_instances] indicating + the weight of each prediction. + Note that num_total_instances = batch_size * num_instances * + num_keypoints * num_neighbors + """ + + batch_indices = [] + batch_offsets = [] + batch_weights = [] + + if gt_keypoints_weights_list is None: + gt_keypoints_weights_list = [None] * len(gt_keypoints_list) + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_classes_list) + for i, (keypoints, classes, kp_weights, weights) in enumerate( + zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, + gt_weights_list)): + keypoints_absolute, kp_weights = self._preprocess_keypoints_and_weights( + out_height=height // self._stride, + out_width=width // self._stride, + keypoints=keypoints, + class_onehot=classes, + class_weights=weights, + keypoint_weights=kp_weights) + num_instances, num_keypoints, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) + + # [num_instances * num_keypoints] + y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0]) + x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1]) + + # All keypoint coordinates and their neighbors: + # [num_instance * num_keypoints, num_neighbors] + (y_source_neighbors, x_source_neighbors, + valid_sources) = ta_utils.get_surrounding_grids(height // self._stride, + width // self._stride, + y_source, x_source, + self._peak_radius) + _, num_neighbors = shape_utils.combined_static_and_dynamic_shape( + y_source_neighbors) + + # Update the valid keypoint weights. + # [num_instance * num_keypoints, num_neighbors] + valid_keypoints = tf.cast( + valid_sources, dtype=tf.float32) * tf.stack( + [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1) + + # Compute the offsets and indices of the box centers. Shape: + # offsets: [num_instances * num_keypoints, num_neighbors, 2] + # indices: [num_instances * num_keypoints, num_neighbors, 2] + offsets, indices = ta_utils.compute_floor_offsets_with_indices( + y_source=y_source_neighbors, + x_source=x_source_neighbors, + y_target=y_source, + x_target=x_source) + # Reshape to: + # offsets: [num_instances * num_keypoints * num_neighbors, 2] + # indices: [num_instances * num_keypoints * num_neighbors, 2] + offsets = tf.reshape(offsets, [-1, 2]) + indices = tf.reshape(indices, [-1, 2]) + + # Prepare the batch indices to be prepended. + batch_index = tf.fill( + [num_instances * num_keypoints * num_neighbors, 1], i) + if self._per_keypoint_offset: + tiled_keypoint_types = self._get_keypoint_types( + num_instances, num_keypoints, num_neighbors) + batch_indices.append( + tf.concat([batch_index, indices, + tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1)) + else: + batch_indices.append(tf.concat([batch_index, indices], axis=1)) + batch_offsets.append(offsets) + batch_weights.append(tf.keras.backend.flatten(valid_keypoints)) + + # Concatenate the tensors in the batch in the first dimension: + # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or + # [batch_size * num_instances * num_keypoints * num_neighbors, 4] if + # 'per_keypoint_offset' is set to True. 
+ batch_indices = tf.concat(batch_indices, axis=0) + # shape: [batch_size * num_instances * num_keypoints * num_neighbors] + batch_weights = tf.concat(batch_weights, axis=0) + # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 2] + batch_offsets = tf.concat(batch_offsets, axis=0) + return (batch_indices, batch_offsets, batch_weights) + + def assign_joint_regression_targets(self, + height, + width, + gt_keypoints_list, + gt_classes_list, + gt_boxes_list=None, + gt_keypoints_weights_list=None, + gt_weights_list=None): + """Returns the joint regression from center grid to keypoints. + + The joint regression is used as the grouping cue from the estimated + keypoints to instance center. The offsets are the vectors from the floored + object center coordinates to the keypoint coordinates. + + Args: + height: int, height of input to the CenterNet model. This is used to + determine the height of the output. + width: int, width of the input to the CenterNet model. This is used to + determine the width of the output. + gt_keypoints_list: A list of float tensors with shape [num_instances, + num_total_keypoints]. See class-level description for more detail. + gt_classes_list: A list of float tensors with shape [num_instances, + num_classes]. See class-level description for more detail. + gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See + class-level description for more detail. If provided, then the center + targets will be computed based on the center of the boxes. + gt_keypoints_weights_list: A list of float tensors with shape + [num_instances, num_total_keypoints] representing to the weight of each + keypoint. + gt_weights_list: A list of float tensors with shape [num_instances]. See + class-level description for more detail. + + Returns: + batch_indices: an integer tensor of shape [num_instances, 4] holding the + indices inside the predicted tensor which should be penalized. The + first column indicates the index along the batch dimension and the + second and third columns indicate the index along the y and x + dimensions respectively, the last dimension refers to the keypoint type + dimension. + batch_offsets: a float tensor of shape [num_instances, 2] holding the + expected y and x offset of each box in the output space. + batch_weights: a float tensor of shape [num_instances] indicating the + weight of each prediction. + Note that num_total_instances = batch_size * num_instances * num_keypoints + + Raises: + NotImplementedError: currently the object center coordinates need to be + computed from groundtruth bounding boxes. The functionality of + generating the object center coordinates from keypoints is not + implemented yet. 
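+
+    Example (an illustrative sketch; the keypoint configuration and tensor
+    values are assumed purely for demonstration):
+
+      kp_assigner = CenterNetKeypointTargetAssigner(
+          stride=4, class_id=0, keypoint_indices=[0, 1])
+      keypoints = [tf.constant([[[0.2, 0.2], [0.4, 0.4], [0.6, 0.6]]])]
+      classes = [tf.constant([[1.0]])]
+      boxes = [tf.constant([[0.1, 0.1, 0.7, 0.7]])]
+      indices, offsets, weights = kp_assigner.assign_joint_regression_targets(
+          height=128, width=128, gt_keypoints_list=keypoints,
+          gt_classes_list=classes, gt_boxes_list=boxes)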
+ """ + + batch_indices = [] + batch_offsets = [] + batch_weights = [] + batch_size = len(gt_keypoints_list) + if gt_keypoints_weights_list is None: + gt_keypoints_weights_list = [None] * batch_size + if gt_boxes_list is None: + gt_boxes_list = [None] * batch_size + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_classes_list) + for i, (keypoints, classes, boxes, kp_weights, weights) in enumerate( + zip(gt_keypoints_list, gt_classes_list, + gt_boxes_list, gt_keypoints_weights_list, gt_weights_list)): + keypoints_absolute, kp_weights = self._preprocess_keypoints_and_weights( + out_height=height // self._stride, + out_width=width // self._stride, + keypoints=keypoints, + class_onehot=classes, + class_weights=weights, + keypoint_weights=kp_weights) + num_instances, num_keypoints, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) + + # If boxes are provided, compute the joint center from it. + if boxes is not None: + # Compute joint center from boxes. + boxes = box_list.BoxList(boxes) + boxes = box_list_ops.to_absolute_coordinates(boxes, + height // self._stride, + width // self._stride) + y_center, x_center, _, _ = boxes.get_center_coordinates_and_sizes() + else: + # TODO(yuhuic): Add the logic to generate object centers from keypoints. + raise NotImplementedError(( + 'The functionality of generating object centers from keypoints is' + ' not implemented yet. Please provide groundtruth bounding boxes.' + )) + + # Tile the yx center coordinates to be the same shape as keypoints. + y_center_tiled = tf.tile( + tf.reshape(y_center, shape=[num_instances, 1]), + multiples=[1, num_keypoints]) + x_center_tiled = tf.tile( + tf.reshape(x_center, shape=[num_instances, 1]), + multiples=[1, num_keypoints]) + # [num_instance * num_keypoints, num_neighbors] + (y_source_neighbors, x_source_neighbors, + valid_sources) = ta_utils.get_surrounding_grids( + height // self._stride, width // self._stride, + tf.keras.backend.flatten(y_center_tiled), + tf.keras.backend.flatten(x_center_tiled), self._peak_radius) + + _, num_neighbors = shape_utils.combined_static_and_dynamic_shape( + y_source_neighbors) + valid_keypoints = tf.cast( + valid_sources, dtype=tf.float32) * tf.stack( + [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1) + + # Compute the offsets and indices of the box centers. Shape: + # offsets: [num_instances * num_keypoints, 2] + # indices: [num_instances * num_keypoints, 2] + (offsets, indices) = ta_utils.compute_floor_offsets_with_indices( + y_source=y_source_neighbors, + x_source=x_source_neighbors, + y_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]), + x_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 1])) + # Reshape to: + # offsets: [num_instances * num_keypoints * num_neighbors, 2] + # indices: [num_instances * num_keypoints * num_neighbors, 2] + offsets = tf.reshape(offsets, [-1, 2]) + indices = tf.reshape(indices, [-1, 2]) + + # keypoint type tensor: [num_instances, num_keypoints, num_neighbors]. 
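+      # The keypoint type index is appended as a fourth column of
+      # batch_indices so that predictions can later be gathered per keypoint
+      # type with get_batch_predictions_from_indices.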
+ tiled_keypoint_types = self._get_keypoint_types( + num_instances, num_keypoints, num_neighbors) + + batch_index = tf.fill( + [num_instances * num_keypoints * num_neighbors, 1], i) + batch_indices.append( + tf.concat([batch_index, indices, + tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1)) + batch_offsets.append(offsets) + batch_weights.append(tf.keras.backend.flatten(valid_keypoints)) + + # Concatenate the tensors in the batch in the first dimension: + # shape: [batch_size * num_instances * num_keypoints, 4] + batch_indices = tf.concat(batch_indices, axis=0) + # shape: [batch_size * num_instances * num_keypoints] + batch_weights = tf.concat(batch_weights, axis=0) + # shape: [batch_size * num_instances * num_keypoints, 2] + batch_offsets = tf.concat(batch_offsets, axis=0) + return (batch_indices, batch_offsets, batch_weights) + + +def _resize_masks(masks, height, width, method): + # Resize segmentation masks to conform to output dimensions. Use TF2 + # image resize because TF1's version is buggy: + # https://yaqs.corp.google.com/eng/q/4970450458378240 + masks = tf2.image.resize( + masks[:, :, :, tf.newaxis], + size=(height, width), + method=method) + return masks[:, :, :, 0] + + +class CenterNetMaskTargetAssigner(object): + """Wrapper to compute targets for segmentation masks.""" + + def __init__(self, stride): + self._stride = stride + + def assign_segmentation_targets( + self, gt_masks_list, gt_classes_list, + mask_resize_method=ResizeMethod.BILINEAR): + """Computes the segmentation targets. + + This utility produces a semantic segmentation mask for each class, starting + with whole image instance segmentation masks. Effectively, each per-class + segmentation target is the union of all masks from that class. + + Args: + gt_masks_list: A list of float tensors with shape [num_boxes, + input_height, input_width] with values in {0, 1} representing instance + masks for each object. + gt_classes_list: A list of float tensors with shape [num_boxes, + num_classes] representing the one-hot encoded class labels for each box + in the gt_boxes_list. + mask_resize_method: A `tf.compat.v2.image.ResizeMethod`. The method to use + when resizing masks from input resolution to output resolution. + + Returns: + segmentation_targets: An int32 tensor of size [batch_size, output_height, + output_width, num_classes] representing the class of each location in + the output space. + """ + # TODO(ronnyvotel): Handle groundtruth weights. + _, num_classes = shape_utils.combined_static_and_dynamic_shape( + gt_classes_list[0]) + + _, input_height, input_width = ( + shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0])) + output_height = input_height // self._stride + output_width = input_width // self._stride + + segmentation_targets_list = [] + for gt_masks, gt_classes in zip(gt_masks_list, gt_classes_list): + gt_masks = _resize_masks(gt_masks, output_height, output_width, + mask_resize_method) + gt_masks = gt_masks[:, :, :, tf.newaxis] + gt_classes_reshaped = tf.reshape(gt_classes, [-1, 1, 1, num_classes]) + # Shape: [h, w, num_classes]. + segmentations_for_image = tf.reduce_max( + gt_masks * gt_classes_reshaped, axis=0) + # Avoid the case where max of an empty array is -inf. 
+ segmentations_for_image = tf.maximum(segmentations_for_image, 0.0) + segmentation_targets_list.append(segmentations_for_image) + + segmentation_target = tf.stack(segmentation_targets_list, axis=0) + return segmentation_target + + +class CenterNetDensePoseTargetAssigner(object): + """Wrapper to compute targets for DensePose task.""" + + def __init__(self, stride, num_parts=24): + self._stride = stride + self._num_parts = num_parts + + def assign_part_and_coordinate_targets(self, + height, + width, + gt_dp_num_points_list, + gt_dp_part_ids_list, + gt_dp_surface_coords_list, + gt_weights_list=None): + """Returns the DensePose part_id and coordinate targets and their indices. + + The returned values are expected to be used with predicted tensors + of size (batch_size, height//self._stride, width//self._stride, 2). The + predicted values at the relevant indices can be retrieved with the + get_batch_predictions_from_indices function. + + Args: + height: int, height of input to the model. This is used to determine the + height of the output. + width: int, width of the input to the model. This is used to determine the + width of the output. + gt_dp_num_points_list: a list of 1-D tf.int32 tensors of shape [num_boxes] + containing the number of DensePose sampled points per box. + gt_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape + [num_boxes, max_sampled_points] containing the DensePose part ids + (0-indexed) for each sampled point. Note that there may be padding, as + boxes may contain a different number of sampled points. + gt_dp_surface_coords_list: a list of 3-D tf.float32 tensors of shape + [num_boxes, max_sampled_points, 4] containing the DensePose surface + coordinates (normalized) for each sampled point. Note that there may be + padding. + gt_weights_list: A list of 1-D tensors with shape [num_boxes] + corresponding to the weight of each groundtruth detection box. + + Returns: + batch_indices: an integer tensor of shape [num_total_points, 4] holding + the indices inside the predicted tensor which should be penalized. The + first column indicates the index along the batch dimension and the + second and third columns indicate the index along the y and x + dimensions respectively. The fourth column is the part index. + batch_part_ids: an int tensor of shape [num_total_points, num_parts] + holding 1-hot encodings of parts for each sampled point. + batch_surface_coords: a float tensor of shape [num_total_points, 2] + holding the expected (v, u) coordinates for each sampled point. + batch_weights: a float tensor of shape [num_total_points] indicating the + weight of each prediction. + Note that num_total_points = batch_size * num_boxes * max_sampled_points. + """ + + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_dp_num_points_list) + + batch_indices = [] + batch_part_ids = [] + batch_surface_coords = [] + batch_weights = [] + + for i, (num_points, part_ids, surface_coords, weights) in enumerate( + zip(gt_dp_num_points_list, gt_dp_part_ids_list, + gt_dp_surface_coords_list, gt_weights_list)): + num_boxes, max_sampled_points = ( + shape_utils.combined_static_and_dynamic_shape(part_ids)) + part_ids_flattened = tf.reshape(part_ids, [-1]) + part_ids_one_hot = tf.one_hot(part_ids_flattened, depth=self._num_parts) + # Get DensePose coordinates in the output space. 
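+      # Each sampled point is (y, x, v, u). The (y, x) components are used
+      # below as spatial indices into the output feature map, while (v, u)
+      # become the surface-coordinate regression targets.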
+ surface_coords_abs = densepose_ops.to_absolute_coordinates( + surface_coords, height // self._stride, width // self._stride) + surface_coords_abs = tf.reshape(surface_coords_abs, [-1, 4]) + # Each tensor has shape [num_boxes * max_sampled_points]. + yabs, xabs, v, u = tf.unstack(surface_coords_abs, axis=-1) + + # Get the indices (in output space) for the DensePose coordinates. Note + # that if self._stride is larger than 1, this will have the effect of + # reducing spatial resolution of the groundtruth points. + indices_y = tf.cast(yabs, tf.int32) + indices_x = tf.cast(xabs, tf.int32) + + # Assign ones if weights are not provided. + if weights is None: + weights = tf.ones(num_boxes, dtype=tf.float32) + # Create per-point weights. + weights_per_point = tf.reshape( + tf.tile(weights[:, tf.newaxis], multiples=[1, max_sampled_points]), + shape=[-1]) + # Mask out invalid (i.e. padded) DensePose points. + num_points_tiled = tf.tile(num_points[:, tf.newaxis], + multiples=[1, max_sampled_points]) + range_tiled = tf.tile(tf.range(max_sampled_points)[tf.newaxis, :], + multiples=[num_boxes, 1]) + valid_points = tf.math.less(range_tiled, num_points_tiled) + valid_points = tf.cast(tf.reshape(valid_points, [-1]), dtype=tf.float32) + weights_per_point = weights_per_point * valid_points + + # Shape of [num_boxes * max_sampled_points] integer tensor filled with + # current batch index. + batch_index = i * tf.ones_like(indices_y, dtype=tf.int32) + batch_indices.append( + tf.stack([batch_index, indices_y, indices_x, part_ids_flattened], + axis=1)) + batch_part_ids.append(part_ids_one_hot) + batch_surface_coords.append(tf.stack([v, u], axis=1)) + batch_weights.append(weights_per_point) + + batch_indices = tf.concat(batch_indices, axis=0) + batch_part_ids = tf.concat(batch_part_ids, axis=0) + batch_surface_coords = tf.concat(batch_surface_coords, axis=0) + batch_weights = tf.concat(batch_weights, axis=0) + return batch_indices, batch_part_ids, batch_surface_coords, batch_weights + + +class CenterNetTrackTargetAssigner(object): + """Wrapper to compute targets for tracking task. + + Reference paper: A Simple Baseline for Multi-Object Tracking [1] + [1]: https://arxiv.org/abs/2004.01888 + """ + + def __init__(self, stride, num_track_ids): + self._stride = stride + self._num_track_ids = num_track_ids + + def assign_track_targets(self, + height, + width, + gt_track_ids_list, + gt_boxes_list, + gt_weights_list=None): + """Computes the track ID targets. + + Args: + height: int, height of input to the model. This is used to determine the + height of the output. + width: int, width of the input to the model. This is used to determine the + width of the output. + gt_track_ids_list: A list of 1-D tensors with shape [num_boxes] + corresponding to the track ID of each groundtruth detection box. + gt_boxes_list: A list of float tensors with shape [num_boxes, 4] + representing the groundtruth detection bounding boxes for each sample in + the batch. The coordinates are expected in normalized coordinates. + gt_weights_list: A list of 1-D tensors with shape [num_boxes] + corresponding to the weight of each groundtruth detection box. + + Returns: + batch_indices: an integer tensor of shape [batch_size, num_boxes, 3] + holding the indices inside the predicted tensor which should be + penalized. The first column indicates the index along the batch + dimension and the second and third columns indicate the index + along the y and x dimensions respectively. 
+ batch_weights: a float tensor of shape [batch_size, num_boxes] indicating + the weight of each prediction. + track_id_targets: An int32 tensor of size [batch_size, num_boxes, + num_track_ids] containing the one-hot track ID vector of each + groundtruth detection box. + """ + track_id_targets = tf.one_hot( + gt_track_ids_list, depth=self._num_track_ids, axis=-1) + + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_boxes_list) + + batch_indices = [] + batch_weights = [] + + for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)): + boxes = box_list.BoxList(boxes) + boxes = box_list_ops.to_absolute_coordinates(boxes, + height // self._stride, + width // self._stride) + # Get the box center coordinates. Each returned tensors have the shape of + # [num_boxes] + (y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes() + num_boxes = tf.shape(x_center) + + # Compute the indices of the box centers. Shape: + # indices: [num_boxes, 2] + (_, indices) = ta_utils.compute_floor_offsets_with_indices( + y_source=y_center, x_source=x_center) + + # Assign ones if weights are not provided. + if weights is None: + weights = tf.ones(num_boxes, dtype=tf.float32) + + # Shape of [num_boxes, 1] integer tensor filled with current batch index. + batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32) + batch_indices.append(tf.concat([batch_index, indices], axis=1)) + batch_weights.append(weights) + + batch_indices = tf.stack(batch_indices, axis=0) + batch_weights = tf.stack(batch_weights, axis=0) + + return batch_indices, batch_weights, track_id_targets + + +def filter_mask_overlap_min_area(masks): + """If a pixel belongs to 2 instances, remove it from the larger instance.""" + + num_instances = tf.shape(masks)[0] + def _filter_min_area(): + """Helper function to filter non empty masks.""" + areas = tf.reduce_sum(masks, axis=[1, 2], keepdims=True) + per_pixel_area = masks * areas + # Make sure background is ignored in argmin. + per_pixel_area = (masks * per_pixel_area + + (1 - masks) * per_pixel_area.dtype.max) + min_index = tf.cast(tf.argmin(per_pixel_area, axis=0), tf.int32) + + filtered_masks = ( + tf.range(num_instances)[:, tf.newaxis, tf.newaxis] + == + min_index[tf.newaxis, :, :] + ) + + return tf.cast(filtered_masks, tf.float32) * masks + + return tf.cond(num_instances > 0, _filter_min_area, + lambda: masks) + + +def filter_mask_overlap(masks, method='min_area'): + + if method == 'min_area': + return filter_mask_overlap_min_area(masks) + else: + raise ValueError('Unknown mask overlap filter type - {}'.format(method)) + + +class CenterNetCornerOffsetTargetAssigner(object): + """Wrapper to compute corner offsets for boxes using masks.""" + + def __init__(self, stride, overlap_resolution='min_area'): + """Initializes the corner offset target assigner. + + Args: + stride: int, the stride of the network in output pixels. + overlap_resolution: string, specifies how we handle overlapping + instance masks. Currently only 'min_area' is supported which assigns + overlapping pixels to the instance with the minimum area. + """ + + self._stride = stride + self._overlap_resolution = overlap_resolution + + def assign_corner_offset_targets( + self, gt_boxes_list, gt_masks_list): + """Computes the corner offset targets and foreground map. + + For each pixel that is part of any object's foreground, this function + computes the relative offsets to the top-left and bottom-right corners of + that instance's bounding box. 
It also returns a foreground map to indicate + which pixels contain valid corner offsets. + + Args: + gt_boxes_list: A list of float tensors with shape [num_boxes, 4] + representing the groundtruth detection bounding boxes for each sample in + the batch. The coordinates are expected in normalized coordinates. + gt_masks_list: A list of float tensors with shape [num_boxes, + input_height, input_width] with values in {0, 1} representing instance + masks for each object. + + Returns: + corner_offsets: A float tensor of shape [batch_size, height, width, 4] + containing, in order, the (y, x) offsets to the top left corner and + the (y, x) offsets to the bottom right corner for each foregroung pixel + foreground: A float tensor of shape [batch_size, height, width] in which + each pixel is set to 1 if it is a part of any instance's foreground + (and thus contains valid corner offsets) and 0 otherwise. + + """ + _, input_height, input_width = ( + shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0])) + output_height = input_height // self._stride + output_width = input_width // self._stride + y_grid, x_grid = tf.meshgrid( + tf.range(output_height), tf.range(output_width), + indexing='ij') + y_grid, x_grid = tf.cast(y_grid, tf.float32), tf.cast(x_grid, tf.float32) + + corner_targets = [] + foreground_targets = [] + for gt_masks, gt_boxes in zip(gt_masks_list, gt_boxes_list): + gt_masks = _resize_masks(gt_masks, output_height, output_width, + method=ResizeMethod.NEAREST_NEIGHBOR) + gt_masks = filter_mask_overlap(gt_masks, self._overlap_resolution) + + ymin, xmin, ymax, xmax = tf.unstack(gt_boxes, axis=1) + ymin, ymax = ymin * output_height, ymax * output_height + xmin, xmax = xmin * output_width, xmax * output_width + + top_y = ymin[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis] + left_x = xmin[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis] + bottom_y = ymax[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis] + right_x = xmax[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis] + + foreground_target = tf.cast(tf.reduce_sum(gt_masks, axis=0) > 0.5, + tf.float32) + foreground_targets.append(foreground_target) + + corner_target = tf.stack([ + tf.reduce_sum(top_y * gt_masks, axis=0), + tf.reduce_sum(left_x * gt_masks, axis=0), + tf.reduce_sum(bottom_y * gt_masks, axis=0), + tf.reduce_sum(right_x * gt_masks, axis=0), + ], axis=2) + + corner_targets.append(corner_target) + + return (tf.stack(corner_targets, axis=0), + tf.stack(foreground_targets, axis=0)) + + +class CenterNetTemporalOffsetTargetAssigner(object): + """Wrapper to compute target tensors for the temporal offset task. + + This class has methods that take as input a batch of ground truth tensors + (in the form of a list) and returns the targets required to train the + temporal offset task. + """ + + def __init__(self, stride): + """Initializes the target assigner. + + Args: + stride: int, the stride of the network in output pixels. + """ + + self._stride = stride + + def assign_temporal_offset_targets(self, + height, + width, + gt_boxes_list, + gt_offsets_list, + gt_match_list, + gt_weights_list=None): + """Returns the temporal offset targets and their indices. + + For each ground truth box, this function assigns it the corresponding + temporal offset to train the model. + + Args: + height: int, height of input to the model. This is used to determine the + height of the output. + width: int, width of the input to the model. This is used to determine the + width of the output. 
+ gt_boxes_list: A list of float tensors with shape [num_boxes, 4] + representing the groundtruth detection bounding boxes for each sample in + the batch. The coordinates are expected in normalized coordinates. + gt_offsets_list: A list of 2-D tf.float32 tensors of shape [num_boxes, 2] + containing the spatial offsets of objects' centers compared with the + previous frame. + gt_match_list: A list of 1-D tf.float32 tensors of shape [num_boxes] + containing flags that indicate if an object has existed in the + previous frame. + gt_weights_list: A list of tensors with shape [num_boxes] corresponding to + the weight of each groundtruth detection box. + + Returns: + batch_indices: an integer tensor of shape [num_boxes, 3] holding the + indices inside the predicted tensor which should be penalized. The + first column indicates the index along the batch dimension and the + second and third columns indicate the index along the y and x + dimensions respectively. + batch_temporal_offsets: a float tensor of shape [num_boxes, 2] of the + expected y and x temporal offset of each object center in the + output space. + batch_weights: a float tensor of shape [num_boxes] indicating the + weight of each prediction. + """ + + if gt_weights_list is None: + gt_weights_list = [None] * len(gt_boxes_list) + + batch_indices = [] + batch_weights = [] + batch_temporal_offsets = [] + + for i, (boxes, offsets, match_flags, weights) in enumerate(zip( + gt_boxes_list, gt_offsets_list, gt_match_list, gt_weights_list)): + boxes = box_list.BoxList(boxes) + boxes = box_list_ops.to_absolute_coordinates(boxes, + height // self._stride, + width // self._stride) + # Get the box center coordinates. Each returned tensors have the shape of + # [num_boxes] + (y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes() + num_boxes = tf.shape(x_center) + + # Compute the offsets and indices of the box centers. Shape: + # offsets: [num_boxes, 2] + # indices: [num_boxes, 2] + (_, indices) = ta_utils.compute_floor_offsets_with_indices( + y_source=y_center, x_source=x_center) + + # Assign ones if weights are not provided. + # if an object is not matched, its weight becomes zero. + if weights is None: + weights = tf.ones(num_boxes, dtype=tf.float32) + weights *= match_flags + + # Shape of [num_boxes, 1] integer tensor filled with current batch index. + batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32) + batch_indices.append(tf.concat([batch_index, indices], axis=1)) + batch_weights.append(weights) + batch_temporal_offsets.append(offsets) + + batch_indices = tf.concat(batch_indices, axis=0) + batch_weights = tf.concat(batch_weights, axis=0) + batch_temporal_offsets = tf.concat(batch_temporal_offsets, axis=0) + return (batch_indices, batch_temporal_offsets, batch_weights) + + +class DETRTargetAssigner(object): + """Target assigner for DETR (https://arxiv.org/abs/2005.12872). + + Detection Transformer (DETR) matches predicted boxes to groundtruth directly + to determine targets instead of matching anchors to groundtruth. Hence, the + new target assigner. + """ + + def __init__(self): + """Construct Object Detection Target Assigner.""" + self._similarity_calc = sim_calc.DETRSimilarity() + self._matcher = hungarian_matcher.HungarianBipartiteMatcher() + + def batch_assign(self, + pred_box_batch, + gt_box_batch, + pred_class_batch, + gt_class_targets_batch, + gt_weights_batch=None, + unmatched_class_label_batch=None): + """Batched assignment of classification and regression targets. 
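As a toy picture of what matching predictions directly to groundtruth means, the sketch below scores every (groundtruth, prediction) pair and solves a one-to-one assignment. It illustrates the idea rather than the library's implementation: plain IoU stands in for DETRSimilarity, and SciPy's Hungarian solver stands in for HungarianBipartiteMatcher.

import numpy as np
from scipy.optimize import linear_sum_assignment


def toy_detr_match(gt_boxes, pred_boxes):
  """Matches each groundtruth box to one prediction by maximizing total IoU."""
  ymin = np.maximum(gt_boxes[:, None, 0], pred_boxes[None, :, 0])
  xmin = np.maximum(gt_boxes[:, None, 1], pred_boxes[None, :, 1])
  ymax = np.minimum(gt_boxes[:, None, 2], pred_boxes[None, :, 2])
  xmax = np.minimum(gt_boxes[:, None, 3], pred_boxes[None, :, 3])
  intersection = np.clip(ymax - ymin, 0, None) * np.clip(xmax - xmin, 0, None)
  gt_areas = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])
  pred_areas = (pred_boxes[:, 2] - pred_boxes[:, 0]) * (
      pred_boxes[:, 3] - pred_boxes[:, 1])
  iou = intersection / (gt_areas[:, None] + pred_areas[None, :] - intersection)
  # Hungarian assignment on the negated similarity, i.e. a minimum-cost match.
  gt_idx, pred_idx = linear_sum_assignment(-iou)
  return {int(g): int(p) for g, p in zip(gt_idx, pred_idx)}


# Two groundtruth boxes and two predictions in (ymin, xmin, ymax, xmax) form.
gt = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]])
preds = np.array([[0.4, 0.4, 1.0, 1.0], [0.0, 0.0, 0.6, 0.6]])
print(toy_detr_match(gt, preds))  # {0: 1, 1: 0}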
+ + Args: + pred_box_batch: a tensor of shape [batch_size, num_queries, 4] + representing predicted bounding boxes. + gt_box_batch: a tensor of shape [batch_size, num_queries, 4] + representing groundtruth bounding boxes. + pred_class_batch: A list of tensors with length batch_size, where each + each tensor has shape [num_queries, num_classes] to be used + by certain similarity calculators. + gt_class_targets_batch: a list of tensors with length batch_size, where + each tensor has shape [num_gt_boxes_i, num_classes] and + num_gt_boxes_i is the number of boxes in the ith boxlist of + gt_box_batch. + gt_weights_batch: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + unmatched_class_label_batch: a float32 tensor with shape + [d_1, d_2, ..., d_k] which is consistent with the classification target + for each anchor (and can be empty for scalar targets). This shape must + thus be compatible with the `gt_class_targets_batch`. + + Returns: + batch_cls_targets: a tensor with shape [batch_size, num_pred_boxes, + num_classes], + batch_cls_weights: a tensor with shape [batch_size, num_pred_boxes, + num_classes], + batch_reg_targets: a tensor with shape [batch_size, num_pred_boxes, + box_code_dimension] + batch_reg_weights: a tensor with shape [batch_size, num_pred_boxes]. + """ + pred_box_batch = [ + box_list.BoxList(pred_box) + for pred_box in tf.unstack(pred_box_batch)] + gt_box_batch = [ + box_list.BoxList(gt_box) + for gt_box in tf.unstack(gt_box_batch)] + + cls_targets_list = [] + cls_weights_list = [] + reg_targets_list = [] + reg_weights_list = [] + if gt_weights_batch is None: + gt_weights_batch = [None] * len(gt_class_targets_batch) + if unmatched_class_label_batch is None: + unmatched_class_label_batch = [None] * len(gt_class_targets_batch) + pred_class_batch = tf.unstack(pred_class_batch) + for (pred_boxes, gt_boxes, pred_class_batch, gt_class_targets, gt_weights, + unmatched_class_label) in zip(pred_box_batch, gt_box_batch, + pred_class_batch, gt_class_targets_batch, + gt_weights_batch, + unmatched_class_label_batch): + (cls_targets, cls_weights, reg_targets, + reg_weights) = self.assign(pred_boxes, gt_boxes, pred_class_batch, + gt_class_targets, gt_weights, + unmatched_class_label) + cls_targets_list.append(cls_targets) + cls_weights_list.append(cls_weights) + reg_targets_list.append(reg_targets) + reg_weights_list.append(reg_weights) + batch_cls_targets = tf.stack(cls_targets_list) + batch_cls_weights = tf.stack(cls_weights_list) + batch_reg_targets = tf.stack(reg_targets_list) + batch_reg_weights = tf.stack(reg_weights_list) + return (batch_cls_targets, batch_cls_weights, batch_reg_targets, + batch_reg_weights) + + def assign(self, + pred_boxes, + gt_boxes, + pred_classes, + gt_labels, + gt_weights=None, + unmatched_class_label=None): + """Assign classification and regression targets to each box_pred. + + For a given set of pred_boxes and groundtruth detections, match pred_boxes + to gt_boxes and assign classification and regression targets to + each box_pred as well as weights based on the resulting match (specifying, + e.g., which pred_boxes should not contribute to training loss). + + pred_boxes that are not matched to anything are given a classification + target of `unmatched_cls_target`. + + Args: + pred_boxes: a BoxList representing N pred_boxes + gt_boxes: a BoxList representing M groundtruth boxes + pred_classes: A tensor with shape [max_num_boxes, num_classes] + to be used by certain similarity calculators. 
+ gt_labels: a tensor of shape [M, num_classes] + with labels for each of the ground_truth boxes. The subshape + [num_classes] can be empty (corresponding to scalar inputs). When set + to None, gt_labels assumes a binary problem where all + ground_truth boxes get a positive label (of 1). + gt_weights: a float tensor of shape [M] indicating the weight to + assign to all pred_boxes match to a particular groundtruth box. The + weights must be in [0., 1.]. If None, all weights are set to 1. + Generally no groundtruth boxes with zero weight match to any pred_boxes + as matchers are aware of groundtruth weights. Additionally, + `cls_weights` and `reg_weights` are calculated using groundtruth + weights as an added safety. + unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] + which is consistent with the classification target for each + anchor (and can be empty for scalar targets). This shape must thus be + compatible with the groundtruth labels that are passed to the "assign" + function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). + + Returns: + cls_targets: a float32 tensor with shape [num_pred_boxes, num_classes], + where the subshape [num_classes] is compatible with gt_labels + which has shape [num_gt_boxes, num_classes]. + cls_weights: a float32 tensor with shape [num_pred_boxes, num_classes], + representing weights for each element in cls_targets. + reg_targets: a float32 tensor with shape [num_pred_boxes, + box_code_dimension] + reg_weights: a float32 tensor with shape [num_pred_boxes] + + """ + if not unmatched_class_label: + unmatched_class_label = tf.constant( + [1] + [0] * (gt_labels.shape[1] - 1), tf.float32) + + if gt_weights is None: + num_gt_boxes = gt_boxes.num_boxes_static() + if not num_gt_boxes: + num_gt_boxes = gt_boxes.num_boxes() + gt_weights = tf.ones([num_gt_boxes], dtype=tf.float32) + + gt_boxes.add_field(fields.BoxListFields.classes, gt_labels) + pred_boxes.add_field(fields.BoxListFields.classes, pred_classes) + + match_quality_matrix = self._similarity_calc.compare( + gt_boxes, + pred_boxes) + match = self._matcher.match(match_quality_matrix, + valid_rows=tf.greater(gt_weights, 0)) + + matched_gt_boxes = match.gather_based_on_match( + gt_boxes.get(), + unmatched_value=tf.zeros(4), + ignored_value=tf.zeros(4)) + matched_gt_boxlist = box_list.BoxList(matched_gt_boxes) + ty, tx, th, tw = matched_gt_boxlist.get_center_coordinates_and_sizes() + reg_targets = tf.transpose(tf.stack([ty, tx, th, tw])) + cls_targets = match.gather_based_on_match( + gt_labels, + unmatched_value=unmatched_class_label, + ignored_value=unmatched_class_label) + reg_weights = match.gather_based_on_match( + gt_weights, + ignored_value=0., + unmatched_value=0.) + cls_weights = match.gather_based_on_match( + gt_weights, + ignored_value=0., + unmatched_value=1) + + # convert cls_weights from per-box_pred to per-class. 
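+ # For example (illustrative shapes only): with 100 predicted boxes and 91
+ # classes, cls_weights goes from [100] to [100, 1] and is then tiled with
+ # multiples [1, 91], so each box's weight is repeated across every class.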
+ class_label_shape = tf.shape(cls_targets)[1:] + weights_multiple = tf.concat( + [tf.constant([1]), class_label_shape], + axis=0) + cls_weights = tf.expand_dims(cls_weights, -1) + cls_weights = tf.tile(cls_weights, weights_multiple) + + return (cls_targets, cls_weights, reg_targets, reg_weights) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/target_assigner.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/target_assigner.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f903a36622531607d30bd4349d665567c51456d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/target_assigner.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/target_assigner_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/target_assigner_test.py new file mode 100644 index 0000000000000000000000000000000000000000..09ccab5b814932ee1ccbd8c58171492c1cc593e8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/core/target_assigner_test.py @@ -0,0 +1,2505 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.target_assigner.""" +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import keypoint_box_coder +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.core import box_list +from object_detection.core import region_similarity_calculator +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner as targetassigner +from object_detection.matchers import argmax_matcher +from object_detection.utils import np_box_ops +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +class TargetAssignerTest(test_case.TestCase): + + def test_assign_agnostic(self): + def graph_fn(anchor_means, groundtruth_box_corners): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = target_assigner.assign( + anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0, 0.5, .5, 1.0]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9]], + dtype=np.float32) + exp_cls_targets = [[1], [1], [0]] + exp_cls_weights = [[1], [1], [1]] + exp_reg_targets = [[0, 0, 0, 0], + [0, 0, -1, 1], + [0, 0, 0, 0]] + exp_reg_weights = [1, 1, 0] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_class_agnostic_with_ignored_matches(self): + # Note: test is very similar to above. The third box matched with an IOU + # of 0.35, which is between the matched and unmatched threshold. This means + # That like above the expected classification targets are [1, 1, 0]. + # Unlike above, the third target is ignored and therefore expected + # classification weights are [1, 1, 0]. 
+ def graph_fn(anchor_means, groundtruth_box_corners): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.3) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = target_assigner.assign( + anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0.0, 0.5, .9, 1.0]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9]], dtype=np.float32) + exp_cls_targets = [[1], [1], [0]] + exp_cls_weights = [[1], [1], [0]] + exp_reg_targets = [[0, 0, 0, 0], + [0, 0, -1, 1], + [0, 0, 0, 0]] + exp_reg_weights = [1, 1, 0] + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_agnostic_with_keypoints(self): + + def graph_fn(anchor_means, groundtruth_box_corners, + groundtruth_keypoints): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = keypoint_box_coder.KeypointBoxCoder( + num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0]) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + groundtruth_boxlist.add_field(fields.BoxListFields.keypoints, + groundtruth_keypoints) + result = target_assigner.assign( + anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, .9, 1.0]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.45, 0.45, 0.95, 0.95]], + dtype=np.float32) + groundtruth_keypoints = np.array( + [[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]], + [[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]], + dtype=np.float32) + exp_cls_targets = [[1], [1], [0]] + exp_cls_weights = [[1], [1], [1]] + exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13, + -5], + [-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11, + -11, -7], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] + exp_reg_weights = [1, 1, 0] + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [anchor_means, + groundtruth_box_corners, + groundtruth_keypoints]) + 
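+ # Each row of exp_reg_targets above has 4 + 2 * 6 = 16 entries: the encoded
+ # box (ty, tx, th, tw) followed by two offsets for each of the 6 keypoints,
+ # as produced by KeypointBoxCoder(num_keypoints=6).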
self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_class_agnostic_with_keypoints_and_ignored_matches(self): + # Note: test is very similar to above. The third box matched with an IOU + # of 0.35, which is between the matched and unmatched threshold. This means + # That like above the expected classification targets are [1, 1, 0]. + # Unlike above, the third target is ignored and therefore expected + # classification weights are [1, 1, 0]. + def graph_fn(anchor_means, groundtruth_box_corners, + groundtruth_keypoints): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = keypoint_box_coder.KeypointBoxCoder( + num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0]) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + groundtruth_boxlist.add_field(fields.BoxListFields.keypoints, + groundtruth_keypoints) + result = target_assigner.assign( + anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, .9, 1.0]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.45, 0.45, 0.95, 0.95]], + dtype=np.float32) + groundtruth_keypoints = np.array( + [[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]], + [[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]], + dtype=np.float32) + exp_cls_targets = [[1], [1], [0]] + exp_cls_weights = [[1], [1], [1]] + exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13, + -5], + [-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11, + -11, -7], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] + exp_reg_weights = [1, 1, 0] + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [anchor_means, + groundtruth_box_corners, + groundtruth_keypoints]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_multiclass(self): + + def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, 
matcher, box_coder) + + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = target_assigner.assign( + anchors_boxlist, + groundtruth_boxlist, + groundtruth_labels, + unmatched_class_label=unmatched_class_label) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0, 0.5, .5, 1.0], + [.75, 0, 1.0, .25]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9], + [.75, 0, .95, .27]], dtype=np.float32) + groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0, 0]], dtype=np.float32) + + exp_cls_targets = [[0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], + [1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0]] + exp_cls_weights = [[1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1]] + exp_reg_targets = [[0, 0, 0, 0], + [0, 0, -1, 1], + [0, 0, 0, 0], + [0, 0, -.5, .2]] + exp_reg_weights = [1, 1, 0, 1] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_multiclass_with_groundtruth_weights(self): + + def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels, + groundtruth_weights): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = target_assigner.assign( + anchors_boxlist, + groundtruth_boxlist, + groundtruth_labels, + unmatched_class_label=unmatched_class_label, + groundtruth_weights=groundtruth_weights) + (_, cls_weights, _, reg_weights, _) = result + return (cls_weights, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0, 0.5, .5, 1.0], + [.75, 0, 1.0, .25]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9], + [.75, 0, .95, .27]], dtype=np.float32) + groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0, 0]], dtype=np.float32) + groundtruth_weights = np.array([0.3, 0., 0.5], dtype=np.float32) + + # background class gets weight of 1. + exp_cls_weights = [[0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3], + [0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1], + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]] + exp_reg_weights = [0.3, 0., 0., 0.5] # background class gets weight of 0. 
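+ # Anchors 0, 1 and 3 match groundtruth boxes 0, 1 and 2 and so inherit the
+ # groundtruth weights 0.3, 0. and 0.5; the unmatched anchor 2 falls back to
+ # a classification weight of 1 and a regression weight of 0.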
+ + (cls_weights_out, reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_box_corners, groundtruth_labels, + groundtruth_weights + ]) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_assign_multidimensional_class_targets(self): + + def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + + unmatched_class_label = tf.constant([[0, 0], [0, 0]], tf.float32) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = target_assigner.assign( + anchors_boxlist, + groundtruth_boxlist, + groundtruth_labels, + unmatched_class_label=unmatched_class_label) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0, 0.5, .5, 1.0], + [.75, 0, 1.0, .25]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9], + [.75, 0, .95, .27]], dtype=np.float32) + + groundtruth_labels = np.array([[[0, 1], [1, 0]], + [[1, 0], [0, 1]], + [[0, 1], [1, .5]]], np.float32) + + exp_cls_targets = [[[0, 1], [1, 0]], + [[1, 0], [0, 1]], + [[0, 0], [0, 0]], + [[0, 1], [1, .5]]] + exp_cls_weights = [[[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]]] + exp_reg_targets = [[0, 0, 0, 0], + [0, 0, -1, 1], + [0, 0, 0, 0], + [0, 0, -.5, .2]] + exp_reg_weights = [1, 1, 0, 1] + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_assign_empty_groundtruth(self): + + def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + unmatched_class_label = tf.constant([0, 0, 0], tf.float32) + anchors_boxlist = box_list.BoxList(anchor_means) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + result = target_assigner.assign( + anchors_boxlist, + groundtruth_boxlist, + groundtruth_labels, + unmatched_class_label=unmatched_class_label) + (cls_targets, cls_weights, reg_targets, reg_weights, _) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32) + groundtruth_labels = np.zeros((0, 3), dtype=np.float32) + anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], 
+ [0, 0.5, .5, 1.0], + [.75, 0, 1.0, .25]], + dtype=np.float32) + exp_cls_targets = [[0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]] + exp_cls_weights = [[1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [1, 1, 1]] + exp_reg_targets = [[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]] + exp_reg_weights = [0, 0, 0, 0] + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def test_raises_error_on_incompatible_groundtruth_boxes_and_labels(self): + similarity_calc = region_similarity_calculator.NegSqDistSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder() + unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + + prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 0.8], + [0, 0.5, .5, 1.0], + [.75, 0, 1.0, .25]]) + priors = box_list.BoxList(prior_means) + + box_corners = [[0.0, 0.0, 0.5, 0.5], + [0.0, 0.0, 0.5, 0.8], + [0.5, 0.5, 0.9, 0.9], + [.75, 0, .95, .27]] + boxes = box_list.BoxList(tf.constant(box_corners)) + + groundtruth_labels = tf.constant([[0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0, 0]], tf.float32) + with self.assertRaisesRegexp(ValueError, 'Unequal shapes'): + target_assigner.assign( + priors, + boxes, + groundtruth_labels, + unmatched_class_label=unmatched_class_label) + + def test_raises_error_on_invalid_groundtruth_labels(self): + similarity_calc = region_similarity_calculator.NegSqDistSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=1.0) + unmatched_class_label = tf.constant([[0, 0], [0, 0], [0, 0]], tf.float32) + target_assigner = targetassigner.TargetAssigner( + similarity_calc, matcher, box_coder) + + prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5]]) + priors = box_list.BoxList(prior_means) + + box_corners = [[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9], + [.75, 0, .95, .27]] + boxes = box_list.BoxList(tf.constant(box_corners)) + groundtruth_labels = tf.constant([[[0, 1], [1, 0]]], tf.float32) + + with self.assertRaises(ValueError): + target_assigner.assign( + priors, + boxes, + groundtruth_labels, + unmatched_class_label=unmatched_class_label) + + +class BatchTargetAssignerTest(test_case.TestCase): + + def _get_target_assigner(self): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder) + + def test_batch_assign_targets(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_targets = [None, None] + anchors_boxlist = 
box_list.BoxList(anchor_means) + agnostic_target_assigner = self._get_target_assigner() + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_targets( + agnostic_target_assigner, anchors_boxlist, gt_box_batch, + gt_class_targets) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + exp_cls_targets = [[[1], [0], [0], [0]], + [[0], [1], [1], [0]]] + exp_cls_weights = [[[1], [1], [1], [1]], + [[1], [1], [1], [1]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0.15789001, -0.01500003, 0.57889998, -1.15799987], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 1, 0]] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_multiclass_targets(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_targets = [class_targets1, class_targets2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + num_classes = 3 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_targets( + multiclass_target_assigner, anchors_boxlist, gt_box_batch, + gt_class_targets, unmatched_class_label) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32) + class_targets2 = np.array([[0, 0, 0, 1], + [0, 0, 1, 0]], dtype=np.float32) + + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + exp_cls_targets = [[[0, 1, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0]], + [[1, 0, 0, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]], + [[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0.15789001, -0.01500003, 0.57889998, -1.15799987], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 1, 0]] + + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2 + ]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + 
self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_multiclass_targets_with_padded_groundtruth(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2, groundtruth_weights1, + groundtruth_weights2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_targets = [class_targets1, class_targets2] + gt_weights = [groundtruth_weights1, groundtruth_weights2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + num_classes = 3 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_targets( + multiclass_target_assigner, anchors_boxlist, gt_box_batch, + gt_class_targets, unmatched_class_label, gt_weights) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2], + [0., 0., 0., 0.]], dtype=np.float32) + groundtruth_weights1 = np.array([1, 0], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842], + [0, 0, 0, 0]], + dtype=np.float32) + groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32) + class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32) + class_targets2 = np.array([[0, 0, 0, 1], + [0, 0, 1, 0], + [0, 0, 0, 0]], dtype=np.float32) + + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + exp_cls_targets = [[[0, 1, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0]], + [[1, 0, 0, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]], + [[1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 1]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0.15789001, -0.01500003, 0.57889998, -1.15799987], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 1, 0]] + + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2, groundtruth_weights1, + groundtruth_weights2 + ]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_multidimensional_targets(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_targets = [class_targets1, class_targets2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + target_dimensions = (2, 3) + unmatched_class_label = tf.constant(np.zeros(target_dimensions), + tf.float32) + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_targets( + multiclass_target_assigner, anchors_boxlist, 
gt_box_batch, + gt_class_targets, unmatched_class_label) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + class_targets1 = np.array([[[0, 1, 1], + [1, 1, 0]]], dtype=np.float32) + class_targets2 = np.array([[[0, 1, 1], + [1, 1, 0]], + [[0, 0, 1], + [0, 0, 1]]], dtype=np.float32) + + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + exp_cls_targets = [[[[0., 1., 1.], + [1., 1., 0.]], + [[0., 0., 0.], + [0., 0., 0.]], + [[0., 0., 0.], + [0., 0., 0.]], + [[0., 0., 0.], + [0., 0., 0.]]], + [[[0., 0., 0.], + [0., 0., 0.]], + [[0., 1., 1.], + [1., 1., 0.]], + [[0., 0., 1.], + [0., 0., 1.]], + [[0., 0., 0.], + [0., 0., 0.]]]] + exp_cls_weights = [[[[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]]], + [[[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]], + [[1., 1., 1.], + [1., 1., 1.]]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0.15789001, -0.01500003, 0.57889998, -1.15799987], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 1, 0]] + + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2 + ]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_empty_groundtruth(self): + + def graph_fn(anchor_means, groundtruth_box_corners, gt_class_targets): + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + gt_box_batch = [groundtruth_boxlist] + gt_class_targets_batch = [gt_class_targets] + anchors_boxlist = box_list.BoxList(anchor_means) + + multiclass_target_assigner = self._get_target_assigner() + num_classes = 3 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_targets( + multiclass_target_assigner, anchors_boxlist, + gt_box_batch, gt_class_targets_batch, unmatched_class_label) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32) + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1]], dtype=np.float32) + exp_cls_targets = [[[1, 0, 0, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[1, 1, 1, 1], + [1, 1, 1, 1]]] + exp_reg_targets = [[[0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_reg_weights = [[0, 0]] + num_classes = 3 + pad = 1 + gt_class_targets = np.zeros((0, num_classes + pad), dtype=np.float32) + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_box_corners, gt_class_targets]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + +class BatchGetTargetsTest(test_case.TestCase): + + def test_scalar_targets(self): + 
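+ # batch_match entries that are >= 0 index into that image's groundtruth
+ # tensor; negative entries (-1 for unmatched, -2 for ignored) receive
+ # unmatched_value and unmatched_weight instead.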
batch_match = np.array([[1, 0, 1], + [-2, -1, 1]], dtype=np.int32) + groundtruth_tensors_list = np.array([[11, 12], [13, 14]], dtype=np.int32) + groundtruth_weights_list = np.array([[1.0, 1.0], [1.0, 0.5]], + dtype=np.float32) + unmatched_value = np.array(99, dtype=np.int32) + unmatched_weight = np.array(0.0, dtype=np.float32) + + def graph_fn(batch_match, groundtruth_tensors_list, + groundtruth_weights_list, unmatched_value, unmatched_weight): + targets, weights = targetassigner.batch_get_targets( + batch_match, tf.unstack(groundtruth_tensors_list), + tf.unstack(groundtruth_weights_list), + unmatched_value, unmatched_weight) + return (targets, weights) + + (targets_np, weights_np) = self.execute(graph_fn, [ + batch_match, groundtruth_tensors_list, groundtruth_weights_list, + unmatched_value, unmatched_weight + ]) + self.assertAllEqual([[12, 11, 12], + [99, 99, 14]], targets_np) + self.assertAllClose([[1.0, 1.0, 1.0], + [0.0, 0.0, 0.5]], weights_np) + + def test_1d_targets(self): + batch_match = np.array([[1, 0, 1], + [-2, -1, 1]], dtype=np.int32) + groundtruth_tensors_list = np.array([[[11, 12], [12, 13]], + [[13, 14], [14, 15]]], + dtype=np.float32) + groundtruth_weights_list = np.array([[1.0, 1.0], [1.0, 0.5]], + dtype=np.float32) + unmatched_value = np.array([99, 99], dtype=np.float32) + unmatched_weight = np.array(0.0, dtype=np.float32) + + def graph_fn(batch_match, groundtruth_tensors_list, + groundtruth_weights_list, unmatched_value, unmatched_weight): + targets, weights = targetassigner.batch_get_targets( + batch_match, tf.unstack(groundtruth_tensors_list), + tf.unstack(groundtruth_weights_list), + unmatched_value, unmatched_weight) + return (targets, weights) + + (targets_np, weights_np) = self.execute(graph_fn, [ + batch_match, groundtruth_tensors_list, groundtruth_weights_list, + unmatched_value, unmatched_weight + ]) + self.assertAllClose([[[12, 13], [11, 12], [12, 13]], + [[99, 99], [99, 99], [14, 15]]], targets_np) + self.assertAllClose([[1.0, 1.0, 1.0], + [0.0, 0.0, 0.5]], weights_np) + + +class BatchTargetAssignConfidencesTest(test_case.TestCase): + + def _get_target_assigner(self): + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, + unmatched_threshold=0.5) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder) + + def test_batch_assign_empty_groundtruth(self): + + def graph_fn(anchor_means, groundtruth_box_corners, gt_class_confidences): + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + gt_box_batch = [groundtruth_boxlist] + gt_class_confidences_batch = [gt_class_confidences] + anchors_boxlist = box_list.BoxList(anchor_means) + + num_classes = 3 + implicit_class_weight = 0.5 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + multiclass_target_assigner = self._get_target_assigner() + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_confidences( + multiclass_target_assigner, + anchors_boxlist, + gt_box_batch, + gt_class_confidences_batch, + unmatched_class_label=unmatched_class_label, + include_background_class=True, + implicit_class_weight=implicit_class_weight) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32) + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1]], dtype=np.float32) + num_classes = 3 + pad = 1 + gt_class_confidences = 
np.zeros((0, num_classes + pad), dtype=np.float32) + + exp_cls_targets = [[[1, 0, 0, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5]]] + exp_reg_targets = [[[0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_reg_weights = [[0, 0]] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, + [anchor_means, groundtruth_box_corners, gt_class_confidences]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_confidences_agnostic(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_confidences_batch = [None, None] + anchors_boxlist = box_list.BoxList(anchor_means) + agnostic_target_assigner = self._get_target_assigner() + implicit_class_weight = 0.5 + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_confidences( + agnostic_target_assigner, + anchors_boxlist, + gt_box_batch, + gt_class_confidences_batch, + include_background_class=False, + implicit_class_weight=implicit_class_weight) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + exp_cls_targets = [[[1], [0], [0], [0]], + [[0], [1], [1], [0]]] + exp_cls_weights = [[[1], [0.5], [0.5], [0.5]], + [[0.5], [1], [1], [0.5]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0.15789001, -0.01500003, 0.57889998, -1.15799987], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 1, 0]] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( + graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_confidences_multiclass(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_confidences_batch = [class_targets1, class_targets2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + num_classes = 3 + implicit_class_weight = 0.5 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_confidences( + multiclass_target_assigner, + anchors_boxlist, + gt_box_batch, + gt_class_confidences_batch, + unmatched_class_label=unmatched_class_label, + include_background_class=True, + implicit_class_weight=implicit_class_weight) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + 
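+ # class_targets2 below marks class 2 of its second box with a negative
+ # confidence (-1): the matched anchor gets a zero target but full weight 1
+ # for that class, while classes without explicit evidence keep the implicit
+ # weight of 0.5.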
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32) + class_targets2 = np.array([[0, 0, 0, 1], + [0, 0, -1, 0]], dtype=np.float32) + + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + exp_cls_targets = [[[0, 1, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0]], + [[1, 0, 0, 0], + [0, 0, 0, 1], + [1, 0, 0, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[1, 1, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5]], + [[0.5, 0.5, 0.5, 0.5], + [1, 0.5, 0.5, 1], + [0.5, 0.5, 1, 0.5], + [0.5, 0.5, 0.5, 0.5]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 0, 0]] + + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2 + ]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_confidences_multiclass_with_padded_groundtruth(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2, groundtruth_weights1, + groundtruth_weights2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_confidences_batch = [class_targets1, class_targets2] + gt_weights = [groundtruth_weights1, groundtruth_weights2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + num_classes = 3 + unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) + implicit_class_weight = 0.5 + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_confidences( + multiclass_target_assigner, + anchors_boxlist, + gt_box_batch, + gt_class_confidences_batch, + gt_weights, + unmatched_class_label=unmatched_class_label, + include_background_class=True, + implicit_class_weight=implicit_class_weight) + + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2], + [0., 0., 0., 0.]], dtype=np.float32) + groundtruth_weights1 = np.array([1, 0], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842], + [0, 0, 0, 0]], + dtype=np.float32) + groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32) + class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32) + class_targets2 = np.array([[0, 0, 0, 1], + [0, 0, -1, 0], + [0, 0, 0, 0]], dtype=np.float32) + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + exp_cls_targets = [[[0, 1, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0], + [1, 0, 0, 0]], + [[1, 0, 0, 0], + [0, 0, 0, 1], + [1, 0, 0, 0], + [1, 0, 0, 0]]] + exp_cls_weights = [[[1, 1, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5]], + [[0.5, 0.5, 0.5, 0.5], + [1, 0.5, 0.5, 1], + 
[0.5, 0.5, 1, 0.5], + [0.5, 0.5, 0.5, 0.5]]] + exp_reg_targets = [[[0, 0, -0.5, -0.5], + [0, 0, 0, 0], + [0, 0, 0, 0,], + [0, 0, 0, 0,],], + [[0, 0, 0, 0,], + [0, 0.01231521, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 0, 0, 0], + [0, 1, 0, 0]] + + (cls_targets_out, cls_weights_out, reg_targets_out, + reg_weights_out) = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2, groundtruth_weights1, + groundtruth_weights2 + ]) + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + + def test_batch_assign_confidences_multidimensional(self): + + def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2): + box_list1 = box_list.BoxList(groundtruth_boxlist1) + box_list2 = box_list.BoxList(groundtruth_boxlist2) + gt_box_batch = [box_list1, box_list2] + gt_class_confidences_batch = [class_targets1, class_targets2] + anchors_boxlist = box_list.BoxList(anchor_means) + multiclass_target_assigner = self._get_target_assigner() + target_dimensions = (2, 3) + unmatched_class_label = tf.constant(np.zeros(target_dimensions), + tf.float32) + implicit_class_weight = 0.5 + (cls_targets, cls_weights, reg_targets, reg_weights, + _) = targetassigner.batch_assign_confidences( + multiclass_target_assigner, + anchors_boxlist, + gt_box_batch, + gt_class_confidences_batch, + unmatched_class_label=unmatched_class_label, + include_background_class=True, + implicit_class_weight=implicit_class_weight) + return (cls_targets, cls_weights, reg_targets, reg_weights) + + groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) + groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], + [0.015789, 0.0985, 0.55789, 0.3842]], + dtype=np.float32) + class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32) + class_targets2 = np.array([[0, 0, 0, 1], + [0, 0, 1, 0]], dtype=np.float32) + class_targets1 = np.array([[[0, 1, 1], + [1, 1, 0]]], dtype=np.float32) + class_targets2 = np.array([[[0, 1, 1], + [1, 1, 0]], + [[0, 0, 1], + [0, 0, 1]]], dtype=np.float32) + + anchor_means = np.array([[0, 0, .25, .25], + [0, .25, 1, 1], + [0, .1, .5, .5], + [.75, .75, 1, 1]], dtype=np.float32) + + with self.assertRaises(ValueError): + _, _, _, _ = self.execute(graph_fn, [ + anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, + class_targets1, class_targets2 + ]) + + +class CreateTargetAssignerTest(test_case.TestCase): + + def test_create_target_assigner(self): + """Tests that named constructor gives working target assigners. + + TODO(rathodv): Make this test more general. + """ + corners = [[0.0, 0.0, 1.0, 1.0]] + groundtruth = box_list.BoxList(tf.constant(corners)) + + priors = box_list.BoxList(tf.constant(corners)) + if tf_version.is_tf1(): + multibox_ta = (targetassigner + .create_target_assigner('Multibox', stage='proposal')) + multibox_ta.assign(priors, groundtruth) + # No tests on output, as that may vary arbitrarily as new target assigners + # are added. As long as it is constructed correctly and runs without errors, + # tests on the individual assigners cover correctness of the assignments. 
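The regression targets expected in the confidence-assignment tests above (for example [0, 0, -0.5, -0.5] and [0, 0.01231521, 0, 0]) are consistent with a mean/stddev style box encoding with stddev = 0.1, i.e. (groundtruth - anchor) / 0.1 for each matched anchor. The coder actually used comes from _get_target_assigner, which is defined earlier in this file and is not shown here, so treat the sketch below (and the helper name encode_mean_stddev) as an illustrative assumption rather than the library's implementation.

import numpy as np

def encode_mean_stddev(groundtruth_box, anchor_box, stddev=0.1):
  # Hypothetical helper: encodes a matched groundtruth box relative to its
  # anchor the way a mean/stddev box coder with stddev=0.1 would.
  return (np.asarray(groundtruth_box) - np.asarray(anchor_box)) / stddev

# Anchor [0, 0, .25, .25] matched to groundtruth [0, 0, 0.2, 0.2]:
print(encode_mean_stddev([0.0, 0.0, 0.2, 0.2], [0.0, 0.0, 0.25, 0.25]))
# -> [ 0.   0.  -0.5 -0.5]

# Anchor [0, .25, 1, 1] matched to groundtruth [0, 0.25123152, 1, 1]:
print(encode_mean_stddev([0.0, 0.25123152, 1.0, 1.0], [0.0, 0.25, 1.0, 1.0]))
# -> approximately [0, 0.0123152, 0, 0]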
+ + anchors = box_list.BoxList(tf.constant(corners)) + faster_rcnn_proposals_ta = (targetassigner + .create_target_assigner('FasterRCNN', + stage='proposal')) + faster_rcnn_proposals_ta.assign(anchors, groundtruth) + + fast_rcnn_ta = (targetassigner + .create_target_assigner('FastRCNN')) + fast_rcnn_ta.assign(anchors, groundtruth) + + faster_rcnn_detection_ta = (targetassigner + .create_target_assigner('FasterRCNN', + stage='detection')) + faster_rcnn_detection_ta.assign(anchors, groundtruth) + + with self.assertRaises(ValueError): + targetassigner.create_target_assigner('InvalidDetector', + stage='invalid_stage') + + +def _array_argmax(array): + return np.unravel_index(np.argmax(array), array.shape) + + +class CenterNetCenterHeatmapTargetAssignerTest(test_case.TestCase): + + def setUp(self): + super(CenterNetCenterHeatmapTargetAssignerTest, self).setUp() + + self._box_center = [0.0, 0.0, 1.0, 1.0] + self._box_center_small = [0.25, 0.25, 0.75, 0.75] + self._box_lower_left = [0.5, 0.0, 1.0, 0.5] + self._box_center_offset = [0.1, 0.05, 1.0, 1.0] + self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] + + def test_center_location(self): + """Test that the centers are at the correct location.""" + def graph_fn(): + box_batch = [tf.constant([self._box_center, self._box_lower_left])] + classes = [ + tf.one_hot([0, 1], depth=4), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes) + return targets + targets = self.execute(graph_fn, []) + self.assertEqual((10, 10), _array_argmax(targets[0, :, :, 0])) + self.assertAlmostEqual(1.0, targets[0, 10, 10, 0]) + self.assertEqual((15, 5), _array_argmax(targets[0, :, :, 1])) + self.assertAlmostEqual(1.0, targets[0, 15, 5, 1]) + + def test_center_batch_shape(self): + """Test that the shape of the target for a batch is correct.""" + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center]), + tf.constant([self._box_center_small]), + ] + classes = [ + tf.one_hot([0, 1], depth=4), + tf.one_hot([2], depth=4), + tf.one_hot([3], depth=4), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes) + return targets + targets = self.execute(graph_fn, []) + self.assertEqual((3, 20, 20, 4), targets.shape) + + def test_center_overlap_maximum(self): + """Test that when boxes overlap we, are computing the maximum.""" + def graph_fn(): + box_batch = [ + tf.constant([ + self._box_center, self._box_center_offset, self._box_center, + self._box_center_offset + ]) + ] + classes = [ + tf.one_hot([0, 0, 1, 2], depth=4), + ] + + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes) + return targets + targets = self.execute(graph_fn, []) + class0_targets = targets[0, :, :, 0] + class1_targets = targets[0, :, :, 1] + class2_targets = targets[0, :, :, 2] + np.testing.assert_allclose(class0_targets, + np.maximum(class1_targets, class2_targets)) + + def test_size_blur(self): + """Test that the heatmap of a larger box is more blurred.""" + def graph_fn(): + box_batch = [tf.constant([self._box_center, self._box_center_small])] + + classes = [ + tf.one_hot([0, 1], depth=4), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes) 
+ return targets + targets = self.execute(graph_fn, []) + self.assertGreater( + np.count_nonzero(targets[:, :, :, 0]), + np.count_nonzero(targets[:, :, :, 1])) + + def test_weights(self): + """Test that the weights correctly ignore ground truth.""" + def graph1_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center]), + tf.constant([self._box_center_small]), + ] + classes = [ + tf.one_hot([0, 1], depth=4), + tf.one_hot([2], depth=4), + tf.one_hot([3], depth=4), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes) + return targets + + targets = self.execute(graph1_fn, []) + self.assertAlmostEqual(1.0, targets[0, :, :, 0].max()) + self.assertAlmostEqual(1.0, targets[0, :, :, 1].max()) + self.assertAlmostEqual(1.0, targets[1, :, :, 2].max()) + self.assertAlmostEqual(1.0, targets[2, :, :, 3].max()) + self.assertAlmostEqual(0.0, targets[0, :, :, [2, 3]].max()) + self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max()) + self.assertAlmostEqual(0.0, targets[2, :, :, :3].max()) + + def graph2_fn(): + weights = [ + tf.constant([0., 1.]), + tf.constant([1.]), + tf.constant([1.]), + ] + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center]), + tf.constant([self._box_center_small]), + ] + classes = [ + tf.one_hot([0, 1], depth=4), + tf.one_hot([2], depth=4), + tf.one_hot([3], depth=4), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) + targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, + classes, + weights) + return targets + targets = self.execute(graph2_fn, []) + self.assertAlmostEqual(1.0, targets[0, :, :, 1].max()) + self.assertAlmostEqual(1.0, targets[1, :, :, 2].max()) + self.assertAlmostEqual(1.0, targets[2, :, :, 3].max()) + self.assertAlmostEqual(0.0, targets[0, :, :, [0, 2, 3]].max()) + self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max()) + self.assertAlmostEqual(0.0, targets[2, :, :, :3].max()) + + def test_low_overlap(self): + def graph1_fn(): + box_batch = [tf.constant([self._box_center])] + classes = [ + tf.one_hot([0], depth=2), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( + 4, min_overlap=0.1) + targets_low_overlap = assigner.assign_center_targets_from_boxes( + 80, 80, box_batch, classes) + return targets_low_overlap + targets_low_overlap = self.execute(graph1_fn, []) + self.assertLess(1, np.count_nonzero(targets_low_overlap)) + + def graph2_fn(): + box_batch = [tf.constant([self._box_center])] + classes = [ + tf.one_hot([0], depth=2), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( + 4, min_overlap=0.6) + targets_medium_overlap = assigner.assign_center_targets_from_boxes( + 80, 80, box_batch, classes) + return targets_medium_overlap + targets_medium_overlap = self.execute(graph2_fn, []) + self.assertLess(1, np.count_nonzero(targets_medium_overlap)) + + def graph3_fn(): + box_batch = [tf.constant([self._box_center])] + classes = [ + tf.one_hot([0], depth=2), + ] + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( + 4, min_overlap=0.99) + targets_high_overlap = assigner.assign_center_targets_from_boxes( + 80, 80, box_batch, classes) + return targets_high_overlap + + targets_high_overlap = self.execute(graph3_fn, []) + self.assertTrue(np.all(targets_low_overlap >= targets_medium_overlap)) + self.assertTrue(np.all(targets_medium_overlap >= targets_high_overlap)) + + 
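The peak locations asserted in test_center_location above come from mapping each normalized box center into the stride-4 output grid: an 80x80 input gives a 20x20 heatmap, so the full-image box peaks at (10, 10) and the lower-left box at (15, 5). The sketch below only reproduces that location arithmetic (center_peak is an illustrative helper, not part of the library); the Gaussian spread exercised by test_size_blur and test_low_overlap is applied around these cells by the assigner itself.

import numpy as np

def center_peak(box, height, width, stride):
  """Returns the (y, x) output-grid cell containing the box center."""
  ymin, xmin, ymax, xmax = box
  center_y = (ymin + ymax) / 2.0 * height / stride
  center_x = (xmin + xmax) / 2.0 * width / stride
  return int(np.floor(center_y)), int(np.floor(center_x))

print(center_peak([0.0, 0.0, 1.0, 1.0], 80, 80, stride=4))   # (10, 10)
print(center_peak([0.5, 0.0, 1.0, 0.5], 80, 80, stride=4))   # (15, 5)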
def test_empty_box_list(self): + """Test that an empty box list gives an all 0 heatmap.""" + def graph_fn(): + box_batch = [ + tf.zeros((0, 4), dtype=tf.float32), + ] + + classes = [ + tf.zeros((0, 5), dtype=tf.float32), + ] + + assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( + 4, min_overlap=0.1) + targets = assigner.assign_center_targets_from_boxes( + 80, 80, box_batch, classes) + return targets + targets = self.execute(graph_fn, []) + np.testing.assert_allclose(targets, 0.) + + +class CenterNetBoxTargetAssignerTest(test_case.TestCase): + + def setUp(self): + super(CenterNetBoxTargetAssignerTest, self).setUp() + self._box_center = [0.0, 0.0, 1.0, 1.0] + self._box_center_small = [0.25, 0.25, 0.75, 0.75] + self._box_lower_left = [0.5, 0.0, 1.0, 0.5] + self._box_center_offset = [0.1, 0.05, 1.0, 1.0] + self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] + + def test_max_distance_for_overlap(self): + """Test that the distance ensures the IoU with random boxes.""" + + # TODO(vighneshb) remove this after the `_smallest_positive_root` + # function if fixed. + self.skipTest(('Skipping test because we are using an incorrect version of' + 'the `max_distance_for_overlap` function to reproduce' + ' results.')) + + rng = np.random.RandomState(0) + n_samples = 100 + + width = rng.uniform(1, 100, size=n_samples) + height = rng.uniform(1, 100, size=n_samples) + min_iou = rng.uniform(0.1, 1.0, size=n_samples) + + def graph_fn(): + max_dist = targetassigner.max_distance_for_overlap(height, width, min_iou) + return max_dist + max_dist = self.execute(graph_fn, []) + xmin1 = np.zeros(n_samples) + ymin1 = np.zeros(n_samples) + xmax1 = np.zeros(n_samples) + width + ymax1 = np.zeros(n_samples) + height + + xmin2 = max_dist * np.cos(rng.uniform(0, 2 * np.pi)) + ymin2 = max_dist * np.sin(rng.uniform(0, 2 * np.pi)) + xmax2 = width + max_dist * np.cos(rng.uniform(0, 2 * np.pi)) + ymax2 = height + max_dist * np.sin(rng.uniform(0, 2 * np.pi)) + + boxes1 = np.vstack([ymin1, xmin1, ymax1, xmax1]).T + boxes2 = np.vstack([ymin2, xmin2, ymax2, xmax2]).T + + iou = np.diag(np_box_ops.iou(boxes1, boxes2)) + + self.assertTrue(np.all(iou >= min_iou)) + + def test_max_distance_for_overlap_centernet(self): + """Test the version of the function used in the CenterNet paper.""" + + def graph_fn(): + distance = targetassigner.max_distance_for_overlap(10, 5, 0.5) + return distance + distance = self.execute(graph_fn, []) + self.assertAlmostEqual(2.807764064, distance) + + def test_assign_size_and_offset_targets(self): + """Test the assign_size_and_offset_targets function.""" + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center_offset]), + tf.constant([self._box_center_small, self._box_odd_coordinates]), + ] + + assigner = targetassigner.CenterNetBoxTargetAssigner(4) + indices, hw, yx_offset, weights = assigner.assign_size_and_offset_targets( + 80, 80, box_batch) + return indices, hw, yx_offset, weights + indices, hw, yx_offset, weights = self.execute(graph_fn, []) + self.assertEqual(indices.shape, (5, 3)) + self.assertEqual(hw.shape, (5, 2)) + self.assertEqual(yx_offset.shape, (5, 2)) + self.assertEqual(weights.shape, (5,)) + np.testing.assert_array_equal( + indices, + [[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]]) + np.testing.assert_array_equal( + hw, [[20, 20], [10, 10], [18, 19], [10, 10], [8, 15]]) + np.testing.assert_array_equal( + yx_offset, [[0, 0], [0, 0], [0, 0.5], [0, 0], [0.25, 0.75]]) + 
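The expected hw and yx_offset rows above can be reproduced by hand. With an 80x80 input and stride 4 the output grid is 20x20; the "odd coordinates" box [0.1625, 0.2125, 0.5625, 0.9625] has its center at (7.25, 11.75) in grid units, so the index is the floored cell (7, 11), the offset is the fractional remainder (0.25, 0.75), and the size is the box extent in grid units (8, 15). A small sketch of that arithmetic (size_and_offset is an illustrative name):

import numpy as np

def size_and_offset(box, height, width, stride):
  ymin, xmin, ymax, xmax = box
  out_h, out_w = height / stride, width / stride
  center_y = (ymin + ymax) / 2.0 * out_h
  center_x = (xmin + xmax) / 2.0 * out_w
  y_index, x_index = int(np.floor(center_y)), int(np.floor(center_x))
  hw = ((ymax - ymin) * out_h, (xmax - xmin) * out_w)
  yx_offset = (center_y - y_index, center_x - x_index)
  return (y_index, x_index), hw, yx_offset

print(size_and_offset([0.1625, 0.2125, 0.5625, 0.9625], 80, 80, stride=4))
# -> approximately ((7, 11), (8.0, 15.0), (0.25, 0.75)), matching the last
#    row of the expected indices / hw / yx_offset arrays above.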
np.testing.assert_array_equal(weights, 1) + + def test_assign_size_and_offset_targets_weights(self): + """Test the assign_size_and_offset_targets function with box weights.""" + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_lower_left, self._box_center_small]), + tf.constant([self._box_center_small, self._box_odd_coordinates]), + ] + + cn_assigner = targetassigner.CenterNetBoxTargetAssigner(4) + weights_batch = [ + tf.constant([0.0, 1.0]), + tf.constant([1.0, 1.0]), + tf.constant([0.0, 0.0]) + ] + indices, hw, yx_offset, weights = cn_assigner.assign_size_and_offset_targets( + 80, 80, box_batch, weights_batch) + return indices, hw, yx_offset, weights + indices, hw, yx_offset, weights = self.execute(graph_fn, []) + self.assertEqual(indices.shape, (6, 3)) + self.assertEqual(hw.shape, (6, 2)) + self.assertEqual(yx_offset.shape, (6, 2)) + self.assertEqual(weights.shape, (6,)) + np.testing.assert_array_equal(indices, + [[0, 10, 10], [0, 15, 5], [1, 15, 5], + [1, 10, 10], [2, 10, 10], [2, 7, 11]]) + np.testing.assert_array_equal( + hw, [[20, 20], [10, 10], [10, 10], [10, 10], [10, 10], [8, 15]]) + np.testing.assert_array_equal( + yx_offset, [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0.25, 0.75]]) + np.testing.assert_array_equal(weights, [0, 1, 1, 1, 0, 0]) + + def test_get_batch_predictions_from_indices(self): + """Test the get_batch_predictions_from_indices function. + + This test verifies that the indices returned by + assign_size_and_offset_targets function work as expected with a predicted + tensor. + + """ + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center_small, self._box_odd_coordinates]), + ] + + pred_array = np.ones((2, 40, 20, 2), dtype=np.int32) * -1000 + pred_array[0, 20, 10] = [1, 2] + pred_array[0, 30, 5] = [3, 4] + pred_array[1, 20, 10] = [5, 6] + pred_array[1, 14, 11] = [7, 8] + + pred_tensor = tf.constant(pred_array) + + cn_assigner = targetassigner.CenterNetBoxTargetAssigner(4) + indices, _, _, _ = cn_assigner.assign_size_and_offset_targets( + 160, 80, box_batch) + + preds = targetassigner.get_batch_predictions_from_indices( + pred_tensor, indices) + return preds + preds = self.execute(graph_fn, []) + np.testing.assert_array_equal(preds, [[1, 2], [3, 4], [5, 6], [7, 8]]) + + +class CenterNetKeypointTargetAssignerTest(test_case.TestCase): + + def test_keypoint_heatmap_targets(self): + def graph_fn(): + gt_classes_list = [ + tf.one_hot([0, 1, 0, 1], depth=4), + ] + coordinates = tf.expand_dims( + tf.constant( + np.array([[0.1, 0.2, 0.3, 0.4, 0.5], + [float('nan'), 0.7, float('nan'), 0.9, 1.0], + [0.4, 0.1, 0.4, 0.2, 0.1], + [float('nan'), 0.1, 0.5, 0.7, 0.6]]), + dtype=tf.float32), + axis=2) + gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] + gt_boxes_list = [ + tf.constant( + np.array([[0.0, 0.0, 0.3, 0.3], + [0.0, 0.0, 0.5, 0.5], + [0.0, 0.0, 0.5, 0.5], + [0.0, 0.0, 1.0, 1.0]]), + dtype=tf.float32) + ] + + cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( + stride=4, + class_id=1, + keypoint_indices=[0, 2]) + (targets, num_instances_batch, + valid_mask) = cn_assigner.assign_keypoint_heatmap_targets( + 120, + 80, + gt_keypoints_list, + gt_classes_list, + gt_boxes_list=gt_boxes_list) + return targets, num_instances_batch, valid_mask + + targets, num_instances_batch, valid_mask = self.execute(graph_fn, []) + # keypoint (0.5, 0.5) is selected. The peak is expected to appear at the + # center of the image. 
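The keypoint-heatmap assertions that follow rely on the same stride arithmetic: a 120x80 input with stride 4 yields a 30x20 grid, so the keypoint at (0.5, 0.5) peaks at row 15, column 10, and blacking out the first instance's box (0, 0, 0.5, 0.5) zeroes a 16x11 block of the valid mask, leaving 30*20 - 16*11 = 424 ones. A minimal sketch (keypoint_to_grid is an illustrative helper, not a library function):

import numpy as np

def keypoint_to_grid(keypoint_yx, height, width, stride):
  """Maps a normalized keypoint to its output-grid cell."""
  y, x = keypoint_yx
  return int(np.floor(y * height / stride)), int(np.floor(x * width / stride))

print(keypoint_to_grid((0.5, 0.5), 120, 80, stride=4))  # (15, 10)

valid_mask = np.ones((30, 20))
valid_mask[0:16, 0:11] = 0.0   # box (0, 0, 0.5, 0.5) blacked out, inclusive
print(int(valid_mask.sum()))   # 424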
+ self.assertEqual((15, 10), _array_argmax(targets[0, :, :, 1])) + self.assertAlmostEqual(1.0, targets[0, 15, 10, 1]) + # No peak for the first class since NaN is selected. + self.assertAlmostEqual(0.0, targets[0, 15, 10, 0]) + # Verify the output heatmap shape. + self.assertAllEqual([1, 30, 20, 2], targets.shape) + # Verify the number of instances is correct. + np.testing.assert_array_almost_equal([[0, 1]], + num_instances_batch) + # When calling the function, we specify the class id to be 1 (1th and 3rd) + # instance and the keypoint indices to be [0, 2], meaning that the 1st + # instance is the target class with no valid keypoints in it. As a result, + # the region of the 1st instance boxing box should be blacked out + # (0.0, 0.0, 0.5, 0.5), transfering to (0, 0, 15, 10) in absolute output + # space. + self.assertAlmostEqual(np.sum(valid_mask[:, 0:16, 0:11]), 0.0) + # All other values are 1.0 so the sum is: 30 * 20 - 16 * 11 = 424. + self.assertAlmostEqual(np.sum(valid_mask), 424.0) + + def test_assign_keypoints_offset_targets(self): + def graph_fn(): + gt_classes_list = [ + tf.one_hot([0, 1, 0, 1], depth=4), + ] + coordinates = tf.expand_dims( + tf.constant( + np.array([[0.1, 0.2, 0.3, 0.4, 0.5], + [float('nan'), 0.7, float('nan'), 0.9, 0.4], + [0.4, 0.1, 0.4, 0.2, 0.0], + [float('nan'), 0.0, 0.12, 0.7, 0.4]]), + dtype=tf.float32), + axis=2) + gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] + + cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( + stride=4, + class_id=1, + keypoint_indices=[0, 2]) + (indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets( + height=120, + width=80, + gt_keypoints_list=gt_keypoints_list, + gt_classes_list=gt_classes_list) + return indices, weights, offsets + indices, weights, offsets = self.execute(graph_fn, []) + # Only the last element has positive weight. + np.testing.assert_array_almost_equal( + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights) + # Validate the last element's indices and offsets. + np.testing.assert_array_equal([0, 3, 2], indices[7, :]) + np.testing.assert_array_almost_equal([0.6, 0.4], offsets[7, :]) + + def test_assign_keypoints_offset_targets_radius(self): + def graph_fn(): + gt_classes_list = [ + tf.one_hot([0, 1, 0, 1], depth=4), + ] + coordinates = tf.expand_dims( + tf.constant( + np.array([[0.1, 0.2, 0.3, 0.4, 0.5], + [float('nan'), 0.7, float('nan'), 0.9, 0.4], + [0.4, 0.1, 0.4, 0.2, 0.0], + [float('nan'), 0.0, 0.12, 0.7, 0.4]]), + dtype=tf.float32), + axis=2) + gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] + + cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( + stride=4, + class_id=1, + keypoint_indices=[0, 2], + peak_radius=1, + per_keypoint_offset=True) + (indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets( + height=120, + width=80, + gt_keypoints_list=gt_keypoints_list, + gt_classes_list=gt_classes_list) + return indices, weights, offsets + indices, weights, offsets = self.execute(graph_fn, []) + + # There are total 8 * 5 (neighbors) = 40 targets. + self.assertAllEqual(indices.shape, [40, 4]) + self.assertAllEqual(offsets.shape, [40, 2]) + self.assertAllEqual(weights.shape, [40]) + # Only the last 5 (radius 1 generates 5 valid points) element has positive + # weight. 
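The offset assertions in test_assign_keypoints_offset_targets above (indices[7] == [0, 3, 2], offsets[7] == [0.6, 0.4]) follow from the only keypoint that survives the class and keypoint-index filtering: its normalized coordinate 0.12 maps to (3.6, 2.4) on the 30x20 grid, giving the floored cell (3, 2) and the fractional remainder as the offset; the radius-1 variant checked just after repeats this for the four neighbouring cells as well. A sketch of that computation (keypoint_offset is an illustrative name):

import numpy as np

def keypoint_offset(keypoint_yx, height, width, stride):
  y = keypoint_yx[0] * height / stride
  x = keypoint_yx[1] * width / stride
  cell = (int(np.floor(y)), int(np.floor(x)))
  offset = (y - cell[0], x - cell[1])
  return cell, offset

print(keypoint_offset((0.12, 0.12), 120, 80, stride=4))
# -> ((3, 2), (~0.6, ~0.4))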
+ np.testing.assert_array_almost_equal([ + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0 + ], weights) + # Validate the last element's (with neighbors) indices and offsets. + np.testing.assert_array_equal([0, 2, 2, 1], indices[35, :]) + np.testing.assert_array_equal([0, 3, 1, 1], indices[36, :]) + np.testing.assert_array_equal([0, 3, 2, 1], indices[37, :]) + np.testing.assert_array_equal([0, 3, 3, 1], indices[38, :]) + np.testing.assert_array_equal([0, 4, 2, 1], indices[39, :]) + np.testing.assert_array_almost_equal([1.6, 0.4], offsets[35, :]) + np.testing.assert_array_almost_equal([0.6, 1.4], offsets[36, :]) + np.testing.assert_array_almost_equal([0.6, 0.4], offsets[37, :]) + np.testing.assert_array_almost_equal([0.6, -0.6], offsets[38, :]) + np.testing.assert_array_almost_equal([-0.4, 0.4], offsets[39, :]) + + def test_assign_joint_regression_targets(self): + def graph_fn(): + gt_boxes_list = [ + tf.constant( + np.array([[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 1.0]]), + dtype=tf.float32) + ] + gt_classes_list = [ + tf.one_hot([0, 1, 0, 1], depth=4), + ] + coordinates = tf.expand_dims( + tf.constant( + np.array([[0.1, 0.2, 0.3, 0.4, 0.5], + [float('nan'), 0.7, float('nan'), 0.9, 0.4], + [0.4, 0.1, 0.4, 0.2, 0.0], + [float('nan'), 0.0, 0.12, 0.7, 0.4]]), + dtype=tf.float32), + axis=2) + gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] + + cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( + stride=4, + class_id=1, + keypoint_indices=[0, 2]) + (indices, offsets, weights) = cn_assigner.assign_joint_regression_targets( + height=120, + width=80, + gt_keypoints_list=gt_keypoints_list, + gt_classes_list=gt_classes_list, + gt_boxes_list=gt_boxes_list) + return indices, offsets, weights + indices, offsets, weights = self.execute(graph_fn, []) + np.testing.assert_array_almost_equal( + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights) + np.testing.assert_array_equal([0, 15, 10, 1], indices[7, :]) + np.testing.assert_array_almost_equal([-11.4, -7.6], offsets[7, :]) + + def test_assign_joint_regression_targets_radius(self): + def graph_fn(): + gt_boxes_list = [ + tf.constant( + np.array([[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 1.0]]), + dtype=tf.float32) + ] + gt_classes_list = [ + tf.one_hot([0, 1, 0, 1], depth=4), + ] + coordinates = tf.expand_dims( + tf.constant( + np.array([[0.1, 0.2, 0.3, 0.4, 0.5], + [float('nan'), 0.7, float('nan'), 0.9, 0.4], + [0.4, 0.1, 0.4, 0.2, 0.0], + [float('nan'), 0.0, 0.12, 0.7, 0.4]]), + dtype=tf.float32), + axis=2) + gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] + + cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( + stride=4, + class_id=1, + keypoint_indices=[0, 2], + peak_radius=1) + (indices, offsets, weights) = cn_assigner.assign_joint_regression_targets( + height=120, + width=80, + gt_keypoints_list=gt_keypoints_list, + gt_classes_list=gt_classes_list, + gt_boxes_list=gt_boxes_list) + return indices, offsets, weights + indices, offsets, weights = self.execute(graph_fn, []) + + # There are total 8 * 5 (neighbors) = 40 targets. + self.assertAllEqual(indices.shape, [40, 4]) + self.assertAllEqual(offsets.shape, [40, 2]) + self.assertAllEqual(weights.shape, [40]) + # Only the last 5 (radius 1 generates 5 valid points) element has positive + # weight. 
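The joint-regression offsets asserted above (and the radius-1 neighbours checked just after) differ from the plain keypoint offsets in one respect: the keypoint is regressed from the object's center cell rather than from its own cell. The fourth box [0, 0, 1, 1] has its center at cell (15, 10) on the 30x20 grid and its keypoint at (3.6, 2.4), so the offset is (-11.4, -7.6); the radius-1 variant measures the same keypoint from neighbouring cells, e.g. (14, 10) gives (-10.4, -7.6). A minimal sketch of that arithmetic, with the values hard-coded from the test:

import numpy as np

stride = 4
height, width = 120, 80
keypoint = np.array([0.12 * height, 0.12 * width]) / stride   # ~(3.6, 2.4)
box = np.array([0.0, 0.0, 1.0, 1.0])
center = np.floor([(box[0] + box[2]) / 2 * height / stride,
                   (box[1] + box[3]) / 2 * width / stride])    # (15, 10)
print(keypoint - center)               # ~[-11.4  -7.6]
print(keypoint - (center + [-1, 0]))   # neighbour cell (14, 10) -> ~[-10.4 -7.6]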
+ np.testing.assert_array_almost_equal([ + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0 + ], weights) + # Test the values of the indices and offsets of the last 5 elements. + np.testing.assert_array_equal([0, 14, 10, 1], indices[35, :]) + np.testing.assert_array_equal([0, 15, 9, 1], indices[36, :]) + np.testing.assert_array_equal([0, 15, 10, 1], indices[37, :]) + np.testing.assert_array_equal([0, 15, 11, 1], indices[38, :]) + np.testing.assert_array_equal([0, 16, 10, 1], indices[39, :]) + np.testing.assert_array_almost_equal([-10.4, -7.6], offsets[35, :]) + np.testing.assert_array_almost_equal([-11.4, -6.6], offsets[36, :]) + np.testing.assert_array_almost_equal([-11.4, -7.6], offsets[37, :]) + np.testing.assert_array_almost_equal([-11.4, -8.6], offsets[38, :]) + np.testing.assert_array_almost_equal([-12.4, -7.6], offsets[39, :]) + + +class CenterNetMaskTargetAssignerTest(test_case.TestCase): + + def test_assign_segmentation_targets(self): + def graph_fn(): + gt_masks_list = [ + # Example 0. + tf.constant([ + [ + [1., 0., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + ], + [ + [0., 0., 0., 0.], + [0., 0., 0., 1.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + ], + [ + [1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.], + ] + ], dtype=tf.float32), + # Example 1. + tf.constant([ + [ + [1., 1., 0., 1.], + [1., 1., 1., 1.], + [0., 0., 1., 1.], + [0., 0., 0., 1.], + ], + [ + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [1., 1., 0., 0.], + [1., 1., 0., 0.], + ], + ], dtype=tf.float32), + ] + gt_classes_list = [ + # Example 0. + tf.constant([[1., 0., 0.], + [0., 1., 0.], + [1., 0., 0.]], dtype=tf.float32), + # Example 1. + tf.constant([[0., 1., 0.], + [0., 1., 0.]], dtype=tf.float32) + ] + cn_assigner = targetassigner.CenterNetMaskTargetAssigner(stride=2) + segmentation_target = cn_assigner.assign_segmentation_targets( + gt_masks_list=gt_masks_list, + gt_classes_list=gt_classes_list, + mask_resize_method=targetassigner.ResizeMethod.NEAREST_NEIGHBOR) + return segmentation_target + segmentation_target = self.execute(graph_fn, []) + + expected_seg_target = np.array([ + # Example 0 [[class 0, class 1], [background, class 0]] + [[[1, 0, 0], [0, 1, 0]], + [[0, 0, 0], [1, 0, 0]]], + # Example 1 [[class 1, class 1], [class 1, class 1]] + [[[0, 1, 0], [0, 1, 0]], + [[0, 1, 0], [0, 1, 0]]], + ], dtype=np.float32) + np.testing.assert_array_almost_equal( + expected_seg_target, segmentation_target) + + def test_assign_segmentation_targets_no_objects(self): + def graph_fn(): + gt_masks_list = [tf.zeros((0, 5, 5))] + gt_classes_list = [tf.zeros((0, 10))] + cn_assigner = targetassigner.CenterNetMaskTargetAssigner(stride=1) + segmentation_target = cn_assigner.assign_segmentation_targets( + gt_masks_list=gt_masks_list, + gt_classes_list=gt_classes_list, + mask_resize_method=targetassigner.ResizeMethod.NEAREST_NEIGHBOR) + return segmentation_target + + segmentation_target = self.execute(graph_fn, []) + expected_seg_target = np.zeros((1, 5, 5, 10)) + np.testing.assert_array_almost_equal( + expected_seg_target, segmentation_target) + + +class CenterNetDensePoseTargetAssignerTest(test_case.TestCase): + + def test_assign_part_and_coordinate_targets(self): + def graph_fn(): + gt_dp_num_points_list = [ + # Example 0. + tf.constant([2, 0, 3], dtype=tf.int32), + # Example 1. 
+ tf.constant([1, 1], dtype=tf.int32), + ] + gt_dp_part_ids_list = [ + # Example 0. + tf.constant([[1, 6, 0], + [0, 0, 0], + [0, 2, 3]], dtype=tf.int32), + # Example 1. + tf.constant([[7, 0, 0], + [0, 0, 0]], dtype=tf.int32), + ] + gt_dp_surface_coords_list = [ + # Example 0. + tf.constant( + [[[0.11, 0.2, 0.3, 0.4], # Box 0. + [0.6, 0.4, 0.1, 0.0], + [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], # Box 1. + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]], + [[0.22, 0.1, 0.6, 0.8], # Box 2. + [0.0, 0.4, 0.5, 1.0], + [0.3, 0.2, 0.4, 0.1]]], + dtype=tf.float32), + # Example 1. + tf.constant( + [[[0.5, 0.5, 0.3, 1.0], # Box 0. + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]], + [[0.2, 0.2, 0.5, 0.8], # Box 1. + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]]], + dtype=tf.float32), + ] + gt_weights_list = [ + # Example 0. + tf.constant([1.0, 1.0, 0.5], dtype=tf.float32), + # Example 1. + tf.constant([0.0, 1.0], dtype=tf.float32), + ] + cn_assigner = targetassigner.CenterNetDensePoseTargetAssigner(stride=4) + batch_indices, batch_part_ids, batch_surface_coords, batch_weights = ( + cn_assigner.assign_part_and_coordinate_targets( + height=120, + width=80, + gt_dp_num_points_list=gt_dp_num_points_list, + gt_dp_part_ids_list=gt_dp_part_ids_list, + gt_dp_surface_coords_list=gt_dp_surface_coords_list, + gt_weights_list=gt_weights_list)) + + return batch_indices, batch_part_ids, batch_surface_coords, batch_weights + batch_indices, batch_part_ids, batch_surface_coords, batch_weights = ( + self.execute(graph_fn, [])) + + expected_batch_indices = np.array([ + # Example 0. e.g. + # The first set of indices is calculated as follows: + # floor(0.11*120/4) = 3, floor(0.2*80/4) = 4. + [0, 3, 4, 1], [0, 18, 8, 6], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], + [0, 0, 0, 0], [0, 6, 2, 0], [0, 0, 8, 2], [0, 9, 4, 3], + # Example 1. + [1, 15, 10, 7], [1, 0, 0, 0], [1, 0, 0, 0], [1, 6, 4, 0], [1, 0, 0, 0], + [1, 0, 0, 0] + ], dtype=np.int32) + expected_batch_part_ids = tf.one_hot( + [1, 6, 0, 0, 0, 0, 0, 2, 3, 7, 0, 0, 0, 0, 0], depth=24).numpy() + expected_batch_surface_coords = np.array([ + # Box 0. + [0.3, 0.4], [0.1, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], + [0.6, 0.8], [0.5, 1.0], [0.4, 0.1], + # Box 1. + [0.3, 1.0], [0.0, 0.0], [0.0, 0.0], [0.5, 0.8], [0.0, 0.0], [0.0, 0.0], + ], np.float32) + expected_batch_weights = np.array([ + # Box 0. + 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, + # Box 1. 
+ 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 + ], dtype=np.float32) + self.assertAllEqual(expected_batch_indices, batch_indices) + self.assertAllEqual(expected_batch_part_ids, batch_part_ids) + self.assertAllClose(expected_batch_surface_coords, batch_surface_coords) + self.assertAllClose(expected_batch_weights, batch_weights) + + +class CenterNetTrackTargetAssignerTest(test_case.TestCase): + + def setUp(self): + super(CenterNetTrackTargetAssignerTest, self).setUp() + self._box_center = [0.0, 0.0, 1.0, 1.0] + self._box_center_small = [0.25, 0.25, 0.75, 0.75] + self._box_lower_left = [0.5, 0.0, 1.0, 0.5] + self._box_center_offset = [0.1, 0.05, 1.0, 1.0] + self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] + + def test_assign_track_targets(self): + """Test the assign_track_targets function.""" + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_lower_left, self._box_center_small]), + tf.constant([self._box_center_small, self._box_odd_coordinates]), + ] + track_id_batch = [ + tf.constant([0, 1]), + tf.constant([1, 0]), + tf.constant([0, 2]), + ] + + assigner = targetassigner.CenterNetTrackTargetAssigner( + stride=4, num_track_ids=3) + + (batch_indices, batch_weights, + track_targets) = assigner.assign_track_targets( + height=80, + width=80, + gt_track_ids_list=track_id_batch, + gt_boxes_list=box_batch) + return batch_indices, batch_weights, track_targets + + indices, weights, track_ids = self.execute(graph_fn, []) + + self.assertEqual(indices.shape, (3, 2, 3)) + self.assertEqual(track_ids.shape, (3, 2, 3)) + self.assertEqual(weights.shape, (3, 2)) + + np.testing.assert_array_equal(indices, + [[[0, 10, 10], [0, 15, 5]], + [[1, 15, 5], [1, 10, 10]], + [[2, 10, 10], [2, 7, 11]]]) + np.testing.assert_array_equal(track_ids, + [[[1, 0, 0], [0, 1, 0]], + [[0, 1, 0], [1, 0, 0]], + [[1, 0, 0], [0, 0, 1]]]) + np.testing.assert_array_equal(weights, [[1, 1], [1, 1], [1, 1]]) + + def test_assign_track_targets_weights(self): + """Test the assign_track_targets function with box weights.""" + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_lower_left, self._box_center_small]), + tf.constant([self._box_center_small, self._box_odd_coordinates]), + ] + track_id_batch = [ + tf.constant([0, 1]), + tf.constant([1, 0]), + tf.constant([0, 2]), + ] + weights_batch = [ + tf.constant([0.0, 1.0]), + tf.constant([1.0, 1.0]), + tf.constant([0.0, 0.0]) + ] + + assigner = targetassigner.CenterNetTrackTargetAssigner( + stride=4, num_track_ids=3) + + (batch_indices, batch_weights, + track_targets) = assigner.assign_track_targets( + height=80, + width=80, + gt_track_ids_list=track_id_batch, + gt_boxes_list=box_batch, + gt_weights_list=weights_batch) + return batch_indices, batch_weights, track_targets + + indices, weights, track_ids = self.execute(graph_fn, []) + + self.assertEqual(indices.shape, (3, 2, 3)) + self.assertEqual(track_ids.shape, (3, 2, 3)) + self.assertEqual(weights.shape, (3, 2)) + + np.testing.assert_array_equal(indices, + [[[0, 10, 10], [0, 15, 5]], + [[1, 15, 5], [1, 10, 10]], + [[2, 10, 10], [2, 7, 11]]]) + np.testing.assert_array_equal(track_ids, + [[[1, 0, 0], [0, 1, 0]], + [[0, 1, 0], [1, 0, 0]], + [[1, 0, 0], [0, 0, 1]]]) + np.testing.assert_array_equal(weights, [[0, 1], [1, 1], [0, 0]]) + # TODO(xwwang): Add a test for the case when no objects are detected. 
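The track-target expectations above combine two simple pieces: the indices are the same stride-4 box-center cells used in the box assigner tests, and each ground-truth track id is one-hot encoded with depth num_track_ids (so id 2 with three track ids becomes [0, 0, 1]); per-box weights pass through unchanged. A small illustrative sketch (center_cell and one_hot are not library functions):

import numpy as np

def one_hot(index, depth):
  v = np.zeros(depth, dtype=np.float32)
  v[index] = 1.0
  return v

def center_cell(box, height, width, stride):
  ymin, xmin, ymax, xmax = box
  return (int((ymin + ymax) / 2 * height / stride),
          int((xmin + xmax) / 2 * width / stride))

print(center_cell([0.0, 0.0, 1.0, 1.0], 80, 80, stride=4))  # (10, 10)
print(one_hot(2, depth=3))                                   # [0. 0. 1.]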
+ + +class CornerOffsetTargetAssignerTest(test_case.TestCase): + + def test_filter_overlap_min_area_empty(self): + """Test that empty masks work on CPU.""" + def graph_fn(masks): + return targetassigner.filter_mask_overlap_min_area(masks) + + masks = self.execute_cpu(graph_fn, [np.zeros((0, 5, 5), dtype=np.float32)]) + self.assertEqual(masks.shape, (0, 5, 5)) + + def test_filter_overlap_min_area(self): + """Test the object with min. area is selected instead of overlap.""" + def graph_fn(masks): + return targetassigner.filter_mask_overlap_min_area(masks) + + masks = np.zeros((3, 4, 4), dtype=np.float32) + masks[0, :2, :2] = 1.0 + masks[1, :3, :3] = 1.0 + masks[2, 3, 3] = 1.0 + + masks = self.execute(graph_fn, [masks]) + + self.assertAllClose(masks[0], + [[1, 1, 0, 0], + [1, 1, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]]) + self.assertAllClose(masks[1], + [[0, 0, 1, 0], + [0, 0, 1, 0], + [1, 1, 1, 0], + [0, 0, 0, 0]]) + + self.assertAllClose(masks[2], + [[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + + def test_assign_corner_offset_single_object(self): + """Test that corner offsets are correct with a single object.""" + assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1) + + def graph_fn(): + boxes = [ + tf.constant([[0., 0., 1., 1.]]) + ] + mask = np.zeros((1, 4, 4), dtype=np.float32) + mask[0, 1:3, 1:3] = 1.0 + + masks = [tf.constant(mask)] + return assigner.assign_corner_offset_targets(boxes, masks) + + corner_offsets, foreground = self.execute(graph_fn, []) + self.assertAllClose(foreground[0], + [[0, 0, 0, 0], + [0, 1, 1, 0], + [0, 1, 1, 0], + [0, 0, 0, 0]]) + + self.assertAllClose(corner_offsets[0, :, :, 0], + [[0, 0, 0, 0], + [0, -1, -1, 0], + [0, -2, -2, 0], + [0, 0, 0, 0]]) + self.assertAllClose(corner_offsets[0, :, :, 1], + [[0, 0, 0, 0], + [0, -1, -2, 0], + [0, -1, -2, 0], + [0, 0, 0, 0]]) + self.assertAllClose(corner_offsets[0, :, :, 2], + [[0, 0, 0, 0], + [0, 3, 3, 0], + [0, 2, 2, 0], + [0, 0, 0, 0]]) + self.assertAllClose(corner_offsets[0, :, :, 3], + [[0, 0, 0, 0], + [0, 3, 2, 0], + [0, 3, 2, 0], + [0, 0, 0, 0]]) + + def test_assign_corner_offset_multiple_objects(self): + """Test corner offsets are correct with multiple objects.""" + assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1) + + def graph_fn(): + boxes = [ + tf.constant([[0., 0., 1., 1.], [0., 0., 0., 0.]]), + tf.constant([[0., 0., .25, .25], [.25, .25, 1., 1.]]) + ] + mask1 = np.zeros((2, 4, 4), dtype=np.float32) + mask1[0, 0, 0] = 1.0 + mask1[0, 3, 3] = 1.0 + + mask2 = np.zeros((2, 4, 4), dtype=np.float32) + mask2[0, :2, :2] = 1.0 + mask2[1, 1:, 1:] = 1.0 + + masks = [tf.constant(mask1), tf.constant(mask2)] + return assigner.assign_corner_offset_targets(boxes, masks) + + corner_offsets, foreground = self.execute(graph_fn, []) + self.assertEqual(corner_offsets.shape, (2, 4, 4, 4)) + self.assertEqual(foreground.shape, (2, 4, 4)) + + self.assertAllClose(foreground[0], + [[1, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + + self.assertAllClose(corner_offsets[0, :, :, 0], + [[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, -3]]) + self.assertAllClose(corner_offsets[0, :, :, 1], + [[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, -3]]) + self.assertAllClose(corner_offsets[0, :, :, 2], + [[4, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + self.assertAllClose(corner_offsets[0, :, :, 3], + [[4, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + + self.assertAllClose(foreground[1], + [[1, 1, 0, 0], + [1, 1, 1, 1], + 
[0, 1, 1, 1], + [0, 1, 1, 1]]) + + self.assertAllClose(corner_offsets[1, :, :, 0], + [[0, 0, 0, 0], + [-1, -1, 0, 0], + [0, -1, -1, -1], + [0, -2, -2, -2]]) + self.assertAllClose(corner_offsets[1, :, :, 1], + [[0, -1, 0, 0], + [0, -1, -1, -2], + [0, 0, -1, -2], + [0, 0, -1, -2]]) + self.assertAllClose(corner_offsets[1, :, :, 2], + [[1, 1, 0, 0], + [0, 0, 3, 3], + [0, 2, 2, 2], + [0, 1, 1, 1]]) + self.assertAllClose(corner_offsets[1, :, :, 3], + [[1, 0, 0, 0], + [1, 0, 2, 1], + [0, 3, 2, 1], + [0, 3, 2, 1]]) + + def test_assign_corner_offsets_no_objects(self): + """Test assignment works with empty input on cpu.""" + assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1) + + def graph_fn(): + boxes = [ + tf.zeros((0, 4), dtype=tf.float32) + ] + masks = [tf.zeros((0, 5, 5), dtype=tf.float32)] + return assigner.assign_corner_offset_targets(boxes, masks) + + corner_offsets, foreground = self.execute_cpu(graph_fn, []) + self.assertAllClose(corner_offsets, np.zeros((1, 5, 5, 4))) + self.assertAllClose(foreground, np.zeros((1, 5, 5))) + + +class CenterNetTemporalOffsetTargetAssigner(test_case.TestCase): + + def setUp(self): + super(CenterNetTemporalOffsetTargetAssigner, self).setUp() + self._box_center = [0.0, 0.0, 1.0, 1.0] + self._box_center_small = [0.25, 0.25, 0.75, 0.75] + self._box_lower_left = [0.5, 0.0, 1.0, 0.5] + self._box_center_offset = [0.1, 0.05, 1.0, 1.0] + self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] + self._offset_center = [0.5, 0.4] + self._offset_center_small = [0.1, 0.1] + self._offset_lower_left = [-0.1, 0.1] + self._offset_center_offset = [0.4, 0.3] + self._offset_odd_coord = [0.125, -0.125] + + def test_assign_empty_groundtruths(self): + """Tests the assign_offset_targets function with empty inputs.""" + def graph_fn(): + box_batch = [ + tf.zeros((0, 4), dtype=tf.float32), + ] + + offset_batch = [ + tf.zeros((0, 2), dtype=tf.float32), + ] + + match_flag_batch = [ + tf.zeros((0), dtype=tf.float32), + ] + + assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4) + indices, temporal_offset, weights = assigner.assign_temporal_offset_targets( + 80, 80, box_batch, offset_batch, match_flag_batch) + return indices, temporal_offset, weights + indices, temporal_offset, weights = self.execute(graph_fn, []) + self.assertEqual(indices.shape, (0, 3)) + self.assertEqual(temporal_offset.shape, (0, 2)) + self.assertEqual(weights.shape, (0,)) + + def test_assign_offset_targets(self): + """Tests the assign_offset_targets function.""" + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center_offset]), + tf.constant([self._box_center_small, self._box_odd_coordinates]), + ] + + offset_batch = [ + tf.constant([self._offset_center, self._offset_lower_left]), + tf.constant([self._offset_center_offset]), + tf.constant([self._offset_center_small, self._offset_odd_coord]), + ] + + match_flag_batch = [ + tf.constant([1.0, 1.0]), + tf.constant([1.0]), + tf.constant([1.0, 1.0]), + ] + + assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4) + indices, temporal_offset, weights = assigner.assign_temporal_offset_targets( + 80, 80, box_batch, offset_batch, match_flag_batch) + return indices, temporal_offset, weights + indices, temporal_offset, weights = self.execute(graph_fn, []) + self.assertEqual(indices.shape, (5, 3)) + self.assertEqual(temporal_offset.shape, (5, 2)) + self.assertEqual(weights.shape, (5,)) + np.testing.assert_array_equal( + indices, + [[0, 10, 10], [0, 15, 5], [1, 11, 
10], [2, 10, 10], [2, 7, 11]]) + np.testing.assert_array_almost_equal( + temporal_offset, + [[0.5, 0.4], [-0.1, 0.1], [0.4, 0.3], [0.1, 0.1], [0.125, -0.125]]) + np.testing.assert_array_equal(weights, 1) + + def test_assign_offset_targets_with_match_flags(self): + """Tests the assign_offset_targets function with match flags.""" + def graph_fn(): + box_batch = [ + tf.constant([self._box_center, self._box_lower_left]), + tf.constant([self._box_center_offset]), + tf.constant([self._box_center_small, self._box_odd_coordinates]), + ] + + offset_batch = [ + tf.constant([self._offset_center, self._offset_lower_left]), + tf.constant([self._offset_center_offset]), + tf.constant([self._offset_center_small, self._offset_odd_coord]), + ] + + match_flag_batch = [ + tf.constant([0.0, 1.0]), + tf.constant([1.0]), + tf.constant([1.0, 1.0]), + ] + + cn_assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4) + weights_batch = [ + tf.constant([1.0, 0.0]), + tf.constant([1.0]), + tf.constant([1.0, 1.0]) + ] + indices, temporal_offset, weights = cn_assigner.assign_temporal_offset_targets( + 80, 80, box_batch, offset_batch, match_flag_batch, weights_batch) + return indices, temporal_offset, weights + indices, temporal_offset, weights = self.execute(graph_fn, []) + self.assertEqual(indices.shape, (5, 3)) + self.assertEqual(temporal_offset.shape, (5, 2)) + self.assertEqual(weights.shape, (5,)) + + np.testing.assert_array_equal( + indices, + [[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]]) + np.testing.assert_array_almost_equal( + temporal_offset, + [[0.5, 0.4], [-0.1, 0.1], [0.4, 0.3], [0.1, 0.1], [0.125, -0.125]]) + np.testing.assert_array_equal(weights, [0, 0, 1, 1, 1]) + + +class DETRTargetAssignerTest(test_case.TestCase): + + def test_assign_detr(self): + def graph_fn(pred_corners, groundtruth_box_corners, + groundtruth_labels, predicted_labels): + detr_target_assigner = targetassigner.DETRTargetAssigner() + pred_boxlist = box_list.BoxList(pred_corners) + groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) + result = detr_target_assigner.assign( + pred_boxlist, groundtruth_boxlist, + predicted_labels, groundtruth_labels) + (cls_targets, cls_weights, reg_targets, reg_weights) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + pred_corners = np.array([[0.25, 0.25, 0.4, 0.2], + [0.5, 0.8, 1.0, 0.8], + [0.9, 0.5, 0.1, 1.0]], dtype=np.float32) + groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9]], + dtype=np.float32) + predicted_labels = np.array([[-3.0, 3.0], [2.0, 9.4], [5.0, 1.0]], + dtype=np.float32) + groundtruth_labels = np.array([[0.0, 1.0], [0.0, 1.0]], + dtype=np.float32) + + exp_cls_targets = [[0, 1], [0, 1], [1, 0]] + exp_cls_weights = [[1, 1], [1, 1], [1, 1]] + exp_reg_targets = [[0.25, 0.25, 0.5, 0.5], + [0.7, 0.7, 0.4, 0.4], + [0, 0, 0, 0]] + exp_reg_weights = [1, 1, 0] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute_cpu( + graph_fn, [pred_corners, groundtruth_box_corners, + groundtruth_labels, predicted_labels]) + + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + def 
test_batch_assign_detr(self): + def graph_fn(pred_corners, groundtruth_box_corners, + groundtruth_labels, predicted_labels): + detr_target_assigner = targetassigner.DETRTargetAssigner() + result = detr_target_assigner.batch_assign( + pred_corners, groundtruth_box_corners, + [predicted_labels], [groundtruth_labels]) + (cls_targets, cls_weights, reg_targets, reg_weights) = result + return (cls_targets, cls_weights, reg_targets, reg_weights) + + pred_corners = np.array([[[0.25, 0.25, 0.4, 0.2], + [0.5, 0.8, 1.0, 0.8], + [0.9, 0.5, 0.1, 1.0]]], dtype=np.float32) + groundtruth_box_corners = np.array([[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.9, 0.9]]], + dtype=np.float32) + predicted_labels = np.array([[-3.0, 3.0], [2.0, 9.4], [5.0, 1.0]], + dtype=np.float32) + groundtruth_labels = np.array([[0.0, 1.0], [0.0, 1.0]], + dtype=np.float32) + + exp_cls_targets = [[[0, 1], [0, 1], [1, 0]]] + exp_cls_weights = [[[1, 1], [1, 1], [1, 1]]] + exp_reg_targets = [[[0.25, 0.25, 0.5, 0.5], + [0.7, 0.7, 0.4, 0.4], + [0, 0, 0, 0]]] + exp_reg_weights = [[1, 1, 0]] + + (cls_targets_out, + cls_weights_out, reg_targets_out, reg_weights_out) = self.execute_cpu( + graph_fn, [pred_corners, groundtruth_box_corners, + groundtruth_labels, predicted_labels]) + + self.assertAllClose(cls_targets_out, exp_cls_targets) + self.assertAllClose(cls_weights_out, exp_cls_weights) + self.assertAllClose(reg_targets_out, exp_reg_targets) + self.assertAllClose(reg_weights_out, exp_reg_weights) + self.assertEqual(cls_targets_out.dtype, np.float32) + self.assertEqual(cls_weights_out.dtype, np.float32) + self.assertEqual(reg_targets_out.dtype, np.float32) + self.assertEqual(reg_weights_out.dtype, np.float32) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/ava_label_map_v2.1.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/ava_label_map_v2.1.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..5e2c485682830919a09300ac851e6b0e4bdf3efb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/ava_label_map_v2.1.pbtxt @@ -0,0 +1,240 @@ +item { + name: "bend/bow (at the waist)" + id: 1 +} +item { + name: "crouch/kneel" + id: 3 +} +item { + name: "dance" + id: 4 +} +item { + name: "fall down" + id: 5 +} +item { + name: "get up" + id: 6 +} +item { + name: "jump/leap" + id: 7 +} +item { + name: "lie/sleep" + id: 8 +} +item { + name: "martial art" + id: 9 +} +item { + name: "run/jog" + id: 10 +} +item { + name: "sit" + id: 11 +} +item { + name: "stand" + id: 12 +} +item { + name: "swim" + id: 13 +} +item { + name: "walk" + id: 14 +} +item { + name: "answer phone" + id: 15 +} +item { + name: "carry/hold (an object)" + id: 17 +} +item { + name: "climb (e.g., a mountain)" + id: 20 +} +item { + name: "close (e.g., a door, a box)" + id: 22 +} +item { + name: "cut" + id: 24 +} +item { + name: "dress/put on clothing" + id: 26 +} +item { + name: "drink" + id: 27 +} +item { + name: "drive (e.g., a car, a truck)" + id: 28 +} +item { + name: "eat" + id: 29 +} +item { + name: "enter" + id: 30 +} +item { + name: "hit (an object)" + id: 34 +} +item { + name: "lift/pick up" + id: 36 +} +item { + name: "listen (e.g., to music)" + id: 37 +} +item { + name: "open (e.g., a window, a car door)" + id: 38 +} +item { + name: "play musical instrument" + id: 41 +} +item { + name: "point to (an object)" + id: 43 +} +item { + name: "pull (an object)" + id: 45 +} +item { + name: 
"push (an object)" + id: 46 +} +item { + name: "put down" + id: 47 +} +item { + name: "read" + id: 48 +} +item { + name: "ride (e.g., a bike, a car, a horse)" + id: 49 +} +item { + name: "sail boat" + id: 51 +} +item { + name: "shoot" + id: 52 +} +item { + name: "smoke" + id: 54 +} +item { + name: "take a photo" + id: 56 +} +item { + name: "text on/look at a cellphone" + id: 57 +} +item { + name: "throw" + id: 58 +} +item { + name: "touch (an object)" + id: 59 +} +item { + name: "turn (e.g., a screwdriver)" + id: 60 +} +item { + name: "watch (e.g., TV)" + id: 61 +} +item { + name: "work on a computer" + id: 62 +} +item { + name: "write" + id: 63 +} +item { + name: "fight/hit (a person)" + id: 64 +} +item { + name: "give/serve (an object) to (a person)" + id: 65 +} +item { + name: "grab (a person)" + id: 66 +} +item { + name: "hand clap" + id: 67 +} +item { + name: "hand shake" + id: 68 +} +item { + name: "hand wave" + id: 69 +} +item { + name: "hug (a person)" + id: 70 +} +item { + name: "kiss (a person)" + id: 72 +} +item { + name: "lift (a person)" + id: 73 +} +item { + name: "listen to (a person)" + id: 74 +} +item { + name: "push (another person)" + id: 76 +} +item { + name: "sing to (e.g., self, a person, a group)" + id: 77 +} +item { + name: "take (an object) from (a person)" + id: 78 +} +item { + name: "talk to (e.g., self, a person, a group)" + id: 79 +} +item { + name: "watch (a person)" + id: 80 +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/face_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/face_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..1c7355db1fd0d0bc468e42f881e65d1dc6d8b8e3 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/face_label_map.pbtxt @@ -0,0 +1,6 @@ +item { + name: "face" + id: 1 + display_name: "face" +} + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/face_person_with_keypoints_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/face_person_with_keypoints_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..181f11b289b7e0520ccc012514cf11338875a635 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/face_person_with_keypoints_label_map.pbtxt @@ -0,0 +1,102 @@ +item: { + id: 1 + name: 'face' + display_name: 'face' + keypoints { + id: 0 + label: "left_eye_center" + } + keypoints { + id: 1 + label: "right_eye_center" + } + keypoints { + id: 2 + label: "nose_tip" + } + keypoints { + id: 3 + label: "mouth_center" + } + keypoints { + id: 4 + label: "left_ear_tragion" + } + keypoints { + id: 5 + label: "right_ear_tragion" + } +} +item: { + id: 2 + name: 'Person' + display_name: 'PERSON' + keypoints { + id: 6 + label: "NOSE_TIP" + } + keypoints { + id: 7 + label: "LEFT_EYE" + } + keypoints { + id: 8 + label: "RIGHT_EYE" + } + keypoints { + id: 9 + label: "LEFT_EAR_TRAGION" + } + keypoints { + id: 10 + label: "RIGHT_EAR_TRAGION" + } + keypoints { + id: 11 + label: "LEFT_SHOULDER" + } + keypoints { + id: 12 + label: "RIGHT_SHOULDER" + } + keypoints { + id: 13 + label: "LEFT_ELBOW" + } + keypoints { + id: 14 + label: "RIGHT_ELBOW" + } + keypoints { + id: 15 + label: "LEFT_WRIST" + } + keypoints { + id: 16 + label: "RIGHT_WRIST" + } + keypoints { + id: 17 + label: "LEFT_HIP" + } + keypoints { + id: 18 + label: "RIGHT_HIP" + } + keypoints { + id: 19 + label: 
"LEFT_KNEE" + } + keypoints { + id: 20 + label: "RIGHT_KNEE" + } + keypoints { + id: 21 + label: "LEFT_ANKLE" + } + keypoints { + id: 22 + label: "RIGHT_ANKLE" + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/fgvc_2854_classes_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/fgvc_2854_classes_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..009797f046a136ba45aa224f74e420973af19527 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/fgvc_2854_classes_label_map.pbtxt @@ -0,0 +1,14270 @@ +item { + name: "147457" + id: 1 + display_name: "Nicrophorus tomentosus" +} +item { + name: "81923" + id: 2 + display_name: "Halyomorpha halys" +} +item { + name: "7" + id: 3 + display_name: "Aramus guarauna" +} +item { + name: "201041" + id: 4 + display_name: "Rupornis magnirostris" +} +item { + name: "65551" + id: 5 + display_name: "Hyla eximia" +} +item { + name: "106516" + id: 6 + display_name: "Nannothemis bella" +} +item { + name: "154287" + id: 7 + display_name: "Acalymma vittatum" +} +item { + name: "32798" + id: 8 + display_name: "Ramphotyphlops braminus" +} +item { + name: "8229" + id: 9 + display_name: "Cyanocitta cristata" +} +item { + name: "73766" + id: 10 + display_name: "Drymarchon melanurus" +} +item { + name: "409639" + id: 11 + display_name: "Aenetus virescens" +} +item { + name: "8234" + id: 12 + display_name: "Cyanocitta stelleri" +} +item { + name: "228593" + id: 13 + display_name: "Polygrammate hebraeicum" +} +item { + name: "53" + id: 14 + display_name: "Balearica regulorum" +} +item { + name: "57399" + id: 15 + display_name: "Fistularia commersonii" +} +item { + name: "81979" + id: 16 + display_name: "Syritta pipiens" +} +item { + name: "73788" + id: 17 + display_name: "Plestiodon fasciatus" +} +item { + name: "73790" + id: 18 + display_name: "Plestiodon inexpectatus" +} +item { + name: "16447" + id: 19 + display_name: "Pyrocephalus rubinus" +} +item { + name: "73792" + id: 20 + display_name: "Plestiodon laticeps" +} +item { + name: "49219" + id: 21 + display_name: "Anguilla rostrata" +} +item { + name: "73797" + id: 22 + display_name: "Plestiodon obsoletus" +} +item { + name: "73803" + id: 23 + display_name: "Plestiodon tetragrammus" +} +item { + name: "122956" + id: 24 + display_name: "Syntomoides imaon" +} +item { + name: "82003" + id: 25 + display_name: "Arion ater" +} +item { + name: "32854" + id: 26 + display_name: "Chamaeleo dilepis" +} +item { + name: "42341" + id: 27 + display_name: "Tragelaphus scriptus" +} +item { + name: "82018" + id: 28 + display_name: "Taeniopoda eques" +} +item { + name: "57443" + id: 29 + display_name: "Libellula quadrimaculata" +} +item { + name: "4885" + id: 30 + display_name: "Recurvirostra americana" +} +item { + name: "178403" + id: 31 + display_name: "Phalaenophana pyramusalis" +} +item { + name: "135027" + id: 32 + display_name: "Agalychnis dacnicolor" +} +item { + name: "49262" + id: 33 + display_name: "Haemulon sciurus" +} +item { + name: "98417" + id: 34 + display_name: "Cordulegaster diastatops" +} +item { + name: "57458" + id: 35 + display_name: "Ladona julia" +} +item { + name: "115" + id: 36 + display_name: "Ardeotis kori" +} +item { + name: "49269" + id: 37 + display_name: "Diodon holocanthus" +} +item { + name: "57463" + id: 38 + display_name: "Papilio canadensis" +} +item { + name: "82043" + id: 39 + display_name: "Monochamus scutellatus" +} +item { + name: "147580" + id: 40 + 
display_name: "Ceratotherium simum simum" +} +item { + name: "98430" + id: 41 + display_name: "Cordulia shurtleffii" +} +item { + name: "8319" + id: 42 + display_name: "Pica nuttalli" +} +item { + name: "43712" + id: 43 + display_name: "Dasyprocta punctata" +} +item { + name: "8335" + id: 44 + display_name: "Perisoreus canadensis" +} +item { + name: "508048" + id: 45 + display_name: "Antigone canadensis" +} +item { + name: "49297" + id: 46 + display_name: "Aetobatus narinari" +} +item { + name: "82069" + id: 47 + display_name: "Phyciodes pulchella" +} +item { + name: "73149" + id: 48 + display_name: "Parkesia noveboracensis" +} +item { + name: "180379" + id: 49 + display_name: "Ardea herodias occidentalis" +} +item { + name: "73884" + id: 50 + display_name: "Pantherophis emoryi" +} +item { + name: "106653" + id: 51 + display_name: "Nehalennia irene" +} +item { + name: "73887" + id: 52 + display_name: "Pantherophis guttatus" +} +item { + name: "73888" + id: 53 + display_name: "Pantherophis obsoletus" +} +item { + name: "162" + id: 54 + display_name: "Porzana carolina" +} +item { + name: "245925" + id: 55 + display_name: "Siproeta stelenes biplagiata" +} +item { + name: "117302" + id: 56 + display_name: "Physalia physalis" +} +item { + name: "57516" + id: 57 + display_name: "Bombus terrestris" +} +item { + name: "204995" + id: 58 + display_name: "Anas platyrhynchos diazi" +} +item { + name: "49348" + id: 59 + display_name: "Hyles lineata" +} +item { + name: "82117" + id: 60 + display_name: "Dolomedes tenebrosus" +} +item { + name: "114891" + id: 61 + display_name: "Varanus salvator" +} +item { + name: "319695" + id: 62 + display_name: "Epilachna mexicana" +} +item { + name: "41168" + id: 63 + display_name: "Desmodus rotundus" +} +item { + name: "13688" + id: 64 + display_name: "Motacilla cinerea" +} +item { + name: "57556" + id: 65 + display_name: "Papio ursinus" +} +item { + name: "16598" + id: 66 + display_name: "Empidonax difficilis" +} +item { + name: "16602" + id: 67 + display_name: "Empidonax minimus" +} +item { + name: "16604" + id: 68 + display_name: "Empidonax fulvifrons" +} +item { + name: "409181" + id: 69 + display_name: "Trite planiceps" +} +item { + name: "82144" + id: 70 + display_name: "Hemileuca eglanterina" +} +item { + name: "16611" + id: 71 + display_name: "Empidonax traillii" +} +item { + name: "82153" + id: 72 + display_name: "Ceratomia undulosa" +} +item { + name: "82155" + id: 73 + display_name: "Bittacomorpha clavipes" +} +item { + name: "205036" + id: 74 + display_name: "Xanthorhoe lacustrata" +} +item { + name: "16624" + id: 75 + display_name: "Empidonax hammondii" +} +item { + name: "16625" + id: 76 + display_name: "Empidonax occidentalis" +} +item { + name: "243" + id: 77 + display_name: "Rallus limicola" +} +item { + name: "41" + id: 78 + display_name: "Grus grus" +} +item { + name: "49402" + id: 79 + display_name: "Abudefduf saxatilis" +} +item { + name: "58550" + id: 80 + display_name: "Callophrys niphon" +} +item { + name: "205055" + id: 81 + display_name: "Zopherus nodulosus haldemani" +} +item { + name: "82177" + id: 82 + display_name: "Hermetia illucens" +} +item { + name: "9601" + id: 83 + display_name: "Quiscalus major" +} +item { + name: "7101" + id: 84 + display_name: "Branta leucopsis" +} +item { + name: "8470" + id: 85 + display_name: "Cyanocorax yucatanicus" +} +item { + name: "74009" + id: 86 + display_name: "Zamenis longissimus" +} +item { + name: "8474" + id: 87 + display_name: "Cyanocorax yncas" +} +item { + name: "82204" + id: 88 + 
display_name: "Nadata gibbosa" +} +item { + name: "123168" + id: 89 + display_name: "Ensatina eschscholtzii xanthoptica" +} +item { + name: "82210" + id: 90 + display_name: "Heterocampa biundata" +} +item { + name: "48284" + id: 91 + display_name: "Oniscus asellus" +} +item { + name: "4146" + id: 92 + display_name: "Oceanites oceanicus" +} +item { + name: "82225" + id: 93 + display_name: "Lophocampa caryae" +} +item { + name: "9609" + id: 94 + display_name: "Quiscalus niger" +} +item { + name: "65849" + id: 95 + display_name: "Incilius nebulifer" +} +item { + name: "207583" + id: 96 + display_name: "Miomantis caffra" +} +item { + name: "491839" + id: 97 + display_name: "Pyrausta insequalis" +} +item { + name: "74048" + id: 98 + display_name: "Alces americanus" +} +item { + name: "57665" + id: 99 + display_name: "Cotinis mutabilis" +} +item { + name: "65860" + id: 100 + display_name: "Incilius valliceps" +} +item { + name: "52911" + id: 101 + display_name: "Dolichovespula maculata" +} +item { + name: "8524" + id: 102 + display_name: "Psilorhinus morio" +} +item { + name: "49491" + id: 103 + display_name: "Thalassoma bifasciatum" +} +item { + name: "41301" + id: 104 + display_name: "Tadarida brasiliensis" +} +item { + name: "57687" + id: 105 + display_name: "Xylocopa varipuncta" +} +item { + name: "57689" + id: 106 + display_name: "Bombus vosnesenskii" +} +item { + name: "57690" + id: 107 + display_name: "Bombus sonorus" +} +item { + name: "33118" + id: 108 + display_name: "Basiliscus vittatus" +} +item { + name: "205151" + id: 109 + display_name: "Phlogophora meticulosa" +} +item { + name: "49504" + id: 110 + display_name: "Callinectes sapidus" +} +item { + name: "16737" + id: 111 + display_name: "Megarynchus pitangua" +} +item { + name: "357" + id: 112 + display_name: "Gallinula tenebrosa" +} +item { + name: "82278" + id: 113 + display_name: "Ameiurus melas" +} +item { + name: "82279" + id: 114 + display_name: "Automeris io" +} +item { + name: "505478" + id: 115 + display_name: "Gallus gallus domesticus" +} +item { + name: "33135" + id: 116 + display_name: "Crotaphytus collaris" +} +item { + name: "41328" + id: 117 + display_name: "Lavia frons" +} +item { + name: "196979" + id: 118 + display_name: "Anaxyrus boreas halophilus" +} +item { + name: "44902" + id: 119 + display_name: "Sigmodon hispidus" +} +item { + name: "1428" + id: 120 + display_name: "Numida meleagris" +} +item { + name: "119153" + id: 121 + display_name: "Junco hyemalis caniceps" +} +item { + name: "49539" + id: 122 + display_name: "Pisaster brevispinus" +} +item { + name: "328068" + id: 123 + display_name: "Belocaulus angustipes" +} +item { + name: "120214" + id: 124 + display_name: "Clostera albosigma" +} +item { + name: "16779" + id: 125 + display_name: "Tyrannus vociferans" +} +item { + name: "16782" + id: 126 + display_name: "Tyrannus tyrannus" +} +item { + name: "16783" + id: 127 + display_name: "Tyrannus forficatus" +} +item { + name: "16784" + id: 128 + display_name: "Tyrannus crassirostris" +} +item { + name: "57745" + id: 129 + display_name: "Linckia laevigata" +} +item { + name: "205202" + id: 130 + display_name: "Ecliptopera silaceata" +} +item { + name: "205203" + id: 131 + display_name: "Dyspteris abortivaria" +} +item { + name: "16791" + id: 132 + display_name: "Tyrannus verticalis" +} +item { + name: "16793" + id: 133 + display_name: "Tyrannus savana" +} +item { + name: "205213" + id: 134 + display_name: "Caripeta divisata" +} +item { + name: "49566" + id: 135 + display_name: "Cicindela sexguttata" +} +item 
{ + name: "491935" + id: 136 + display_name: "Thylacodes squamigerus" +} +item { + name: "205216" + id: 137 + display_name: "Cerma cerintha" +} +item { + name: "39665" + id: 138 + display_name: "Caretta caretta" +} +item { + name: "147881" + id: 139 + display_name: "Trichechus manatus latirostris" +} +item { + name: "28743" + id: 140 + display_name: "Salvadora hexalepis" +} +item { + name: "205231" + id: 141 + display_name: "Idaea dimidiata" +} +item { + name: "205233" + id: 142 + display_name: "Iridopsis larvaria" +} +item { + name: "205235" + id: 143 + display_name: "Leuconycta diphteroides" +} +item { + name: "436" + id: 144 + display_name: "Gallirallus australis" +} +item { + name: "205238" + id: 145 + display_name: "Metanema inatomaria" +} +item { + name: "49591" + id: 146 + display_name: "Lepomis macrochirus" +} +item { + name: "229817" + id: 147 + display_name: "Raphia frater" +} +item { + name: "49594" + id: 148 + display_name: "Pomoxis nigromaculatus" +} +item { + name: "65979" + id: 149 + display_name: "Lithobates catesbeianus" +} +item { + name: "49596" + id: 150 + display_name: "Salvelinus fontinalis" +} +item { + name: "65982" + id: 151 + display_name: "Lithobates clamitans" +} +item { + name: "8649" + id: 152 + display_name: "Calocitta formosa" +} +item { + name: "8650" + id: 153 + display_name: "Calocitta colliei" +} +item { + name: "82379" + id: 154 + display_name: "Hemaris thysbe" +} +item { + name: "49614" + id: 155 + display_name: "Lepomis gibbosus" +} +item { + name: "63028" + id: 156 + display_name: "Hypercompe scribonia" +} +item { + name: "39672" + id: 157 + display_name: "Eretmochelys imbricata" +} +item { + name: "66003" + id: 158 + display_name: "Lithobates pipiens" +} +item { + name: "197077" + id: 159 + display_name: "Vanessa kershawi" +} +item { + name: "473" + id: 160 + display_name: "Fulica americana" +} +item { + name: "147930" + id: 161 + display_name: "Rabidosa rabida" +} +item { + name: "147931" + id: 162 + display_name: "Panoquina ocola" +} +item { + name: "66012" + id: 163 + display_name: "Lithobates sylvaticus" +} +item { + name: "8671" + id: 164 + display_name: "Pachyramphus aglaiae" +} +item { + name: "41440" + id: 165 + display_name: "Phocoena phocoena" +} +item { + name: "27388" + id: 166 + display_name: "Carphophis amoenus" +} +item { + name: "82418" + id: 167 + display_name: "Cicindela punctulata" +} +item { + name: "25078" + id: 168 + display_name: "Gastrophryne carolinensis" +} +item { + name: "82425" + id: 169 + display_name: "Cicindela repanda" +} +item { + name: "143446" + id: 170 + display_name: "Paonias myops" +} +item { + name: "41478" + id: 171 + display_name: "Eschrichtius robustus" +} +item { + name: "5200" + id: 172 + display_name: "Buteo lagopus" +} +item { + name: "148908" + id: 173 + display_name: "Chrysodeixis includens" +} +item { + name: "41482" + id: 174 + display_name: "Tursiops truncatus" +} +item { + name: "6914" + id: 175 + display_name: "Cygnus atratus" +} +item { + name: "464301" + id: 176 + display_name: "Philesturnus rufusater" +} +item { + name: "129226" + id: 177 + display_name: "Chytolita morbidalis" +} +item { + name: "180759" + id: 178 + display_name: "Aphonopelma iodius" +} +item { + name: "135318" + id: 179 + display_name: "Apantesis phalerata" +} +item { + name: "49699" + id: 180 + display_name: "Pisaster ochraceus" +} +item { + name: "49700" + id: 181 + display_name: "Coluber lateralis lateralis" +} +item { + name: "61532" + id: 182 + display_name: "Propylea quatuordecimpunctata" +} +item { + name: "4368" + id: 
183 + display_name: "Larus marinus" +} +item { + name: "41521" + id: 184 + display_name: "Orcinus orca" +} +item { + name: "49716" + id: 185 + display_name: "Paonias excaecata" +} +item { + name: "41526" + id: 186 + display_name: "Delphinus delphis" +} +item { + name: "49723" + id: 187 + display_name: "Pugettia producta" +} +item { + name: "16956" + id: 188 + display_name: "Pitangus sulphuratus" +} +item { + name: "210607" + id: 189 + display_name: "Diastictis fracturalis" +} +item { + name: "148030" + id: 190 + display_name: "Equus asinus" +} +item { + name: "6924" + id: 191 + display_name: "Anas rubripes" +} +item { + name: "30844" + id: 192 + display_name: "Bothriechis schlegelii" +} +item { + name: "123628" + id: 193 + display_name: "Argynnis paphia" +} +item { + name: "131676" + id: 194 + display_name: "Anthus novaeseelandiae novaeseelandiae" +} +item { + name: "41566" + id: 195 + display_name: "Megaptera novaeangliae" +} +item { + name: "49759" + id: 196 + display_name: "Pyrgus oileus" +} +item { + name: "49761" + id: 197 + display_name: "Anartia jatrophae" +} +item { + name: "49766" + id: 198 + display_name: "Heliconius charithonia" +} +item { + name: "33383" + id: 199 + display_name: "Coleonyx brevis" +} +item { + name: "33384" + id: 200 + display_name: "Coleonyx elegans" +} +item { + name: "312764" + id: 201 + display_name: "Euptoieta hegesia meridiania" +} +item { + name: "82538" + id: 202 + display_name: "Vanessa gonerilla" +} +item { + name: "33387" + id: 203 + display_name: "Coleonyx variegatus" +} +item { + name: "56082" + id: 204 + display_name: "Aeshna canadensis" +} +item { + name: "17008" + id: 205 + display_name: "Sayornis phoebe" +} +item { + name: "200808" + id: 206 + display_name: "Sceloporus graciosus vandenburgianus" +} +item { + name: "17013" + id: 207 + display_name: "Sayornis nigricans" +} +item { + name: "122381" + id: 208 + display_name: "Cupido comyntas" +} +item { + name: "123516" + id: 209 + display_name: "Mydas clavatus" +} +item { + name: "8834" + id: 210 + display_name: "Tityra semifasciata" +} +item { + name: "146199" + id: 211 + display_name: "Lampropeltis californiae" +} +item { + name: "17858" + id: 212 + display_name: "Dryocopus lineatus" +} +item { + name: "334616" + id: 213 + display_name: "Battus philenor hirsuta" +} +item { + name: "82582" + id: 214 + display_name: "Labidomera clivicollis" +} +item { + name: "204699" + id: 215 + display_name: "Pseudothyatira cymatophoroides" +} +item { + name: "41638" + id: 216 + display_name: "Ursus americanus" +} +item { + name: "27420" + id: 217 + display_name: "Desmognathus fuscus" +} +item { + name: "81584" + id: 218 + display_name: "Anisota virginiensis" +} +item { + name: "49848" + id: 219 + display_name: "Navanax inermis" +} +item { + name: "143476" + id: 220 + display_name: "Calledapteryx dryopterata" +} +item { + name: "41663" + id: 221 + display_name: "Procyon lotor" +} +item { + name: "49857" + id: 222 + display_name: "Aplysia vaccaria" +} +item { + name: "41673" + id: 223 + display_name: "Nasua narica" +} +item { + name: "41676" + id: 224 + display_name: "Bassariscus astutus" +} +item { + name: "27427" + id: 225 + display_name: "Aneides lugubris" +} +item { + name: "418530" + id: 226 + display_name: "Porphyrio melanotus" +} +item { + name: "311419" + id: 227 + display_name: "Neobernaya spadicea" +} +item { + name: "113502" + id: 228 + display_name: "Sympetrum costiferum" +} +item { + name: "66278" + id: 229 + display_name: "Oophaga pumilio" +} +item { + name: "6951" + id: 230 + display_name: "Anas 
bahamensis" +} +item { + name: "213740" + id: 231 + display_name: "Antaeotricha schlaegeri" +} +item { + name: "143485" + id: 232 + display_name: "Xanthorhoe ferrugata" +} +item { + name: "120275" + id: 233 + display_name: "Euphyia intermediata" +} +item { + name: "48035" + id: 234 + display_name: "Strongylocentrotus purpuratus" +} +item { + name: "41728" + id: 235 + display_name: "Mirounga angustirostris" +} +item { + name: "41733" + id: 236 + display_name: "Halichoerus grypus" +} +item { + name: "41740" + id: 237 + display_name: "Zalophus californianus" +} +item { + name: "118914" + id: 238 + display_name: "Echinargus isola" +} +item { + name: "4936" + id: 239 + display_name: "Egretta novaehollandiae" +} +item { + name: "131862" + id: 240 + display_name: "Typocerus velutinus" +} +item { + name: "55401" + id: 241 + display_name: "Pieris brassicae" +} +item { + name: "41752" + id: 242 + display_name: "Arctocephalus forsteri" +} +item { + name: "41755" + id: 243 + display_name: "Eumetopias jubatus" +} +item { + name: "123676" + id: 244 + display_name: "Anas crecca carolinensis" +} +item { + name: "41763" + id: 245 + display_name: "Phocarctos hookeri" +} +item { + name: "181034" + id: 246 + display_name: "Cervus elaphus canadensis" +} +item { + name: "49964" + id: 247 + display_name: "Ginglymostoma cirratum" +} +item { + name: "213809" + id: 248 + display_name: "Anticarsia gemmatalis" +} +item { + name: "49972" + id: 249 + display_name: "Battus philenor" +} +item { + name: "205623" + id: 250 + display_name: "Microstylum morosum" +} +item { + name: "336697" + id: 251 + display_name: "Arctia villica" +} +item { + name: "41789" + id: 252 + display_name: "Taxidea taxus" +} +item { + name: "48724" + id: 253 + display_name: "Phidiana hiltoni" +} +item { + name: "123713" + id: 254 + display_name: "Neoscona oaxacensis" +} +item { + name: "33602" + id: 255 + display_name: "Tarentola mauritanica" +} +item { + name: "846" + id: 256 + display_name: "Alectoris chukar" +} +item { + name: "41808" + id: 257 + display_name: "Mustela erminea" +} +item { + name: "50001" + id: 258 + display_name: "Terrapene carolina carolina" +} +item { + name: "41810" + id: 259 + display_name: "Mustela frenata" +} +item { + name: "82774" + id: 260 + display_name: "Oryctes nasicornis" +} +item { + name: "41815" + id: 261 + display_name: "Mustela nivalis" +} +item { + name: "4239" + id: 262 + display_name: "Tachybaptus dominicus" +} +item { + name: "344926" + id: 263 + display_name: "Artemisiospiza belli" +} +item { + name: "82792" + id: 264 + display_name: "Celastrina neglecta" +} +item { + name: "41841" + id: 265 + display_name: "Meles meles" +} +item { + name: "882" + id: 266 + display_name: "Gallus gallus" +} +item { + name: "125758" + id: 267 + display_name: "Mercenaria mercenaria" +} +item { + name: "9081" + id: 268 + display_name: "Cardinalis sinuatus" +} +item { + name: "9083" + id: 269 + display_name: "Cardinalis cardinalis" +} +item { + name: "9092" + id: 270 + display_name: "Melospiza lincolnii" +} +item { + name: "4246" + id: 271 + display_name: "Podilymbus podiceps" +} +item { + name: "9096" + id: 272 + display_name: "Melospiza georgiana" +} +item { + name: "906" + id: 273 + display_name: "Meleagris gallopavo" +} +item { + name: "50059" + id: 274 + display_name: "Limacia cockerelli" +} +item { + name: "394124" + id: 275 + display_name: "Orthodera novaezealandiae" +} +item { + name: "82832" + id: 276 + display_name: "Cosmopepla lintneriana" +} +item { + name: "913" + id: 277 + display_name: "Meleagris ocellata" +} 
+item { + name: "41877" + id: 278 + display_name: "Conepatus leuconotus" +} +item { + name: "196419" + id: 279 + display_name: "Euborellia annulipes" +} +item { + name: "50071" + id: 280 + display_name: "Erynnis horatius" +} +item { + name: "41880" + id: 281 + display_name: "Mephitis mephitis" +} +item { + name: "50073" + id: 282 + display_name: "Dryas iulia" +} +item { + name: "173793" + id: 283 + display_name: "Diphthera festiva" +} +item { + name: "41886" + id: 284 + display_name: "Crocuta crocuta" +} +item { + name: "30683" + id: 285 + display_name: "Agkistrodon contortrix contortrix" +} +item { + name: "931" + id: 286 + display_name: "Lagopus lagopus" +} +item { + name: "41901" + id: 287 + display_name: "Herpestes javanicus" +} +item { + name: "143517" + id: 288 + display_name: "Biston betularia" +} +item { + name: "9139" + id: 289 + display_name: "Spizella atrogularis" +} +item { + name: "8350" + id: 290 + display_name: "Pyrrhocorax graculus" +} +item { + name: "9144" + id: 291 + display_name: "Spizella breweri" +} +item { + name: "12936" + id: 292 + display_name: "Sialia currucoides" +} +item { + name: "9152" + id: 293 + display_name: "Spizella pusilla" +} +item { + name: "68229" + id: 294 + display_name: "Tramea carolina" +} +item { + name: "6987" + id: 295 + display_name: "Anas superciliosa" +} +item { + name: "9156" + id: 296 + display_name: "Passerella iliaca" +} +item { + name: "202315" + id: 297 + display_name: "Romaleon antennarium" +} +item { + name: "4257" + id: 298 + display_name: "Phoenicopterus ruber" +} +item { + name: "25545" + id: 299 + display_name: "Rana aurora" +} +item { + name: "15282" + id: 300 + display_name: "Sylvia atricapilla" +} +item { + name: "103927" + id: 301 + display_name: "Ladona deplanata" +} +item { + name: "17356" + id: 302 + display_name: "Vireo bellii" +} +item { + name: "26765" + id: 303 + display_name: "Ambystoma mavortium" +} +item { + name: "205777" + id: 304 + display_name: "Plectrodera scalator" +} +item { + name: "17362" + id: 305 + display_name: "Vireo plumbeus" +} +item { + name: "99283" + id: 306 + display_name: "Didymops transversa" +} +item { + name: "17364" + id: 307 + display_name: "Vireo philadelphicus" +} +item { + name: "17365" + id: 308 + display_name: "Vireo flavifrons" +} +item { + name: "17366" + id: 309 + display_name: "Vireo olivaceus" +} +item { + name: "9182" + id: 310 + display_name: "Zonotrichia querula" +} +item { + name: "17375" + id: 311 + display_name: "Vireo huttoni" +} +item { + name: "9184" + id: 312 + display_name: "Zonotrichia albicollis" +} +item { + name: "9185" + id: 313 + display_name: "Zonotrichia atricapilla" +} +item { + name: "50147" + id: 314 + display_name: "Celithemis eponina" +} +item { + name: "47585" + id: 315 + display_name: "Crassostrea virginica" +} +item { + name: "9195" + id: 316 + display_name: "Emberiza citrinella" +} +item { + name: "41964" + id: 317 + display_name: "Panthera leo" +} +item { + name: "6994" + id: 318 + display_name: "Bucephala islandica" +} +item { + name: "52506" + id: 319 + display_name: "Adalia bipunctata" +} +item { + name: "9201" + id: 320 + display_name: "Emberiza schoeniclus" +} +item { + name: "17394" + id: 321 + display_name: "Vireo gilvus" +} +item { + name: "25591" + id: 322 + display_name: "Rana temporaria" +} +item { + name: "41976" + id: 323 + display_name: "Lynx rufus" +} +item { + name: "214015" + id: 324 + display_name: "Apoda y-inversum" +} +item { + name: "50176" + id: 325 + display_name: "Enallagma vesperum" +} +item { + name: "99331" + id: 326 + 
display_name: "Diplacodes trivialis" +} +item { + name: "50181" + id: 327 + display_name: "Loxosceles reclusa" +} +item { + name: "74758" + id: 328 + display_name: "Neovison vison" +} +item { + name: "123912" + id: 329 + display_name: "Charaxes jasius" +} +item { + name: "41997" + id: 330 + display_name: "Leopardus pardalis" +} +item { + name: "123920" + id: 331 + display_name: "Dorcus parallelipipedus" +} +item { + name: "132334" + id: 332 + display_name: "Urbanus procne" +} +item { + name: "123922" + id: 333 + display_name: "Abudefduf sordidus" +} +item { + name: "9236" + id: 334 + display_name: "Serinus serinus" +} +item { + name: "42007" + id: 335 + display_name: "Puma concolor" +} +item { + name: "9240" + id: 336 + display_name: "Serinus mozambicus" +} +item { + name: "148506" + id: 337 + display_name: "Melanis pixe" +} +item { + name: "58399" + id: 338 + display_name: "Urosalpinx cinerea" +} +item { + name: "312353" + id: 339 + display_name: "Leptophobia aripa elodia" +} +item { + name: "148517" + id: 340 + display_name: "Heliopetes laviana" +} +item { + name: "73905" + id: 341 + display_name: "Phrynosoma cornutum" +} +item { + name: "39772" + id: 342 + display_name: "Chrysemys picta marginata" +} +item { + name: "25646" + id: 343 + display_name: "Rana boylii" +} +item { + name: "62984" + id: 344 + display_name: "Aedes albopictus" +} +item { + name: "123959" + id: 345 + display_name: "Ensatina eschscholtzii oregonensis" +} +item { + name: "1081" + id: 346 + display_name: "Lophura leucomelanos" +} +item { + name: "39775" + id: 347 + display_name: "Chrysemys picta picta" +} +item { + name: "42046" + id: 348 + display_name: "Canis mesomelas" +} +item { + name: "42048" + id: 349 + display_name: "Canis lupus" +} +item { + name: "42051" + id: 350 + display_name: "Canis latrans" +} +item { + name: "9284" + id: 351 + display_name: "Euphonia elegantissima" +} +item { + name: "25669" + id: 352 + display_name: "Rana dalmatina" +} +item { + name: "9287" + id: 353 + display_name: "Euphonia hirundinacea" +} +item { + name: "9291" + id: 354 + display_name: "Euphonia affinis" +} +item { + name: "222284" + id: 355 + display_name: "Iridopsis defectaria" +} +item { + name: "74832" + id: 356 + display_name: "Papio anubis" +} +item { + name: "148563" + id: 357 + display_name: "Myscelia ethusa" +} +item { + name: "42069" + id: 358 + display_name: "Vulpes vulpes" +} +item { + name: "9743" + id: 359 + display_name: "Agelaius tricolor" +} +item { + name: "42076" + id: 360 + display_name: "Urocyon cinereoargenteus" +} +item { + name: "509025" + id: 361 + display_name: "Momotus lessonii" +} +item { + name: "17506" + id: 362 + display_name: "Zosterops japonicus" +} +item { + name: "4283" + id: 363 + display_name: "Phalacrocorax pelagicus" +} +item { + name: "58469" + id: 364 + display_name: "Thorybes pylades" +} +item { + name: "9319" + id: 365 + display_name: "Icterus cucullatus" +} +item { + name: "58473" + id: 366 + display_name: "Erynnis icelus" +} +item { + name: "58475" + id: 367 + display_name: "Erynnis juvenalis" +} +item { + name: "42093" + id: 368 + display_name: "Lycaon pictus" +} +item { + name: "58478" + id: 369 + display_name: "Erynnis baptisiae" +} +item { + name: "9328" + id: 370 + display_name: "Icterus graduacauda" +} +item { + name: "58481" + id: 371 + display_name: "Ancyloxypha numitor" +} +item { + name: "132210" + id: 372 + display_name: "Deloyala guttata" +} +item { + name: "58484" + id: 373 + display_name: "Thymelicus lineola" +} +item { + name: "13701" + id: 374 + display_name: 
"Motacilla aguimp" +} +item { + name: "410743" + id: 375 + display_name: "Anas superciliosa \303\227 platyrhynchos" +} +item { + name: "9336" + id: 376 + display_name: "Icterus pustulatus" +} +item { + name: "9339" + id: 377 + display_name: "Icterus gularis" +} +item { + name: "124031" + id: 378 + display_name: "Agrius convolvuli" +} +item { + name: "42113" + id: 379 + display_name: "Pecari tajacu" +} +item { + name: "132227" + id: 380 + display_name: "Lethe appalachia" +} +item { + name: "113516" + id: 381 + display_name: "Sympetrum madidum" +} +item { + name: "58509" + id: 382 + display_name: "Anatrytone logan" +} +item { + name: "83086" + id: 383 + display_name: "Eurytides marcellus" +} +item { + name: "58511" + id: 384 + display_name: "Poanes viator" +} +item { + name: "83090" + id: 385 + display_name: "Epimecis hortaria" +} +item { + name: "115859" + id: 386 + display_name: "Micrurus tener tener" +} +item { + name: "129902" + id: 387 + display_name: "Camponotus pennsylvanicus" +} +item { + name: "42134" + id: 388 + display_name: "Sus scrofa" +} +item { + name: "58519" + id: 389 + display_name: "Pompeius verna" +} +item { + name: "205977" + id: 390 + display_name: "Coccinella undecimpunctata" +} +item { + name: "58523" + id: 391 + display_name: "Papilio polyxenes" +} +item { + name: "58525" + id: 392 + display_name: "Papilio troilus" +} +item { + name: "410783" + id: 393 + display_name: "Hypoblemum albovittatum" +} +item { + name: "9376" + id: 394 + display_name: "Carduelis cannabina" +} +item { + name: "58531" + id: 395 + display_name: "Colias philodice" +} +item { + name: "50340" + id: 396 + display_name: "Hylephila phyleus" +} +item { + name: "42149" + id: 397 + display_name: "Hippopotamus amphibius" +} +item { + name: "50342" + id: 398 + display_name: "Erythrodiplax umbrata" +} +item { + name: "12883" + id: 399 + display_name: "Catharus minimus" +} +item { + name: "28557" + id: 400 + display_name: "Storeria occipitomaculata" +} +item { + name: "199" + id: 401 + display_name: "Amaurornis phoenicurus" +} +item { + name: "58541" + id: 402 + display_name: "Satyrium liparops" +} +item { + name: "58543" + id: 403 + display_name: "Callophrys augustinus" +} +item { + name: "42161" + id: 404 + display_name: "Dama dama" +} +item { + name: "61508" + id: 405 + display_name: "Ischnura elegans" +} +item { + name: "1204" + id: 406 + display_name: "Pavo cristatus" +} +item { + name: "42166" + id: 407 + display_name: "Axis axis" +} +item { + name: "146797" + id: 408 + display_name: "Platynota idaeusalis" +} +item { + name: "58556" + id: 409 + display_name: "Celastrina ladon" +} +item { + name: "367477" + id: 410 + display_name: "Rallus crepitans" +} +item { + name: "58561" + id: 411 + display_name: "Libytheana carinenta" +} +item { + name: "58563" + id: 412 + display_name: "Speyeria aphrodite" +} +item { + name: "58564" + id: 413 + display_name: "Boloria bellona" +} +item { + name: "413489" + id: 414 + display_name: "Nestor meridionalis septentrionalis" +} +item { + name: "42184" + id: 415 + display_name: "Capreolus capreolus" +} +item { + name: "9419" + id: 416 + display_name: "Pipilo chlorurus" +} +item { + name: "9420" + id: 417 + display_name: "Pipilo maculatus" +} +item { + name: "9424" + id: 418 + display_name: "Pipilo erythrophthalmus" +} +item { + name: "99539" + id: 419 + display_name: "Dorocordulia libera" +} +item { + name: "58580" + id: 420 + display_name: "Polygonia progne" +} +item { + name: "58581" + id: 421 + display_name: "Nymphalis vaualbum" +} +item { + name: "42199" + id: 422 + 
display_name: "Rangifer tarandus" +} +item { + name: "58586" + id: 423 + display_name: "Limenitis archippus" +} +item { + name: "58587" + id: 424 + display_name: "Asterocampa clyton" +} +item { + name: "42206" + id: 425 + display_name: "Cervus elaphus" +} +item { + name: "312543" + id: 426 + display_name: "Anartia jatrophae luteipicta" +} +item { + name: "204094" + id: 427 + display_name: "Cairina moschata domestica" +} +item { + name: "4304" + id: 428 + display_name: "Phalacrocorax varius" +} +item { + name: "42210" + id: 429 + display_name: "Cervus nippon" +} +item { + name: "17638" + id: 430 + display_name: "Picoides dorsalis" +} +item { + name: "132330" + id: 431 + display_name: "Chlosyne janais" +} +item { + name: "58603" + id: 432 + display_name: "Megisto cymela" +} +item { + name: "42220" + id: 433 + display_name: "Odocoileus hemionus" +} +item { + name: "17645" + id: 434 + display_name: "Picoides nuttallii" +} +item { + name: "58606" + id: 435 + display_name: "Cercyonis pegala" +} +item { + name: "42223" + id: 436 + display_name: "Odocoileus virginianus" +} +item { + name: "58609" + id: 437 + display_name: "Lepisosteus osseus" +} +item { + name: "17650" + id: 438 + display_name: "Picoides scalaris" +} +item { + name: "132339" + id: 439 + display_name: "Anthanassa texana" +} +item { + name: "58612" + id: 440 + display_name: "Carassius auratus" +} +item { + name: "1406" + id: 441 + display_name: "Callipepla gambelii" +} +item { + name: "9462" + id: 442 + display_name: "Pyrrhula pyrrhula" +} +item { + name: "4308" + id: 443 + display_name: "Phalacrocorax brasilianus" +} +item { + name: "17660" + id: 444 + display_name: "Picoides pubescens" +} +item { + name: "1280" + id: 445 + display_name: "Colinus virginianus" +} +item { + name: "129920" + id: 446 + display_name: "Calliostoma ligatum" +} +item { + name: "58627" + id: 447 + display_name: "Perca flavescens" +} +item { + name: "148742" + id: 448 + display_name: "Hamadryas februa" +} +item { + name: "39809" + id: 449 + display_name: "Terrapene ornata ornata" +} +item { + name: "115979" + id: 450 + display_name: "Plestiodon skiltonianus skiltonianus" +} +item { + name: "9484" + id: 451 + display_name: "Sporophila torqueola" +} +item { + name: "17678" + id: 452 + display_name: "Picoides villosus" +} +item { + name: "3862" + id: 453 + display_name: "Calidris pusilla" +} +item { + name: "70421" + id: 454 + display_name: "Acris blanchardi" +} +item { + name: "124183" + id: 455 + display_name: "Phlogophora periculosa" +} +item { + name: "124184" + id: 456 + display_name: "Plodia interpunctella" +} +item { + name: "99609" + id: 457 + display_name: "Dromogomphus spinosus" +} +item { + name: "99610" + id: 458 + display_name: "Dromogomphus spoliatus" +} +item { + name: "17694" + id: 459 + display_name: "Picoides arcticus" +} +item { + name: "113521" + id: 460 + display_name: "Sympetrum pallipes" +} +item { + name: "320801" + id: 461 + display_name: "Aspidoscelis tesselata" +} +item { + name: "7047" + id: 462 + display_name: "Aythya marila" +} +item { + name: "4317" + id: 463 + display_name: "Phaethon aethereus" +} +item { + name: "81606" + id: 464 + display_name: "Littorina littorea" +} +item { + name: "99891" + id: 465 + display_name: "Enallagma aspersum" +} +item { + name: "9528" + id: 466 + display_name: "Sturnella magna" +} +item { + name: "99641" + id: 467 + display_name: "Dythemis fugax" +} +item { + name: "99644" + id: 468 + display_name: "Dythemis nigrescens" +} +item { + name: "39818" + id: 469 + display_name: "Terrapene carolina 
triunguis" +} +item { + name: "99647" + id: 470 + display_name: "Dythemis velox" +} +item { + name: "148800" + id: 471 + display_name: "Chioides albofasciatus" +} +item { + name: "19339" + id: 472 + display_name: "Melopsittacus undulatus" +} +item { + name: "47509" + id: 473 + display_name: "Diaulula sandiegensis" +} +item { + name: "148810" + id: 474 + display_name: "Anaea aidea" +} +item { + name: "123070" + id: 475 + display_name: "Capra hircus" +} +item { + name: "7054" + id: 476 + display_name: "Aythya affinis" +} +item { + name: "99897" + id: 477 + display_name: "Enallagma civile" +} +item { + name: "42328" + id: 478 + display_name: "Kobus ellipsiprymnus" +} +item { + name: "48328" + id: 479 + display_name: "Aurelia aurita" +} +item { + name: "132445" + id: 480 + display_name: "Conchylodes ovulalis" +} +item { + name: "215271" + id: 481 + display_name: "Bleptina caradrinalis" +} +item { + name: "83297" + id: 482 + display_name: "Scarus rubroviolaceus" +} +item { + name: "42347" + id: 483 + display_name: "Rupicapra rupicapra" +} +item { + name: "7058" + id: 484 + display_name: "Aythya novaeseelandiae" +} +item { + name: "52457" + id: 485 + display_name: "Chaetodon auriga" +} +item { + name: "1392" + id: 486 + display_name: "Cyrtonyx montezumae" +} +item { + name: "4328" + id: 487 + display_name: "Pelecanus occidentalis" +} +item { + name: "7647" + id: 488 + display_name: "Cinclus cinclus" +} +item { + name: "148856" + id: 489 + display_name: "Anteos clorinde" +} +item { + name: "7060" + id: 490 + display_name: "Chen rossii" +} +item { + name: "58750" + id: 491 + display_name: "Nomophila nearctica" +} +item { + name: "1409" + id: 492 + display_name: "Callipepla californica" +} +item { + name: "9602" + id: 493 + display_name: "Quiscalus quiscula" +} +item { + name: "296326" + id: 494 + display_name: "Oncopeltus sexmaculatus" +} +item { + name: "9607" + id: 495 + display_name: "Quiscalus mexicanus" +} +item { + name: "319724" + id: 496 + display_name: "Euphoria kernii" +} +item { + name: "1419" + id: 497 + display_name: "Callipepla squamata" +} +item { + name: "148883" + id: 498 + display_name: "Eantis tamenund" +} +item { + name: "42391" + id: 499 + display_name: "Ovis canadensis" +} +item { + name: "107937" + id: 500 + display_name: "Orthemis discolor" +} +item { + name: "42405" + id: 501 + display_name: "Syncerus caffer" +} +item { + name: "42408" + id: 502 + display_name: "Bison bison" +} +item { + name: "116137" + id: 503 + display_name: "Sceloporus cowlesi" +} +item { + name: "326296" + id: 504 + display_name: "Bufo bufo" +} +item { + name: "148907" + id: 505 + display_name: "Cydia latiferreana" +} +item { + name: "42414" + id: 506 + display_name: "Oreamnos americanus" +} +item { + name: "116143" + id: 507 + display_name: "Sceloporus tristichus" +} +item { + name: "99912" + id: 508 + display_name: "Enallagma geminatum" +} +item { + name: "226889" + id: 509 + display_name: "Pangrapta decoralis" +} +item { + name: "42429" + id: 510 + display_name: "Antilocapra americana" +} +item { + name: "17855" + id: 511 + display_name: "Dryocopus pileatus" +} +item { + name: "107974" + id: 512 + display_name: "Orthetrum sabina" +} +item { + name: "56225" + id: 513 + display_name: "Polygonia c-album" +} +item { + name: "67016" + id: 514 + display_name: "Rana draytonii" +} +item { + name: "132553" + id: 515 + display_name: "Strymon istapa" +} +item { + name: "73155" + id: 516 + display_name: "Passerina caerulea" +} +item { + name: "26074" + id: 517 + display_name: "Crocodylus moreletii" +} +item { 
+ name: "171903" + id: 518 + display_name: "Oligyra orbiculata" +} +item { + name: "26085" + id: 519 + display_name: "Crocodylus acutus" +} +item { + name: "143613" + id: 520 + display_name: "Homophoberia apicosa" +} +item { + name: "5715" + id: 521 + display_name: "Amazilia beryllina" +} +item { + name: "9721" + id: 522 + display_name: "Geothlypis trichas" +} +item { + name: "154446" + id: 523 + display_name: "Lambdina fiscellaria" +} +item { + name: "236841" + id: 524 + display_name: "Lichanura orcutti" +} +item { + name: "20737" + id: 525 + display_name: "Trogon melanocephalus" +} +item { + name: "124431" + id: 526 + display_name: "Cycloneda sanguinea" +} +item { + name: "124432" + id: 527 + display_name: "Deroceras reticulatum" +} +item { + name: "39566" + id: 528 + display_name: "Apalone ferox" +} +item { + name: "149017" + id: 529 + display_name: "Chlorochlamys chloroleucaria" +} +item { + name: "15281" + id: 530 + display_name: "Sylvia communis" +} +item { + name: "312873" + id: 531 + display_name: "Anartia fatima fatima" +} +item { + name: "9771" + id: 532 + display_name: "Pinicola enucleator" +} +item { + name: "39858" + id: 533 + display_name: "Graptemys geographica" +} +item { + name: "26159" + id: 534 + display_name: "Alligator mississippiensis" +} +item { + name: "304690" + id: 535 + display_name: "Naupactus cervinus" +} +item { + name: "124467" + id: 536 + display_name: "Pseudosphinx tetrio" +} +item { + name: "99892" + id: 537 + display_name: "Enallagma basidens" +} +item { + name: "99895" + id: 538 + display_name: "Enallagma carunculatum" +} +item { + name: "67129" + id: 539 + display_name: "Rhinella marina" +} +item { + name: "83515" + id: 540 + display_name: "Oxybelis aeneus" +} +item { + name: "81681" + id: 541 + display_name: "Campaea perlata" +} +item { + name: "99901" + id: 542 + display_name: "Enallagma cyathigerum" +} +item { + name: "99911" + id: 543 + display_name: "Enallagma exsulans" +} +item { + name: "9800" + id: 544 + display_name: "Coccothraustes vespertinus" +} +item { + name: "9801" + id: 545 + display_name: "Coccothraustes coccothraustes" +} +item { + name: "154551" + id: 546 + display_name: "Leptoglossus zonatus" +} +item { + name: "9807" + id: 547 + display_name: "Vermivora chrysoptera" +} +item { + name: "61157" + id: 548 + display_name: "Trichodes ornatus" +} +item { + name: "99924" + id: 549 + display_name: "Enallagma signatum" +} +item { + name: "1626" + id: 550 + display_name: "Opisthocomus hoazin" +} +item { + name: "132704" + id: 551 + display_name: "Setophaga coronata coronata" +} +item { + name: "119056" + id: 552 + display_name: "Centruroides vittatus" +} +item { + name: "50786" + id: 553 + display_name: "Vanessa annabella" +} +item { + name: "60347" + id: 554 + display_name: "Pituophis catenifer sayi" +} +item { + name: "9833" + id: 555 + display_name: "Diglossa baritula" +} +item { + name: "132718" + id: 556 + display_name: "Scathophaga stercoraria" +} +item { + name: "132719" + id: 557 + display_name: "Calopteron reticulatum" +} +item { + name: "116340" + id: 558 + display_name: "Dreissena polymorpha" +} +item { + name: "134078" + id: 559 + display_name: "Scoliopteryx libatrix" +} +item { + name: "9850" + id: 560 + display_name: "Saltator coerulescens" +} +item { + name: "117695" + id: 561 + display_name: "Cucumaria miniata" +} +item { + name: "9854" + id: 562 + display_name: "Saltator atriceps" +} +item { + name: "132736" + id: 563 + display_name: "Urola nivalis" +} +item { + name: "34435" + id: 564 + display_name: "Hemidactylus turcicus" 
+} +item { + name: "9864" + id: 565 + display_name: "Sicalis flaveola" +} +item { + name: "7106" + id: 566 + display_name: "Aix galericulata" +} +item { + name: "485010" + id: 567 + display_name: "Chinavia hilaris" +} +item { + name: "132764" + id: 568 + display_name: "Junco hyemalis hyemalis" +} +item { + name: "367558" + id: 569 + display_name: "Eupsittula canicularis" +} +item { + name: "370351" + id: 570 + display_name: "Microcarbo melanoleucos" +} +item { + name: "50867" + id: 571 + display_name: "Argiope bruennichi" +} +item { + name: "67252" + id: 572 + display_name: "Trachycephalus typhonius" +} +item { + name: "132789" + id: 573 + display_name: "Clepsis peritana" +} +item { + name: "9915" + id: 574 + display_name: "Piranga rubra" +} +item { + name: "50880" + id: 575 + display_name: "Limenitis lorquini" +} +item { + name: "9921" + id: 576 + display_name: "Piranga olivacea" +} +item { + name: "100034" + id: 577 + display_name: "Epiaeschna heros" +} +item { + name: "9924" + id: 578 + display_name: "Piranga flava" +} +item { + name: "42339" + id: 579 + display_name: "Tragelaphus strepsiceros" +} +item { + name: "50892" + id: 580 + display_name: "Euphydryas chalcedona" +} +item { + name: "130348" + id: 581 + display_name: "Dione moneta" +} +item { + name: "394966" + id: 582 + display_name: "Phaulacridium marginale" +} +item { + name: "9943" + id: 583 + display_name: "Amphispiza bilineata" +} +item { + name: "4388" + id: 584 + display_name: "Larus dominicanus" +} +item { + name: "1758" + id: 585 + display_name: "Piaya cayana" +} +item { + name: "50913" + id: 586 + display_name: "Hyalophora euryalus" +} +item { + name: "9958" + id: 587 + display_name: "Aimophila ruficeps" +} +item { + name: "59115" + id: 588 + display_name: "Gambusia affinis" +} +item { + name: "64346" + id: 589 + display_name: "Natrix tessellata" +} +item { + name: "59119" + id: 590 + display_name: "Pontia protodice" +} +item { + name: "18160" + id: 591 + display_name: "Melanerpes lewis" +} +item { + name: "18161" + id: 592 + display_name: "Melanerpes uropygialis" +} +item { + name: "50931" + id: 593 + display_name: "Strymon melinus" +} +item { + name: "59124" + id: 594 + display_name: "Anthocharis sara" +} +item { + name: "59127" + id: 595 + display_name: "Lycaena helloides" +} +item { + name: "59128" + id: 596 + display_name: "Atlides halesus" +} +item { + name: "67324" + id: 597 + display_name: "Eurema daira" +} +item { + name: "9981" + id: 598 + display_name: "Passerculus sandwichensis" +} +item { + name: "59134" + id: 599 + display_name: "Satyrium sylvinus" +} +item { + name: "67327" + id: 600 + display_name: "Schistocerca obscura" +} +item { + name: "67328" + id: 601 + display_name: "Pholcus phalangioides" +} +item { + name: "59138" + id: 602 + display_name: "Satyrium saepium" +} +item { + name: "132867" + id: 603 + display_name: "Microtia elva" +} +item { + name: "18181" + id: 604 + display_name: "Melanerpes pucherani" +} +item { + name: "7486" + id: 605 + display_name: "Salpinctes obsoletus" +} +item { + name: "108303" + id: 606 + display_name: "Paltothemis lineatipes" +} +item { + name: "59152" + id: 607 + display_name: "Leptotes marina" +} +item { + name: "132881" + id: 608 + display_name: "Catocala ultronia" +} +item { + name: "143662" + id: 609 + display_name: "Orthosoma brunneum" +} +item { + name: "59164" + id: 610 + display_name: "Plebejus icarioides" +} +item { + name: "18205" + id: 611 + display_name: "Melanerpes carolinus" +} +item { + name: "18206" + id: 612 + display_name: "Melanerpes chrysogenys" +} 
+item { + name: "83744" + id: 613 + display_name: "Amblyomma americanum" +} +item { + name: "18209" + id: 614 + display_name: "Melanerpes formicivorus" +} +item { + name: "116517" + id: 615 + display_name: "Caiman crocodilus" +} +item { + name: "59176" + id: 616 + display_name: "Phyciodes mylitta" +} +item { + name: "59182" + id: 617 + display_name: "Euphydryas editha" +} +item { + name: "43997" + id: 618 + display_name: "Myocastor coypus" +} +item { + name: "59185" + id: 619 + display_name: "Coenonympha tullia" +} +item { + name: "59187" + id: 620 + display_name: "Erynnis propertius" +} +item { + name: "59188" + id: 621 + display_name: "Erynnis funeralis" +} +item { + name: "59189" + id: 622 + display_name: "Erynnis tristis" +} +item { + name: "59190" + id: 623 + display_name: "Heliopetes ericetorum" +} +item { + name: "34615" + id: 624 + display_name: "Gekko gecko" +} +item { + name: "42808" + id: 625 + display_name: "Trichosurus vulpecula" +} +item { + name: "59194" + id: 626 + display_name: "Ochlodes sylvanoides" +} +item { + name: "59195" + id: 627 + display_name: "Lerodea eufala" +} +item { + name: "18236" + id: 628 + display_name: "Colaptes auratus" +} +item { + name: "10045" + id: 629 + display_name: "Basileuterus rufifrons" +} +item { + name: "59202" + id: 630 + display_name: "Larus michahellis" +} +item { + name: "10053" + id: 631 + display_name: "Ramphocelus passerinii" +} +item { + name: "19975" + id: 632 + display_name: "Athene cunicularia" +} +item { + name: "82231" + id: 633 + display_name: "Periplaneta americana" +} +item { + name: "67409" + id: 634 + display_name: "Gobiesox maeandricus" +} +item { + name: "83795" + id: 635 + display_name: "Cipangopaludina chinensis" +} +item { + name: "59220" + id: 636 + display_name: "Branta hutchinsii" +} +item { + name: "10069" + id: 637 + display_name: "Fringilla montifringilla" +} +item { + name: "10070" + id: 638 + display_name: "Fringilla coelebs" +} +item { + name: "83802" + id: 639 + display_name: "Megacyllene robiniae" +} +item { + name: "83804" + id: 640 + display_name: "Dynastes tityus" +} +item { + name: "51039" + id: 641 + display_name: "Cepaea hortensis" +} +item { + name: "68062" + id: 642 + display_name: "Menemerus bivittatus" +} +item { + name: "47527" + id: 643 + display_name: "Ostracion meleagris" +} +item { + name: "67435" + id: 644 + display_name: "Urbanus proteus" +} +item { + name: "10094" + id: 645 + display_name: "Junco hyemalis" +} +item { + name: "67440" + id: 646 + display_name: "Utetheisa ornatrix" +} +item { + name: "100210" + id: 647 + display_name: "Epitheca canis" +} +item { + name: "1907" + id: 648 + display_name: "Cuculus canorus" +} +item { + name: "100215" + id: 649 + display_name: "Epitheca princeps" +} +item { + name: "27826" + id: 650 + display_name: "Taricha granulosa" +} +item { + name: "129147" + id: 651 + display_name: "Ammophila procera" +} +item { + name: "10111" + id: 652 + display_name: "Junco phaeonotus" +} +item { + name: "83844" + id: 653 + display_name: "Oxyopes salticus" +} +item { + name: "144107" + id: 654 + display_name: "Tetracis crocallata" +} +item { + name: "51097" + id: 655 + display_name: "Papilio zelicaon" +} +item { + name: "10138" + id: 656 + display_name: "Ammodramus nelsoni" +} +item { + name: "10139" + id: 657 + display_name: "Ammodramus savannarum" +} +item { + name: "10147" + id: 658 + display_name: "Ammodramus maritimus" +} +item { + name: "59300" + id: 659 + display_name: "Anagrapha falcifera" +} +item { + name: "51110" + id: 660 + display_name: "Xylocopa virginica" 
+} +item { + name: "1960" + id: 661 + display_name: "Coccyzus erythropthalmus" +} +item { + name: "42652" + id: 662 + display_name: "Didelphis virginiana" +} +item { + name: "428606" + id: 663 + display_name: "Heraclides rumiko" +} +item { + name: "127303" + id: 664 + display_name: "Callophrys henrici" +} +item { + name: "1964" + id: 665 + display_name: "Coccyzus minor" +} +item { + name: "1965" + id: 666 + display_name: "Coccyzus americanus" +} +item { + name: "8520" + id: 667 + display_name: "Nucifraga columbiana" +} +item { + name: "116658" + id: 668 + display_name: "Siphanta acuta" +} +item { + name: "1972" + id: 669 + display_name: "Crotophaga sulcirostris" +} +item { + name: "10168" + id: 670 + display_name: "Pooecetes gramineus" +} +item { + name: "53893" + id: 671 + display_name: "Chlosyne palla" +} +item { + name: "10173" + id: 672 + display_name: "Arremonops rufivirgatus" +} +item { + name: "1986" + id: 673 + display_name: "Geococcyx californianus" +} +item { + name: "1987" + id: 674 + display_name: "Geococcyx velox" +} +item { + name: "116680" + id: 675 + display_name: "Tabanus atratus" +} +item { + name: "116681" + id: 676 + display_name: "Atteva aurea" +} +item { + name: "124875" + id: 677 + display_name: "Spodoptera litura" +} +item { + name: "26575" + id: 678 + display_name: "Diadophis punctatus" +} +item { + name: "10199" + id: 679 + display_name: "Coereba flaveola" +} +item { + name: "26591" + id: 680 + display_name: "Diadophis punctatus edwardsii" +} +item { + name: "59360" + id: 681 + display_name: "Neverita duplicata" +} +item { + name: "68263" + id: 682 + display_name: "Papilio multicaudata" +} +item { + name: "26598" + id: 683 + display_name: "Diadophis punctatus amabilis" +} +item { + name: "42983" + id: 684 + display_name: "Phascolarctos cinereus" +} +item { + name: "67560" + id: 685 + display_name: "Adelpha californica" +} +item { + name: "10224" + id: 686 + display_name: "Passerina ciris" +} +item { + name: "2038" + id: 687 + display_name: "Alectura lathami" +} +item { + name: "10232" + id: 688 + display_name: "Passerina leclancherii" +} +item { + name: "10234" + id: 689 + display_name: "Passerina amoena" +} +item { + name: "10243" + id: 690 + display_name: "Icteria virens" +} +item { + name: "2052" + id: 691 + display_name: "Crax rubra" +} +item { + name: "94551" + id: 692 + display_name: "Argia immunda" +} +item { + name: "2062" + id: 693 + display_name: "Penelope purpurascens" +} +item { + name: "204490" + id: 694 + display_name: "Copsychus malabaricus" +} +item { + name: "10257" + id: 695 + display_name: "Paroaria capitata" +} +item { + name: "51221" + id: 696 + display_name: "Procambarus clarkii" +} +item { + name: "10262" + id: 697 + display_name: "Cyanerpes cyaneus" +} +item { + name: "508249" + id: 698 + display_name: "Microcarbo melanoleucos brevirostris" +} +item { + name: "18460" + id: 699 + display_name: "Sphyrapicus thyroideus" +} +item { + name: "10271" + id: 700 + display_name: "Pheucticus ludovicianus" +} +item { + name: "18464" + id: 701 + display_name: "Sphyrapicus ruber" +} +item { + name: "10274" + id: 702 + display_name: "Pheucticus melanocephalus" +} +item { + name: "18467" + id: 703 + display_name: "Sphyrapicus nuchalis" +} +item { + name: "100391" + id: 704 + display_name: "Erythrodiplax berenice" +} +item { + name: "2089" + id: 705 + display_name: "Ortalis poliocephala" +} +item { + name: "2090" + id: 706 + display_name: "Ortalis vetula" +} +item { + name: "8038" + id: 707 + display_name: "Corvus albus" +} +item { + name: "67629" + id: 708 
+ display_name: "Oligocottus maculosus" +} +item { + name: "10286" + id: 709 + display_name: "Mniotilta varia" +} +item { + name: "10288" + id: 710 + display_name: "Volatinia jacarina" +} +item { + name: "100403" + id: 711 + display_name: "Erythrodiplax minuscula" +} +item { + name: "84023" + id: 712 + display_name: "Amorpha juglandis" +} +item { + name: "84024" + id: 713 + display_name: "Galasa nigrinodis" +} +item { + name: "10297" + id: 714 + display_name: "Thraupis palmarum" +} +item { + name: "67642" + id: 715 + display_name: "Pantherophis spiloides" +} +item { + name: "67653" + id: 716 + display_name: "Phoebis agarithe" +} +item { + name: "84038" + id: 717 + display_name: "Haploa lecontei" +} +item { + name: "26695" + id: 718 + display_name: "Scaphiopus holbrookii" +} +item { + name: "84040" + id: 719 + display_name: "Chauliognathus marginatus" +} +item { + name: "51275" + id: 720 + display_name: "Pentatoma rufipes" +} +item { + name: "2124" + id: 721 + display_name: "Momotus mexicanus" +} +item { + name: "26702" + id: 722 + display_name: "Spea hammondii" +} +item { + name: "10325" + id: 723 + display_name: "Euphagus cyanocephalus" +} +item { + name: "43102" + id: 724 + display_name: "Sylvilagus palustris" +} +item { + name: "49509" + id: 725 + display_name: "Lutjanus griseus" +} +item { + name: "116834" + id: 726 + display_name: "Cacatua galerita" +} +item { + name: "127188" + id: 727 + display_name: "Junco hyemalis oreganus" +} +item { + name: "26725" + id: 728 + display_name: "Ambystoma jeffersonianum" +} +item { + name: "43111" + id: 729 + display_name: "Sylvilagus floridanus" +} +item { + name: "43112" + id: 730 + display_name: "Sylvilagus bachmani" +} +item { + name: "67691" + id: 731 + display_name: "Lophocampa maculata" +} +item { + name: "51311" + id: 732 + display_name: "Urbanus dorantes" +} +item { + name: "67700" + id: 733 + display_name: "Caracolus caracolla" +} +item { + name: "43128" + id: 734 + display_name: "Lepus europaeus" +} +item { + name: "26745" + id: 735 + display_name: "Ambystoma texanum" +} +item { + name: "67706" + id: 736 + display_name: "Argiope argentata" +} +item { + name: "26747" + id: 737 + display_name: "Ambystoma gracile" +} +item { + name: "67708" + id: 738 + display_name: "Argiope trifasciata" +} +item { + name: "26749" + id: 739 + display_name: "Ambystoma tigrinum" +} +item { + name: "4896" + id: 740 + display_name: "Pluvialis fulva" +} +item { + name: "10369" + id: 741 + display_name: "Molothrus aeneus" +} +item { + name: "26754" + id: 742 + display_name: "Ambystoma macrodactylum" +} +item { + name: "10373" + id: 743 + display_name: "Molothrus ater" +} +item { + name: "2185" + id: 744 + display_name: "Merops pusillus" +} +item { + name: "84109" + id: 745 + display_name: "Pisaurina mira" +} +item { + name: "67726" + id: 746 + display_name: "Aeshna palmata" +} +item { + name: "2191" + id: 747 + display_name: "Merops apiaster" +} +item { + name: "67731" + id: 748 + display_name: "Anax junius" +} +item { + name: "198804" + id: 749 + display_name: "Satyrium titus" +} +item { + name: "51349" + id: 750 + display_name: "Pyrgus communis" +} +item { + name: "18584" + id: 751 + display_name: "Pteroglossus torquatus" +} +item { + name: "67737" + id: 752 + display_name: "Rhionaeschna multicolor" +} +item { + name: "198812" + id: 753 + display_name: "Lethe anthedon" +} +item { + name: "321697" + id: 754 + display_name: "Melanchroia chephise" +} +item { + name: "198821" + id: 755 + display_name: "Pieris oleracea" +} +item { + name: "26790" + id: 756 + 
display_name: "Ambystoma maculatum" +} +item { + name: "10411" + id: 757 + display_name: "Loxia curvirostra" +} +item { + name: "133295" + id: 758 + display_name: "Melitaea didyma" +} +item { + name: "67760" + id: 759 + display_name: "Popillia japonica" +} +item { + name: "43188" + id: 760 + display_name: "Ochotona princeps" +} +item { + name: "2229" + id: 761 + display_name: "Merops orientalis" +} +item { + name: "10423" + id: 762 + display_name: "Loxia leucoptera" +} +item { + name: "67771" + id: 763 + display_name: "Leptoglossus occidentalis" +} +item { + name: "84162" + id: 764 + display_name: "Chrysochus auratus" +} +item { + name: "26822" + id: 765 + display_name: "Dicamptodon tenebrosus" +} +item { + name: "26823" + id: 766 + display_name: "Dicamptodon ensatus" +} +item { + name: "51402" + id: 767 + display_name: "Megalops atlanticus" +} +item { + name: "67725" + id: 768 + display_name: "Aeshna interrupta" +} +item { + name: "411858" + id: 769 + display_name: "Vanessa gonerilla gonerilla" +} +item { + name: "26835" + id: 770 + display_name: "Drymobius margaritiferus" +} +item { + name: "84185" + id: 771 + display_name: "Megalopyge opercularis" +} +item { + name: "2266" + id: 772 + display_name: "Coracias garrulus" +} +item { + name: "141531" + id: 773 + display_name: "Lethe eurydice" +} +item { + name: "2269" + id: 774 + display_name: "Coracias caudatus" +} +item { + name: "133346" + id: 775 + display_name: "Melittia cucurbitae" +} +item { + name: "2275" + id: 776 + display_name: "Coracias benghalensis" +} +item { + name: "84196" + id: 777 + display_name: "Pontania californica" +} +item { + name: "10470" + id: 778 + display_name: "Xanthocephalus xanthocephalus" +} +item { + name: "10479" + id: 779 + display_name: "Chondestes grammacus" +} +item { + name: "51440" + id: 780 + display_name: "Pituophis catenifer catenifer" +} +item { + name: "54087" + id: 781 + display_name: "Pieris napi" +} +item { + name: "59635" + id: 782 + display_name: "Phragmatopoma californica" +} +item { + name: "10487" + id: 783 + display_name: "Dolichonyx oryzivorus" +} +item { + name: "67835" + id: 784 + display_name: "Danaus chrysippus" +} +item { + name: "59644" + id: 785 + display_name: "Pantherophis alleghaniensis" +} +item { + name: "59646" + id: 786 + display_name: "Pantherophis bairdi" +} +item { + name: "116999" + id: 787 + display_name: "Pandion haliaetus" +} +item { + name: "117002" + id: 788 + display_name: "Phainopepla nitens" +} +item { + name: "16770" + id: 789 + display_name: "Tyrannus couchii" +} +item { + name: "84239" + id: 790 + display_name: "Callophrys gryneus" +} +item { + name: "104553" + id: 791 + display_name: "Leucorrhinia proxima" +} +item { + name: "117016" + id: 792 + display_name: "Phylloscopus collybita" +} +item { + name: "49540" + id: 793 + display_name: "Gasteracantha cancriformis" +} +item { + name: "59675" + id: 794 + display_name: "Pyrrharctia isabella" +} +item { + name: "469277" + id: 795 + display_name: "Neotibicen superbus" +} +item { + name: "236973" + id: 796 + display_name: "Circus cyaneus hudsonius" +} +item { + name: "59683" + id: 797 + display_name: "Porpita porpita" +} +item { + name: "26916" + id: 798 + display_name: "Contia tenuis" +} +item { + name: "51493" + id: 799 + display_name: "Trimerotropis pallidipennis" +} +item { + name: "51495" + id: 800 + display_name: "Anthocharis cardamines" +} +item { + name: "133416" + id: 801 + display_name: "Phoebis philea" +} +item { + name: "8583" + id: 802 + display_name: "Grallina cyanoleuca" +} +item { + name: "395569" 
+ id: 803 + display_name: "Prionoplus reticularis" +} +item { + name: "59698" + id: 804 + display_name: "Velella velella" +} +item { + name: "141626" + id: 805 + display_name: "Lygaeus turcicus" +} +item { + name: "84286" + id: 806 + display_name: "Diapheromera femorata" +} +item { + name: "117059" + id: 807 + display_name: "Plectrophenax nivalis" +} +item { + name: "133447" + id: 808 + display_name: "Crambus agitatellus" +} +item { + name: "133448" + id: 809 + display_name: "Climaciella brunnea" +} +item { + name: "51534" + id: 810 + display_name: "Leptotes cassius" +} +item { + name: "205197" + id: 811 + display_name: "Eutrapela clemataria" +} +item { + name: "51536" + id: 812 + display_name: "Ascia monuste" +} +item { + name: "10585" + id: 813 + display_name: "Calamospiza melanocorys" +} +item { + name: "49552" + id: 814 + display_name: "Scutigera coleoptrata" +} +item { + name: "51555" + id: 815 + display_name: "Sympetrum illotum" +} +item { + name: "51557" + id: 816 + display_name: "Bombylius major" +} +item { + name: "117095" + id: 817 + display_name: "Regulus calendula" +} +item { + name: "117097" + id: 818 + display_name: "Regulus ignicapilla" +} +item { + name: "117099" + id: 819 + display_name: "Regulus regulus" +} +item { + name: "117100" + id: 820 + display_name: "Regulus satrapa" +} +item { + name: "84333" + id: 821 + display_name: "Eudryas grata" +} +item { + name: "215409" + id: 822 + display_name: "Bradybaena similaris" +} +item { + name: "16787" + id: 823 + display_name: "Tyrannus melancholicus" +} +item { + name: "46225" + id: 824 + display_name: "Tamias dorsalis" +} +item { + name: "59774" + id: 825 + display_name: "Pachydiplax longipennis" +} +item { + name: "59776" + id: 826 + display_name: "Perithemis tenera" +} +item { + name: "119014" + id: 827 + display_name: "Argia fumipennis violacea" +} +item { + name: "4326" + id: 828 + display_name: "Pelecanus conspicillatus" +} +item { + name: "18833" + id: 829 + display_name: "Aulacorhynchus prasinus" +} +item { + name: "43411" + id: 830 + display_name: "Ateles geoffroyi" +} +item { + name: "141725" + id: 831 + display_name: "Nezara viridula" +} +item { + name: "51614" + id: 832 + display_name: "Eurema hecabe" +} +item { + name: "125343" + id: 833 + display_name: "Crepidula fornicata" +} +item { + name: "2464" + id: 834 + display_name: "Todiramphus sanctus" +} +item { + name: "43432" + id: 835 + display_name: "Cebus capucinus" +} +item { + name: "43436" + id: 836 + display_name: "Alouatta palliata" +} +item { + name: "43439" + id: 837 + display_name: "Alouatta pigra" +} +item { + name: "9357" + id: 838 + display_name: "Icterus bullockii" +} +item { + name: "84403" + id: 839 + display_name: "Phyllopalpus pulchellus" +} +item { + name: "10676" + id: 840 + display_name: "Spiza americana" +} +item { + name: "16798" + id: 841 + display_name: "Tyrannus dominicensis" +} +item { + name: "141752" + id: 842 + display_name: "Biblis hyperia" +} +item { + name: "4512" + id: 843 + display_name: "Chlidonias niger" +} +item { + name: "43460" + id: 844 + display_name: "Macaca mulatta" +} +item { + name: "51654" + id: 845 + display_name: "Junonia almana" +} +item { + name: "51659" + id: 846 + display_name: "Anthopleura xanthogrammica" +} +item { + name: "84428" + id: 847 + display_name: "Drepana arcuata" +} +item { + name: "10702" + id: 848 + display_name: "Oriturus superciliosus" +} +item { + name: "68047" + id: 849 + display_name: "Psarocolius montezuma" +} +item { + name: "12707" + id: 850 + display_name: "Turdus pilaris" +} +item { + 
name: "84437" + id: 851 + display_name: "Nicrophorus orbicollis" +} +item { + name: "84438" + id: 852 + display_name: "Platyprepia virginalis" +} +item { + name: "117209" + id: 853 + display_name: "Notiomystis cincta" +} +item { + name: "343393" + id: 854 + display_name: "Hypsopygia olinalis" +} +item { + name: "27101" + id: 855 + display_name: "Eurycea longicauda" +} +item { + name: "117214" + id: 856 + display_name: "Sagittarius serpentarius" +} +item { + name: "18911" + id: 857 + display_name: "Psittacula krameri" +} +item { + name: "117218" + id: 858 + display_name: "Verrucosa arenata" +} +item { + name: "117221" + id: 859 + display_name: "Dasymutilla occidentalis" +} +item { + name: "35303" + id: 860 + display_name: "Ctenosaura similis" +} +item { + name: "18920" + id: 861 + display_name: "Platycercus eximius" +} +item { + name: "10729" + id: 862 + display_name: "Protonotaria citrea" +} +item { + name: "35306" + id: 863 + display_name: "Ctenosaura pectinata" +} +item { + name: "109650" + id: 864 + display_name: "Platycnemis pennipes" +} +item { + name: "27120" + id: 865 + display_name: "Eurycea bislineata" +} +item { + name: "27123" + id: 866 + display_name: "Eurycea lucifuga" +} +item { + name: "51702" + id: 867 + display_name: "Coccinella septempunctata" +} +item { + name: "2552" + id: 868 + display_name: "Megaceryle torquata" +} +item { + name: "133625" + id: 869 + display_name: "Zanclognatha jacchusalis" +} +item { + name: "18943" + id: 870 + display_name: "Nestor meridionalis" +} +item { + name: "84481" + id: 871 + display_name: "Calopteryx maculata" +} +item { + name: "35330" + id: 872 + display_name: "Sauromalus ater" +} +item { + name: "27140" + id: 873 + display_name: "Coluber constrictor priapus" +} +item { + name: "199179" + id: 874 + display_name: "Polistes chinensis" +} +item { + name: "51724" + id: 875 + display_name: "Mopalia lignosa" +} +item { + name: "27149" + id: 876 + display_name: "Coluber constrictor constrictor" +} +item { + name: "35342" + id: 877 + display_name: "Iguana iguana" +} +item { + name: "27153" + id: 878 + display_name: "Coluber constrictor flaviventris" +} +item { + name: "35347" + id: 879 + display_name: "Amblyrhynchus cristatus" +} +item { + name: "125461" + id: 880 + display_name: "Ursus arctos horribilis" +} +item { + name: "84507" + id: 881 + display_name: "Lygus lineolaris" +} +item { + name: "35356" + id: 882 + display_name: "Dipsosaurus dorsalis" +} +item { + name: "51743" + id: 883 + display_name: "Danaus gilippus" +} +item { + name: "18976" + id: 884 + display_name: "Amazona viridigenalis" +} +item { + name: "125475" + id: 885 + display_name: "Plusiodonta compressipalpis" +} +item { + name: "51748" + id: 886 + display_name: "Danaus gilippus thersippus" +} +item { + name: "68137" + id: 887 + display_name: "Chlorocebus pygerythrus" +} +item { + name: "133675" + id: 888 + display_name: "Coenobita clypeatus" +} +item { + name: "215596" + id: 889 + display_name: "Buprestis aurulenta" +} +item { + name: "117293" + id: 890 + display_name: "Oecophylla smaragdina" +} +item { + name: "68142" + id: 891 + display_name: "Prenolepis imparis" +} +item { + name: "27184" + id: 892 + display_name: "Plethodon glutinosus" +} +item { + name: "27186" + id: 893 + display_name: "Plethodon cinereus" +} +item { + name: "18995" + id: 894 + display_name: "Amazona albifrons" +} +item { + name: "51765" + id: 895 + display_name: "Poanes melane" +} +item { + name: "18998" + id: 896 + display_name: "Amazona oratrix" +} +item { + name: "41396" + id: 897 + display_name: 
"Rhynchonycteris naso" +} +item { + name: "27194" + id: 898 + display_name: "Plethodon vehiculum" +} +item { + name: "51773" + id: 899 + display_name: "Nathalis iole" +} +item { + name: "12908" + id: 900 + display_name: "Saxicola rubetra" +} +item { + name: "68165" + id: 901 + display_name: "Linepithema humile" +} +item { + name: "154721" + id: 902 + display_name: "Brachygastra mellifica" +} +item { + name: "338504" + id: 903 + display_name: "Xanthocnemis zealandica" +} +item { + name: "338505" + id: 904 + display_name: "Melangyna novaezelandiae" +} +item { + name: "27093" + id: 905 + display_name: "Eurycea cirrigera" +} +item { + name: "65975" + id: 906 + display_name: "Lithobates berlandieri" +} +item { + name: "19020" + id: 907 + display_name: "Ara militaris" +} +item { + name: "474210" + id: 908 + display_name: "Spizelloides arborea" +} +item { + name: "205240" + id: 909 + display_name: "Pantographa limata" +} +item { + name: "27226" + id: 910 + display_name: "Plethodon albagula" +} +item { + name: "318545" + id: 911 + display_name: "Coreus marginatus" +} +item { + name: "2662" + id: 912 + display_name: "Ceryle rudis" +} +item { + name: "109161" + id: 913 + display_name: "Perithemis intensa" +} +item { + name: "51824" + id: 914 + display_name: "Calopteryx splendens" +} +item { + name: "27250" + id: 915 + display_name: "Ensatina eschscholtzii" +} +item { + name: "2676" + id: 916 + display_name: "Chloroceryle aenea" +} +item { + name: "2679" + id: 917 + display_name: "Chloroceryle amazona" +} +item { + name: "84602" + id: 918 + display_name: "Zale lunata" +} +item { + name: "133756" + id: 919 + display_name: "Leptoglossus oppositus" +} +item { + name: "35453" + id: 920 + display_name: "Zootoca vivipara" +} +item { + name: "84612" + id: 921 + display_name: "Polyphylla decemlineata" +} +item { + name: "133765" + id: 922 + display_name: "Eumenes fraternus" +} +item { + name: "68230" + id: 923 + display_name: "Brachymesia gravida" +} +item { + name: "49601" + id: 924 + display_name: "Mola mola" +} +item { + name: "68232" + id: 925 + display_name: "Papilio palamedes" +} +item { + name: "68233" + id: 926 + display_name: "Orthemis ferruginea" +} +item { + name: "68239" + id: 927 + display_name: "Parnassius clodius" +} +item { + name: "68240" + id: 928 + display_name: "Chlosyne lacinia" +} +item { + name: "68244" + id: 929 + display_name: "Euptoieta claudia" +} +item { + name: "68249" + id: 930 + display_name: "Dymasia dymas" +} +item { + name: "68251" + id: 931 + display_name: "Limenitis weidemeyerii" +} +item { + name: "133790" + id: 932 + display_name: "Chalybion californicum" +} +item { + name: "84644" + id: 933 + display_name: "Phalangium opilio" +} +item { + name: "68262" + id: 934 + display_name: "Polygonia faunus" +} +item { + name: "133799" + id: 935 + display_name: "Xenox tigrinus" +} +item { + name: "68264" + id: 936 + display_name: "Asterocampa celtis" +} +item { + name: "132892" + id: 937 + display_name: "Anacridium aegyptium" +} +item { + name: "68268" + id: 938 + display_name: "Euptoieta hegesia" +} +item { + name: "68269" + id: 939 + display_name: "Aglais milberti" +} +item { + name: "43694" + id: 940 + display_name: "Loxodonta africana" +} +item { + name: "59165" + id: 941 + display_name: "Apodemia mormo" +} +item { + name: "68274" + id: 942 + display_name: "Phyciodes phaon" +} +item { + name: "68275" + id: 943 + display_name: "Battus polydamas" +} +item { + name: "84662" + id: 944 + display_name: "Celastrina lucia" +} +item { + name: "16842" + id: 945 + display_name: 
"Myiozetetes similis" +} +item { + name: "133826" + id: 946 + display_name: "Zelus longipes" +} +item { + name: "14912" + id: 947 + display_name: "Toxostoma curvirostre" +} +item { + name: "53708" + id: 948 + display_name: "Pacifastacus leniusculus" +} +item { + name: "117452" + id: 949 + display_name: "Sphinx kalmiae" +} +item { + name: "182997" + id: 950 + display_name: "Megisto rubricata" +} +item { + name: "223965" + id: 951 + display_name: "Lithacodia musta" +} +item { + name: "125663" + id: 952 + display_name: "Kelletia kelletii" +} +item { + name: "125669" + id: 953 + display_name: "Rumina decollata" +} +item { + name: "68328" + id: 954 + display_name: "Oxythyrea funesta" +} +item { + name: "179324" + id: 955 + display_name: "Dactylotum bicolor" +} +item { + name: "68330" + id: 956 + display_name: "Arctia caja" +} +item { + name: "2548" + id: 957 + display_name: "Megaceryle alcyon" +} +item { + name: "207600" + id: 958 + display_name: "Thasus neocalifornicus" +} +item { + name: "207601" + id: 959 + display_name: "Palpita quadristigmalis" +} +item { + name: "51954" + id: 960 + display_name: "Sphecius speciosus" +} +item { + name: "207603" + id: 961 + display_name: "Prolimacodes badia" +} +item { + name: "7294" + id: 962 + display_name: "Eremophila alpestris" +} +item { + name: "19196" + id: 963 + display_name: "Alisterus scapularis" +} +item { + name: "145194" + id: 964 + display_name: "Cinnyris jugularis" +} +item { + name: "27390" + id: 965 + display_name: "Desmognathus ochrophaeus" +} +item { + name: "207615" + id: 966 + display_name: "Polistes apachus" +} +item { + name: "63275" + id: 967 + display_name: "Tremex columba" +} +item { + name: "61910" + id: 968 + display_name: "Orgyia antiqua" +} +item { + name: "199438" + id: 969 + display_name: "Orgyia postica" +} +item { + name: "43794" + id: 970 + display_name: "Castor canadensis" +} +item { + name: "84755" + id: 971 + display_name: "Arion rufus" +} +item { + name: "51996" + id: 972 + display_name: "Daphnis nerii" +} +item { + name: "194075" + id: 973 + display_name: "Drymarchon melanurus erebennus" +} +item { + name: "133923" + id: 974 + display_name: "Mermiria bivittata" +} +item { + name: "84778" + id: 975 + display_name: "Leptinotarsa decemlineata" +} +item { + name: "11051" + id: 976 + display_name: "Xiphorhynchus flavigaster" +} +item { + name: "121992" + id: 977 + display_name: "Cervus elaphus roosevelti" +} +item { + name: "27459" + id: 978 + display_name: "Batrachoseps attenuatus" +} +item { + name: "84806" + id: 979 + display_name: "Acanalonia conica" +} +item { + name: "52043" + id: 980 + display_name: "Spoladea recurvalis" +} +item { + name: "27468" + id: 981 + display_name: "Batrachoseps major" +} +item { + name: "133966" + id: 982 + display_name: "Lomographa vestaliata" +} +item { + name: "27474" + id: 983 + display_name: "Batrachoseps nigriventris" +} +item { + name: "101204" + id: 984 + display_name: "Gambusia holbrooki" +} +item { + name: "52055" + id: 985 + display_name: "Crocothemis servilia" +} +item { + name: "4580" + id: 986 + display_name: "Jacana jacana" +} +item { + name: "346970" + id: 987 + display_name: "Callophrys dumetorum" +} +item { + name: "27486" + id: 988 + display_name: "Pseudotriton ruber" +} +item { + name: "52075" + id: 989 + display_name: "Atalopedes campestris" +} +item { + name: "27500" + id: 990 + display_name: "Gyrinophilus porphyriticus" +} +item { + name: "73203" + id: 991 + display_name: "Phalaropus fulicarius" +} +item { + name: "322417" + id: 992 + display_name: "Limacus flavus" +} 
+item { + name: "40083" + id: 993 + display_name: "Gopherus berlandieri" +} +item { + name: "68469" + id: 994 + display_name: "Papilio demodocus" +} +item { + name: "2938" + id: 995 + display_name: "Streptopelia turtur" +} +item { + name: "117633" + id: 996 + display_name: "Mopalia muscosa" +} +item { + name: "117641" + id: 997 + display_name: "Nucella lamellosa" +} +item { + name: "322443" + id: 998 + display_name: "Thasus gigas" +} +item { + name: "68492" + id: 999 + display_name: "Hemidactylus mabouia" +} +item { + name: "143853" + id: 1000 + display_name: "Pica hudsonia" +} +item { + name: "144757" + id: 1001 + display_name: "Corvus cornix" +} +item { + name: "117650" + id: 1002 + display_name: "Mytilus edulis" +} +item { + name: "19349" + id: 1003 + display_name: "Myiopsitta monachus" +} +item { + name: "2969" + id: 1004 + display_name: "Streptopelia decaocto" +} +item { + name: "9919" + id: 1005 + display_name: "Piranga ludoviciana" +} +item { + name: "5009" + id: 1006 + display_name: "Ixobrychus exilis" +} +item { + name: "117666" + id: 1007 + display_name: "Pleuroncodes planipes" +} +item { + name: "7603" + id: 1008 + display_name: "Auriparus flaviceps" +} +item { + name: "117674" + id: 1009 + display_name: "Ligia occidentalis" +} +item { + name: "145223" + id: 1010 + display_name: "Geothlypis tolmiei" +} +item { + name: "60341" + id: 1011 + display_name: "Lithobates sphenocephalus" +} +item { + name: "60342" + id: 1012 + display_name: "Thamnophis proximus" +} +item { + name: "52155" + id: 1013 + display_name: "Dermacentor variabilis" +} +item { + name: "60349" + id: 1014 + display_name: "Scincella lateralis" +} +item { + name: "52158" + id: 1015 + display_name: "Schistocerca nitens" +} +item { + name: "117696" + id: 1016 + display_name: "Dendraster excentricus" +} +item { + name: "232391" + id: 1017 + display_name: "Tetracha carolina" +} +item { + name: "3017" + id: 1018 + display_name: "Columba livia" +} +item { + name: "145229" + id: 1019 + display_name: "Setophaga citrina" +} +item { + name: "84950" + id: 1020 + display_name: "Alypia octomaculata" +} +item { + name: "52188" + id: 1021 + display_name: "Rhincodon typus" +} +item { + name: "494559" + id: 1022 + display_name: "Polydrusus formosus" +} +item { + name: "145232" + id: 1023 + display_name: "Setophaga cerulea" +} +item { + name: "3048" + id: 1024 + display_name: "Columba palumbus" +} +item { + name: "9922" + id: 1025 + display_name: "Piranga bidentata" +} +item { + name: "44026" + id: 1026 + display_name: "Erethizon dorsatum" +} +item { + name: "61505" + id: 1027 + display_name: "Manduca sexta" +} +item { + name: "84994" + id: 1028 + display_name: "Acanthocephala declivis" +} +item { + name: "27652" + id: 1029 + display_name: "Hemidactylium scutatum" +} +item { + name: "117767" + id: 1030 + display_name: "Cervus elaphus nannodes" +} +item { + name: "494603" + id: 1031 + display_name: "Hermissenda opalescens" +} +item { + name: "39819" + id: 1032 + display_name: "Terrapene carolina bauri" +} +item { + name: "3093" + id: 1033 + display_name: "Patagioenas leucocephala" +} +item { + name: "205316" + id: 1034 + display_name: "Aidemona azteca" +} +item { + name: "216093" + id: 1035 + display_name: "Caracolus marginella" +} +item { + name: "44062" + id: 1036 + display_name: "Thomomys bottae" +} +item { + name: "85024" + id: 1037 + display_name: "Heraclides cresphontes" +} +item { + name: "3108" + id: 1038 + display_name: "Patagioenas fasciata" +} +item { + name: "213510" + id: 1039 + display_name: "Anageshna primordialis" +} 
+item { + name: "85030" + id: 1040 + display_name: "Crocothemis erythraea" +} +item { + name: "85034" + id: 1041 + display_name: "Neoscona crucifera" +} +item { + name: "3117" + id: 1042 + display_name: "Patagioenas flavirostris" +} +item { + name: "207924" + id: 1043 + display_name: "Synchlora frondaria" +} +item { + name: "35900" + id: 1044 + display_name: "Lacerta bilineata" +} +item { + name: "24382" + id: 1045 + display_name: "Osteopilus septentrionalis" +} +item { + name: "145249" + id: 1046 + display_name: "Setophaga discolor" +} +item { + name: "52297" + id: 1047 + display_name: "Triakis semifasciata" +} +item { + name: "27726" + id: 1048 + display_name: "Salamandra salamandra" +} +item { + name: "27727" + id: 1049 + display_name: "Bogertophis subocularis" +} +item { + name: "143043" + id: 1050 + display_name: "Cycnia tenera" +} +item { + name: "52313" + id: 1051 + display_name: "Diodon hystrix" +} +item { + name: "143316" + id: 1052 + display_name: "Schinia florida" +} +item { + name: "61968" + id: 1053 + display_name: "Graphosoma lineatum" +} +item { + name: "502885" + id: 1054 + display_name: "Lissachatina fulica" +} +item { + name: "71029" + id: 1055 + display_name: "Crotalus cerastes cerastes" +} +item { + name: "207977" + id: 1056 + display_name: "Aglais io" +} +item { + name: "19577" + id: 1057 + display_name: "Chordeiles minor" +} +item { + name: "93312" + id: 1058 + display_name: "Acropora palmata" +} +item { + name: "52354" + id: 1059 + display_name: "Ambystoma laterale" +} +item { + name: "19587" + id: 1060 + display_name: "Chordeiles acutipennis" +} +item { + name: "58585" + id: 1061 + display_name: "Limenitis arthemis astyanax" +} +item { + name: "134277" + id: 1062 + display_name: "Gastrophryne olivacea" +} +item { + name: "60551" + id: 1063 + display_name: "Papilio glaucus" +} +item { + name: "3731" + id: 1064 + display_name: "Platalea leucorodia" +} +item { + name: "232593" + id: 1065 + display_name: "Thyris sepulchralis" +} +item { + name: "19609" + id: 1066 + display_name: "Phalaenoptilus nuttallii" +} +item { + name: "126106" + id: 1067 + display_name: "Haploa clymene" +} +item { + name: "27805" + id: 1068 + display_name: "Notophthalmus viridescens" +} +item { + name: "199840" + id: 1069 + display_name: "Haemorhous mexicanus" +} +item { + name: "199841" + id: 1070 + display_name: "Haemorhous purpureus" +} +item { + name: "219719" + id: 1071 + display_name: "Eudryas unio" +} +item { + name: "27818" + id: 1072 + display_name: "Taricha torosa" +} +item { + name: "19627" + id: 1073 + display_name: "Nyctidromus albicollis" +} +item { + name: "28750" + id: 1074 + display_name: "Salvadora grahamiae lineata" +} +item { + name: "27824" + id: 1075 + display_name: "Taricha rivularis" +} +item { + name: "146632" + id: 1076 + display_name: "Toxomerus politus" +} +item { + name: "52402" + id: 1077 + display_name: "Cetonia aurata" +} +item { + name: "18291" + id: 1078 + display_name: "Campephilus guatemalensis" +} +item { + name: "60598" + id: 1079 + display_name: "Ixodes scapularis" +} +item { + name: "199870" + id: 1080 + display_name: "Pyralis farinalis" +} +item { + name: "60607" + id: 1081 + display_name: "Limenitis arthemis" +} +item { + name: "205241" + id: 1082 + display_name: "Plagodis phlogosaria" +} +item { + name: "14898" + id: 1083 + display_name: "Toxostoma rufum" +} +item { + name: "126153" + id: 1084 + display_name: "Amphion floridensis" +} +item { + name: "126155" + id: 1085 + display_name: "Vespula germanica" +} +item { + name: "51392" + id: 1086 + 
display_name: "Morone saxatilis" +} +item { + name: "3280" + id: 1087 + display_name: "Leptotila verreauxi" +} +item { + name: "19670" + id: 1088 + display_name: "Nyctibius jamaicensis" +} +item { + name: "6929" + id: 1089 + display_name: "Anas penelope" +} +item { + name: "97738" + id: 1090 + display_name: "Chromagrion conditum" +} +item { + name: "52449" + id: 1091 + display_name: "Rhinecanthus rectangulus" +} +item { + name: "52451" + id: 1092 + display_name: "Naso lituratus" +} +item { + name: "56529" + id: 1093 + display_name: "Papilio machaon" +} +item { + name: "199913" + id: 1094 + display_name: "Buteo plagiatus" +} +item { + name: "199914" + id: 1095 + display_name: "Selasphorus calliope" +} +item { + name: "85227" + id: 1096 + display_name: "Hemideina crassidens" +} +item { + name: "36076" + id: 1097 + display_name: "Cophosaurus texanus" +} +item { + name: "36077" + id: 1098 + display_name: "Cophosaurus texanus texanus" +} +item { + name: "208112" + id: 1099 + display_name: "Palpita magniferalis" +} +item { + name: "85235" + id: 1100 + display_name: "Deinacrida rugosa" +} +item { + name: "93429" + id: 1101 + display_name: "Aeshna constricta" +} +item { + name: "36086" + id: 1102 + display_name: "Callisaurus draconoides rhodostictus" +} +item { + name: "126204" + id: 1103 + display_name: "Synchlora aerata" +} +item { + name: "93437" + id: 1104 + display_name: "Aeshna mixta" +} +item { + name: "126207" + id: 1105 + display_name: "Schizura unicornis" +} +item { + name: "126209" + id: 1106 + display_name: "Metcalfa pruinosa" +} +item { + name: "126211" + id: 1107 + display_name: "Poecilocapsus lineatus" +} +item { + name: "36100" + id: 1108 + display_name: "Uta stansburiana elegans" +} +item { + name: "48342" + id: 1109 + display_name: "Hemigrapsus nudus" +} +item { + name: "199942" + id: 1110 + display_name: "Strategus aloeus" +} +item { + name: "126215" + id: 1111 + display_name: "Monobia quadridens" +} +item { + name: "101640" + id: 1112 + display_name: "Gomphaeschna furcillata" +} +item { + name: "126217" + id: 1113 + display_name: "Pyrausta orphisalis" +} +item { + name: "36107" + id: 1114 + display_name: "Urosaurus ornatus" +} +item { + name: "51940" + id: 1115 + display_name: "Hemidactylus frenatus" +} +item { + name: "36121" + id: 1116 + display_name: "Urosaurus graciosus" +} +item { + name: "19743" + id: 1117 + display_name: "Megascops kennicottii" +} +item { + name: "68901" + id: 1118 + display_name: "Salticus scenicus" +} +item { + name: "44326" + id: 1119 + display_name: "Microtus californicus" +} +item { + name: "82481" + id: 1120 + display_name: "Pieris marginalis" +} +item { + name: "474332" + id: 1121 + display_name: "Porphyrio poliocephalus" +} +item { + name: "81674" + id: 1122 + display_name: "Rivula propinqualis" +} +item { + name: "126252" + id: 1123 + display_name: "Mastigoproctus giganteus" +} +item { + name: "36142" + id: 1124 + display_name: "Sceloporus undulatus" +} +item { + name: "68911" + id: 1125 + display_name: "Libellula needhami" +} +item { + name: "68912" + id: 1126 + display_name: "Dysdera crocata" +} +item { + name: "42888" + id: 1127 + display_name: "Macropus giganteus" +} +item { + name: "19765" + id: 1128 + display_name: "Megascops asio" +} +item { + name: "68918" + id: 1129 + display_name: "Poecilanthrax lucifer" +} +item { + name: "333705" + id: 1130 + display_name: "Pantherophis obsoletus lindheimeri" +} +item { + name: "126267" + id: 1131 + display_name: "Coleomegilla maculata" +} +item { + name: "101693" + id: 1132 + display_name: "Gomphus 
vastus" +} +item { + name: "85221" + id: 1133 + display_name: "Hemideina thoracica" +} +item { + name: "126276" + id: 1134 + display_name: "Agrotis ipsilon" +} +item { + name: "85317" + id: 1135 + display_name: "Eurosta solidaginis" +} +item { + name: "36169" + id: 1136 + display_name: "Sceloporus spinosus" +} +item { + name: "60752" + id: 1137 + display_name: "Hermeuptychia sosybius" +} +item { + name: "60754" + id: 1138 + display_name: "Pyromorpha dimidiata" +} +item { + name: "126291" + id: 1139 + display_name: "Prosapia bicincta" +} +item { + name: "52564" + id: 1140 + display_name: "Anthopleura elegantissima" +} +item { + name: "126293" + id: 1141 + display_name: "Prionoxystus robiniae" +} +item { + name: "120719" + id: 1142 + display_name: "Pseudacris hypochondriaca" +} +item { + name: "36189" + id: 1143 + display_name: "Sceloporus poinsettii" +} +item { + name: "52576" + id: 1144 + display_name: "Uroctonus mordax" +} +item { + name: "36198" + id: 1145 + display_name: "Sceloporus orcutti" +} +item { + name: "52584" + id: 1146 + display_name: "Pantala hymenaea" +} +item { + name: "44395" + id: 1147 + display_name: "Peromyscus leucopus" +} +item { + name: "36204" + id: 1148 + display_name: "Sceloporus occidentalis" +} +item { + name: "52589" + id: 1149 + display_name: "Coenonympha pamphilus" +} +item { + name: "3439" + id: 1150 + display_name: "Zenaida auriculata" +} +item { + name: "36208" + id: 1151 + display_name: "Sceloporus occidentalis bocourtii" +} +item { + name: "72936" + id: 1152 + display_name: "Hymenolaimus malacorhynchos" +} +item { + name: "85362" + id: 1153 + display_name: "Sphex ichneumoneus" +} +item { + name: "36217" + id: 1154 + display_name: "Sceloporus merriami" +} +item { + name: "68993" + id: 1155 + display_name: "Liometopum occidentale" +} +item { + name: "199916" + id: 1156 + display_name: "Setophaga caerulescens" +} +item { + name: "52620" + id: 1157 + display_name: "Cicindela oregona" +} +item { + name: "36243" + id: 1158 + display_name: "Sceloporus jarrovii" +} +item { + name: "52628" + id: 1159 + display_name: "Araneus diadematus" +} +item { + name: "180007" + id: 1160 + display_name: "Otospermophilus beecheyi" +} +item { + name: "85408" + id: 1161 + display_name: "Erythemis collocata" +} +item { + name: "36262" + id: 1162 + display_name: "Sceloporus grammicus" +} +item { + name: "60839" + id: 1163 + display_name: "Spilosoma virginica" +} +item { + name: "16968" + id: 1164 + display_name: "Camptostoma imberbe" +} +item { + name: "4715" + id: 1165 + display_name: "Caracara plancus" +} +item { + name: "313246" + id: 1166 + display_name: "Olla v-nigrum" +} +item { + name: "126393" + id: 1167 + display_name: "Stomolophus meleagris" +} +item { + name: "126397" + id: 1168 + display_name: "Halysidota harrisii" +} +item { + name: "64221" + id: 1169 + display_name: "Bipalium kewense" +} +item { + name: "28102" + id: 1170 + display_name: "Virginia striatula" +} +item { + name: "150985" + id: 1171 + display_name: "Planorbella trivolvis" +} +item { + name: "36306" + id: 1172 + display_name: "Phrynosoma modestum" +} +item { + name: "36307" + id: 1173 + display_name: "Phrynosoma orbiculare" +} +item { + name: "199929" + id: 1174 + display_name: "Plagiometriona clavata" +} +item { + name: "3545" + id: 1175 + display_name: "Columbina passerina" +} +item { + name: "36315" + id: 1176 + display_name: "Phrynosoma hernandesi" +} +item { + name: "367556" + id: 1177 + display_name: "Eupsittula nana" +} +item { + name: "371963" + id: 1178 + display_name: "Lampropeltis 
multifasciata" +} +item { + name: "36339" + id: 1179 + display_name: "Holbrookia propinqua" +} +item { + name: "36094" + id: 1180 + display_name: "Uta stansburiana" +} +item { + name: "36343" + id: 1181 + display_name: "Holbrookia maculata" +} +item { + name: "52766" + id: 1182 + display_name: "Megaphasma denticrus" +} +item { + name: "18941" + id: 1183 + display_name: "Nestor notabilis" +} +item { + name: "3580" + id: 1184 + display_name: "Columbina talpacoti" +} +item { + name: "123690" + id: 1185 + display_name: "Caranx melampygus" +} +item { + name: "52482" + id: 1186 + display_name: "Episyrphus balteatus" +} +item { + name: "28762" + id: 1187 + display_name: "Rhinocheilus lecontei" +} +item { + name: "3607" + id: 1188 + display_name: "Geopelia striata" +} +item { + name: "52484" + id: 1189 + display_name: "Celastrina echo" +} +item { + name: "61293" + id: 1190 + display_name: "Thaumetopoea pityocampa" +} +item { + name: "19998" + id: 1191 + display_name: "Athene noctua" +} +item { + name: "44575" + id: 1192 + display_name: "Rattus rattus" +} +item { + name: "44576" + id: 1193 + display_name: "Rattus norvegicus" +} +item { + name: "133250" + id: 1194 + display_name: "Tettigonia viridissima" +} +item { + name: "52774" + id: 1195 + display_name: "Bombus fervidus" +} +item { + name: "49756" + id: 1196 + display_name: "Nephila clavipes" +} +item { + name: "52779" + id: 1197 + display_name: "Bombus bimaculatus" +} +item { + name: "52782" + id: 1198 + display_name: "Melissodes bimaculata" +} +item { + name: "126513" + id: 1199 + display_name: "Larinioides cornutus" +} +item { + name: "69170" + id: 1200 + display_name: "Hemigrapsus oregonensis" +} +item { + name: "1971" + id: 1201 + display_name: "Crotophaga ani" +} +item { + name: "12942" + id: 1202 + display_name: "Sialia sialis" +} +item { + name: "126532" + id: 1203 + display_name: "Toxomerus geminatus" +} +item { + name: "216649" + id: 1204 + display_name: "Chauliognathus pensylvanicus" +} +item { + name: "3734" + id: 1205 + display_name: "Platalea alba" +} +item { + name: "216651" + id: 1206 + display_name: "Chelinidea vittiger" +} +item { + name: "20044" + id: 1207 + display_name: "Bubo virginianus" +} +item { + name: "11855" + id: 1208 + display_name: "Petrochelidon fulva" +} +item { + name: "28246" + id: 1209 + display_name: "Arizona elegans" +} +item { + name: "224855" + id: 1210 + display_name: "Melipotis indomita" +} +item { + name: "11867" + id: 1211 + display_name: "Progne subis" +} +item { + name: "126562" + id: 1212 + display_name: "Setophaga coronata auduboni" +} +item { + name: "126568" + id: 1213 + display_name: "Manduca rustica" +} +item { + name: "11882" + id: 1214 + display_name: "Hirundo neoxena" +} +item { + name: "11901" + id: 1215 + display_name: "Hirundo rustica" +} +item { + name: "52865" + id: 1216 + display_name: "Tramea lacerata" +} +item { + name: "142978" + id: 1217 + display_name: "Simyra insularis" +} +item { + name: "123499" + id: 1218 + display_name: "Notophthalmus viridescens viridescens" +} +item { + name: "339592" + id: 1219 + display_name: "Calidris virgata" +} +item { + name: "339593" + id: 1220 + display_name: "Calidris pugnax" +} +item { + name: "44311" + id: 1221 + display_name: "Microtus pennsylvanicus" +} +item { + name: "142988" + id: 1222 + display_name: "Lerema accius" +} +item { + name: "142990" + id: 1223 + display_name: "Autographa precationis" +} +item { + name: "142995" + id: 1224 + display_name: "Hymenia perspectalis" +} +item { + name: "129423" + id: 1225 + display_name: "Zelus luridus" 
+} +item { + name: "3733" + id: 1226 + display_name: "Platalea regia" +} +item { + name: "470678" + id: 1227 + display_name: "Cerithideopsis californica" +} +item { + name: "146713" + id: 1228 + display_name: "Elaphria grata" +} +item { + name: "143002" + id: 1229 + display_name: "Orthonama obstipata" +} +item { + name: "11931" + id: 1230 + display_name: "Tachycineta thalassina" +} +item { + name: "143005" + id: 1231 + display_name: "Costaconvexa centrostrigaria" +} +item { + name: "3743" + id: 1232 + display_name: "Bostrychia hagedash" +} +item { + name: "143009" + id: 1233 + display_name: "Ectropis crepuscularia" +} +item { + name: "36514" + id: 1234 + display_name: "Anolis carolinensis" +} +item { + name: "143012" + id: 1235 + display_name: "Zanclognatha pedipilalis" +} +item { + name: "11941" + id: 1236 + display_name: "Riparia riparia" +} +item { + name: "52902" + id: 1237 + display_name: "Palthis asopialis" +} +item { + name: "3751" + id: 1238 + display_name: "Eudocimus albus" +} +item { + name: "52906" + id: 1239 + display_name: "Chytonix palliatricula" +} +item { + name: "3756" + id: 1240 + display_name: "Plegadis falcinellus" +} +item { + name: "3759" + id: 1241 + display_name: "Plegadis chihi" +} +item { + name: "143024" + id: 1242 + display_name: "Eusarca confusaria" +} +item { + name: "62067" + id: 1243 + display_name: "Orthetrum cancellatum" +} +item { + name: "28340" + id: 1244 + display_name: "Thamnophis sauritus" +} +item { + name: "28345" + id: 1245 + display_name: "Thamnophis cyrtopsis" +} +item { + name: "143034" + id: 1246 + display_name: "Hippodamia variegata" +} +item { + name: "28347" + id: 1247 + display_name: "Thamnophis cyrtopsis ocellatus" +} +item { + name: "52925" + id: 1248 + display_name: "Phyciodes tharos" +} +item { + name: "8010" + id: 1249 + display_name: "Corvus corax" +} +item { + name: "11970" + id: 1250 + display_name: "Stelgidopteryx serripennis" +} +item { + name: "28362" + id: 1251 + display_name: "Thamnophis sirtalis" +} +item { + name: "3788" + id: 1252 + display_name: "Sula dactylatra" +} +item { + name: "44749" + id: 1253 + display_name: "Neotoma fuscipes" +} +item { + name: "52943" + id: 1254 + display_name: "Trichodezia albovittata" +} +item { + name: "3793" + id: 1255 + display_name: "Sula sula" +} +item { + name: "101667" + id: 1256 + display_name: "Gomphus exilis" +} +item { + name: "3797" + id: 1257 + display_name: "Sula leucogaster" +} +item { + name: "118486" + id: 1258 + display_name: "Macaria aemulataria" +} +item { + name: "3801" + id: 1259 + display_name: "Morus serrator" +} +item { + name: "28378" + id: 1260 + display_name: "Thamnophis radix" +} +item { + name: "118492" + id: 1261 + display_name: "Helicoverpa zea" +} +item { + name: "148793" + id: 1262 + display_name: "Asterocampa leilia" +} +item { + name: "28384" + id: 1263 + display_name: "Thamnophis proximus rubrilineatus" +} +item { + name: "257761" + id: 1264 + display_name: "Phocides polybius" +} +item { + name: "28387" + id: 1265 + display_name: "Thamnophis proximus orarius" +} +item { + name: "28390" + id: 1266 + display_name: "Thamnophis marcianus" +} +item { + name: "118503" + id: 1267 + display_name: "Darapsa myron" +} +item { + name: "3817" + id: 1268 + display_name: "Eudyptula minor" +} +item { + name: "36135" + id: 1269 + display_name: "Uma scoparia" +} +item { + name: "28396" + id: 1270 + display_name: "Thamnophis hammondii" +} +item { + name: "28400" + id: 1271 + display_name: "Thamnophis elegans elegans" +} +item { + name: "118513" + id: 1272 + display_name: 
"Hypena scabra" +} +item { + name: "28403" + id: 1273 + display_name: "Thamnophis elegans vagrans" +} +item { + name: "201342" + id: 1274 + display_name: "Chalcoela iphitalis" +} +item { + name: "3831" + id: 1275 + display_name: "Megadyptes antipodes" +} +item { + name: "126712" + id: 1276 + display_name: "Corydalus cornutus" +} +item { + name: "30676" + id: 1277 + display_name: "Agkistrodon piscivorus leucostoma" +} +item { + name: "3834" + id: 1278 + display_name: "Scopus umbretta" +} +item { + name: "213631" + id: 1279 + display_name: "Anicla infecta" +} +item { + name: "143105" + id: 1280 + display_name: "Pleuroprucha insulsaria" +} +item { + name: "28418" + id: 1281 + display_name: "Thamnophis atratus" +} +item { + name: "118531" + id: 1282 + display_name: "Parallelia bistriaris" +} +item { + name: "145363" + id: 1283 + display_name: "Troglodytes troglodytes" +} +item { + name: "3845" + id: 1284 + display_name: "Calidris canutus" +} +item { + name: "12038" + id: 1285 + display_name: "Lanius collurio" +} +item { + name: "143114" + id: 1286 + display_name: "Phragmatobia fuliginosa" +} +item { + name: "3851" + id: 1287 + display_name: "Calidris bairdii" +} +item { + name: "324226" + id: 1288 + display_name: "Meleagris gallopavo intermedia" +} +item { + name: "143118" + id: 1289 + display_name: "Pseudeustrotia carneola" +} +item { + name: "3855" + id: 1290 + display_name: "Calidris mauri" +} +item { + name: "3856" + id: 1291 + display_name: "Calidris maritima" +} +item { + name: "3857" + id: 1292 + display_name: "Calidris alpina" +} +item { + name: "143124" + id: 1293 + display_name: "Parapediasia teterrella" +} +item { + name: "143125" + id: 1294 + display_name: "Hypena madefactalis" +} +item { + name: "3863" + id: 1295 + display_name: "Calidris ferruginea" +} +item { + name: "118552" + id: 1296 + display_name: "Felis catus" +} +item { + name: "3865" + id: 1297 + display_name: "Calidris melanotos" +} +item { + name: "3869" + id: 1298 + display_name: "Limnodromus griseus" +} +item { + name: "118558" + id: 1299 + display_name: "Manduca quinquemaculata" +} +item { + name: "118559" + id: 1300 + display_name: "Tetraopes tetrophthalmus" +} +item { + name: "12065" + id: 1301 + display_name: "Malurus cyaneus" +} +item { + name: "3878" + id: 1302 + display_name: "Tringa nebularia" +} +item { + name: "101681" + id: 1303 + display_name: "Gomphus militaris" +} +item { + name: "413483" + id: 1304 + display_name: "Todiramphus sanctus vagans" +} +item { + name: "3885" + id: 1305 + display_name: "Tringa ochropus" +} +item { + name: "3888" + id: 1306 + display_name: "Tringa glareola" +} +item { + name: "126770" + id: 1307 + display_name: "Vulpes vulpes fulvus" +} +item { + name: "3892" + id: 1308 + display_name: "Tringa melanoleuca" +} +item { + name: "3893" + id: 1309 + display_name: "Tringa flavipes" +} +item { + name: "126775" + id: 1310 + display_name: "Cervus elaphus nelsoni" +} +item { + name: "3896" + id: 1311 + display_name: "Numenius arquata" +} +item { + name: "126777" + id: 1312 + display_name: "Peucetia viridans" +} +item { + name: "3901" + id: 1313 + display_name: "Numenius phaeopus" +} +item { + name: "32058" + id: 1314 + display_name: "Elgaria multicarinata webbii" +} +item { + name: "413506" + id: 1315 + display_name: "Phalacrocorax carbo novaehollandiae" +} +item { + name: "413508" + id: 1316 + display_name: "Petroica macrocephala macrocephala" +} +item { + name: "413512" + id: 1317 + display_name: "Petroica australis longipes" +} +item { + name: "61258" + id: 1318 + display_name: 
"Junonia evarete" +} +item { + name: "28493" + id: 1319 + display_name: "Tantilla nigriceps" +} +item { + name: "413522" + id: 1320 + display_name: "Prosthemadera novaeseelandiae novaeseelandiae" +} +item { + name: "58506" + id: 1321 + display_name: "Polites themistocles" +} +item { + name: "28505" + id: 1322 + display_name: "Tantilla gracilis" +} +item { + name: "20315" + id: 1323 + display_name: "Asio flammeus" +} +item { + name: "143196" + id: 1324 + display_name: "Schinia arcigera" +} +item { + name: "413533" + id: 1325 + display_name: "Rhipidura fuliginosa fuliginosa" +} +item { + name: "3936" + id: 1326 + display_name: "Scolopax minor" +} +item { + name: "3938" + id: 1327 + display_name: "Arenaria interpres" +} +item { + name: "3941" + id: 1328 + display_name: "Arenaria melanocephala" +} +item { + name: "413543" + id: 1329 + display_name: "Rhipidura fuliginosa placabilis" +} +item { + name: "3947" + id: 1330 + display_name: "Limosa limosa" +} +item { + name: "3950" + id: 1331 + display_name: "Limosa haemastica" +} +item { + name: "126269" + id: 1332 + display_name: "Austrolestes colensonis" +} +item { + name: "3954" + id: 1333 + display_name: "Limosa fedoa" +} +item { + name: "199998" + id: 1334 + display_name: "Pedicia albivitta" +} +item { + name: "3959" + id: 1335 + display_name: "Phalaropus lobatus" +} +item { + name: "3962" + id: 1336 + display_name: "Bartramia longicauda" +} +item { + name: "199999" + id: 1337 + display_name: "Callopistria mollissima" +} +item { + name: "104426" + id: 1338 + display_name: "Lestes disjunctus" +} +item { + name: "126848" + id: 1339 + display_name: "Delphinia picta" +} +item { + name: "3951" + id: 1340 + display_name: "Limosa lapponica" +} +item { + name: "20356" + id: 1341 + display_name: "Aegolius acadicus" +} +item { + name: "121792" + id: 1342 + display_name: "Polistes carolina" +} +item { + name: "3978" + id: 1343 + display_name: "Actitis hypoleucos" +} +item { + name: "53911" + id: 1344 + display_name: "Cyprinus carpio" +} +item { + name: "135055" + id: 1345 + display_name: "Bufotes balearicus" +} +item { + name: "19121" + id: 1346 + display_name: "Trichoglossus haematodus" +} +item { + name: "28562" + id: 1347 + display_name: "Storeria dekayi" +} +item { + name: "28563" + id: 1348 + display_name: "Storeria dekayi texana" +} +item { + name: "20372" + id: 1349 + display_name: "Surnia ulula" +} +item { + name: "135064" + id: 1350 + display_name: "Bufotes viridis" +} +item { + name: "28570" + id: 1351 + display_name: "Storeria dekayi dekayi" +} +item { + name: "61341" + id: 1352 + display_name: "Narceus americanus" +} +item { + name: "7493" + id: 1353 + display_name: "Polioptila caerulea" +} +item { + name: "29339" + id: 1354 + display_name: "Natrix natrix" +} +item { + name: "9135" + id: 1355 + display_name: "Spizella passerina" +} +item { + name: "126889" + id: 1356 + display_name: "Toxomerus marginatus" +} +item { + name: "143274" + id: 1357 + display_name: "Gluphisia septentrionis" +} +item { + name: "343021" + id: 1358 + display_name: "Anguis fragilis" +} +item { + name: "14591" + id: 1359 + display_name: "Pycnonotus jocosus" +} +item { + name: "10227" + id: 1360 + display_name: "Passerina cyanea" +} +item { + name: "10228" + id: 1361 + display_name: "Passerina versicolor" +} +item { + name: "61371" + id: 1362 + display_name: "Panulirus interruptus" +} +item { + name: "143294" + id: 1363 + display_name: "Colias croceus" +} +item { + name: "135104" + id: 1364 + display_name: "Ichthyosaura alpestris" +} +item { + name: "83958" + id: 1365 + 
display_name: "Phryganidia californica" +} +item { + name: "143302" + id: 1366 + display_name: "Megapallifera mutabilis" +} +item { + name: "12231" + id: 1367 + display_name: "Manorina melanocephala" +} +item { + name: "200661" + id: 1368 + display_name: "Coluber constrictor mormon" +} +item { + name: "3681" + id: 1369 + display_name: "Ocyphaps lophotes" +} +item { + name: "4773" + id: 1370 + display_name: "Jabiru mycteria" +} +item { + name: "135140" + id: 1371 + display_name: "Taricha sierrae" +} +item { + name: "28649" + id: 1372 + display_name: "Sonora semiannulata" +} +item { + name: "53226" + id: 1373 + display_name: "Boisea rubrolineata" +} +item { + name: "53227" + id: 1374 + display_name: "Boisea trivittata" +} +item { + name: "14593" + id: 1375 + display_name: "Pycnonotus cafer" +} +item { + name: "61428" + id: 1376 + display_name: "Arion subfuscus" +} +item { + name: "333822" + id: 1377 + display_name: "Anser cygnoides domesticus" +} +item { + name: "41641" + id: 1378 + display_name: "Ursus arctos" +} +item { + name: "56602" + id: 1379 + display_name: "Plebejus lupini" +} +item { + name: "55295" + id: 1380 + display_name: "Grapsus grapsus" +} +item { + name: "36181" + id: 1381 + display_name: "Sceloporus cyanogenys" +} +item { + name: "41708" + id: 1382 + display_name: "Phoca vitulina" +} +item { + name: "118788" + id: 1383 + display_name: "Desmia funeralis" +} +item { + name: "61445" + id: 1384 + display_name: "Acanthocephala terminalis" +} +item { + name: "30721" + id: 1385 + display_name: "Crotalus triseriatus" +} +item { + name: "180010" + id: 1386 + display_name: "Callospermophilus lateralis" +} +item { + name: "53875" + id: 1387 + display_name: "Ocypode quadrata" +} +item { + name: "18358" + id: 1388 + display_name: "Picus viridis" +} +item { + name: "143390" + id: 1389 + display_name: "Oxidus gracilis" +} +item { + name: "55785" + id: 1390 + display_name: "Ochlodes agricola" +} +item { + name: "4141" + id: 1391 + display_name: "Phoebastria nigripes" +} +item { + name: "20526" + id: 1392 + display_name: "Struthio camelus" +} +item { + name: "32093" + id: 1393 + display_name: "Boa constrictor" +} +item { + name: "4144" + id: 1394 + display_name: "Phoebastria immutabilis" +} +item { + name: "74442" + id: 1395 + display_name: "Hydrochoerus hydrochaeris" +} +item { + name: "61492" + id: 1396 + display_name: "Chrysopilus thoracicus" +} +item { + name: "61495" + id: 1397 + display_name: "Erythemis simplicicollis" +} +item { + name: "389177" + id: 1398 + display_name: "Eriophora pustulosa" +} +item { + name: "61503" + id: 1399 + display_name: "Ascalapha odorata" +} +item { + name: "118855" + id: 1400 + display_name: "Calosoma scrutator" +} +item { + name: "61513" + id: 1401 + display_name: "Adelges tsugae" +} +item { + name: "28749" + id: 1402 + display_name: "Salvadora grahamiae" +} +item { + name: "143440" + id: 1403 + display_name: "Ceratomia catalpae" +} +item { + name: "61523" + id: 1404 + display_name: "Helix pomatia" +} +item { + name: "4180" + id: 1405 + display_name: "Fulmarus glacialis" +} +item { + name: "143445" + id: 1406 + display_name: "Pachysphinx modesta" +} +item { + name: "233560" + id: 1407 + display_name: "Vespula squamosa" +} +item { + name: "126308" + id: 1408 + display_name: "Marpesia chiron" +} +item { + name: "61536" + id: 1409 + display_name: "Calopteryx virgo" +} +item { + name: "685" + id: 1410 + display_name: "Francolinus pondicerianus" +} +item { + name: "60774" + id: 1411 + display_name: "Psychomorpha epimenis" +} +item { + name: "135271" + id: 
1412 + display_name: "Amphibolips confluenta" +} +item { + name: "69736" + id: 1413 + display_name: "Schistocerca americana" +} +item { + name: "69737" + id: 1414 + display_name: "Xylophanes tersa" +} +item { + name: "6141" + id: 1415 + display_name: "Cynanthus latirostris" +} +item { + name: "4205" + id: 1416 + display_name: "Podiceps nigricollis" +} +item { + name: "69743" + id: 1417 + display_name: "Wallengrenia otho" +} +item { + name: "4208" + id: 1418 + display_name: "Podiceps cristatus" +} +item { + name: "4209" + id: 1419 + display_name: "Podiceps auritus" +} +item { + name: "118901" + id: 1420 + display_name: "Hyles gallii" +} +item { + name: "17871" + id: 1421 + display_name: "Dendrocopos major" +} +item { + name: "143484" + id: 1422 + display_name: "Blepharomastix ranalis" +} +item { + name: "4224" + id: 1423 + display_name: "Podiceps grisegena" +} +item { + name: "200834" + id: 1424 + display_name: "Sphenodon punctatus" +} +item { + name: "179995" + id: 1425 + display_name: "Urocitellus beldingi" +} +item { + name: "322024" + id: 1426 + display_name: "Apatura ilia" +} +item { + name: "44396" + id: 1427 + display_name: "Peromyscus maniculatus" +} +item { + name: "4237" + id: 1428 + display_name: "Tachybaptus ruficollis" +} +item { + name: "118930" + id: 1429 + display_name: "Spodoptera ornithogalli" +} +item { + name: "118936" + id: 1430 + display_name: "Euplagia quadripunctaria" +} +item { + name: "4804" + id: 1431 + display_name: "Charadrius montanus" +} +item { + name: "127133" + id: 1432 + display_name: "Hyphantria cunea" +} +item { + name: "143518" + id: 1433 + display_name: "Prochoerodes lineola" +} +item { + name: "52592" + id: 1434 + display_name: "Pararge aegeria" +} +item { + name: "36149" + id: 1435 + display_name: "Sceloporus torquatus" +} +item { + name: "118951" + id: 1436 + display_name: "Pterophylla camellifolia" +} +item { + name: "4265" + id: 1437 + display_name: "Phalacrocorax auritus" +} +item { + name: "4270" + id: 1438 + display_name: "Phalacrocorax carbo" +} +item { + name: "446640" + id: 1439 + display_name: "Neomonachus schauinslandi" +} +item { + name: "118961" + id: 1440 + display_name: "Conocephalus brevipennis" +} +item { + name: "28850" + id: 1441 + display_name: "Regina septemvittata" +} +item { + name: "4277" + id: 1442 + display_name: "Phalacrocorax penicillatus" +} +item { + name: "4234" + id: 1443 + display_name: "Aechmophorus clarkii" +} +item { + name: "118967" + id: 1444 + display_name: "Psyllobora vigintimaculata" +} +item { + name: "118968" + id: 1445 + display_name: "Allograpta obliqua" +} +item { + name: "118970" + id: 1446 + display_name: "Bombus impatiens" +} +item { + name: "123594" + id: 1447 + display_name: "Anaxyrus americanus americanus" +} +item { + name: "69838" + id: 1448 + display_name: "Cyanea capillata" +} +item { + name: "69844" + id: 1449 + display_name: "Anthocharis midea" +} +item { + name: "48505" + id: 1450 + display_name: "Junonia coenia" +} +item { + name: "151769" + id: 1451 + display_name: "Diaphania hyalinata" +} +item { + name: "151770" + id: 1452 + display_name: "Peridea angulosa" +} +item { + name: "53467" + id: 1453 + display_name: "Leucauge venusta" +} +item { + name: "119013" + id: 1454 + display_name: "Ctenucha virginica" +} +item { + name: "4327" + id: 1455 + display_name: "Pelecanus onocrotalus" +} +item { + name: "143592" + id: 1456 + display_name: "Spragueia leo" +} +item { + name: "200938" + id: 1457 + display_name: "Diaethria anna" +} +item { + name: "4334" + id: 1458 + display_name: "Pelecanus 
erythrorhynchos" +} +item { + name: "151794" + id: 1459 + display_name: "Atta texana" +} +item { + name: "3454" + id: 1460 + display_name: "Zenaida macroura" +} +item { + name: "4872" + id: 1461 + display_name: "Vanellus miles" +} +item { + name: "4345" + id: 1462 + display_name: "Larus occidentalis" +} +item { + name: "143610" + id: 1463 + display_name: "Besma quercivoraria" +} +item { + name: "20733" + id: 1464 + display_name: "Trogon massena" +} +item { + name: "143615" + id: 1465 + display_name: "Udea rubigalis" +} +item { + name: "4352" + id: 1466 + display_name: "Larus thayeri" +} +item { + name: "4353" + id: 1467 + display_name: "Larus heermanni" +} +item { + name: "4354" + id: 1468 + display_name: "Larus livens" +} +item { + name: "4356" + id: 1469 + display_name: "Larus canus" +} +item { + name: "220826" + id: 1470 + display_name: "Habrosyne scripta" +} +item { + name: "4361" + id: 1471 + display_name: "Larus glaucoides" +} +item { + name: "4364" + id: 1472 + display_name: "Larus delawarensis" +} +item { + name: "102672" + id: 1473 + display_name: "Hetaerina titia" +} +item { + name: "20754" + id: 1474 + display_name: "Trogon collaris" +} +item { + name: "479512" + id: 1475 + display_name: "Acronicta fallax" +} +item { + name: "3460" + id: 1476 + display_name: "Zenaida asiatica" +} +item { + name: "119066" + id: 1477 + display_name: "Idia lubricalis" +} +item { + name: "119068" + id: 1478 + display_name: "Apodemia virgulti" +} +item { + name: "4381" + id: 1479 + display_name: "Larus fuscus" +} +item { + name: "4385" + id: 1480 + display_name: "Larus californicus" +} +item { + name: "69922" + id: 1481 + display_name: "Oncorhynchus nerka" +} +item { + name: "12580" + id: 1482 + display_name: "Prosthemadera novaeseelandiae" +} +item { + name: "69925" + id: 1483 + display_name: "Clinocardium nuttallii" +} +item { + name: "20781" + id: 1484 + display_name: "Trogon elegans" +} +item { + name: "4399" + id: 1485 + display_name: "Larus glaucescens" +} +item { + name: "94513" + id: 1486 + display_name: "Archilestes grandis" +} +item { + name: "119090" + id: 1487 + display_name: "Eremnophila aureonotata" +} +item { + name: "20787" + id: 1488 + display_name: "Trogon citreolus" +} +item { + name: "69940" + id: 1489 + display_name: "Hemiargus ceraunus" +} +item { + name: "61749" + id: 1490 + display_name: "Lucanus cervus" +} +item { + name: "4415" + id: 1491 + display_name: "Cepphus columba" +} +item { + name: "4832" + id: 1492 + display_name: "Himantopus leucocephalus" +} +item { + name: "4418" + id: 1493 + display_name: "Cepphus grylle" +} +item { + name: "12612" + id: 1494 + display_name: "Anthornis melanura" +} +item { + name: "125627" + id: 1495 + display_name: "Ellychnia corrusca" +} +item { + name: "201031" + id: 1496 + display_name: "Leptoptilos crumenifer" +} +item { + name: "201032" + id: 1497 + display_name: "Threskiornis moluccus" +} +item { + name: "60812" + id: 1498 + display_name: "Lucanus capreolus" +} +item { + name: "10295" + id: 1499 + display_name: "Thraupis episcopus" +} +item { + name: "209233" + id: 1500 + display_name: "Equus caballus" +} +item { + name: "119122" + id: 1501 + display_name: "Araneus trifolium" +} +item { + name: "201043" + id: 1502 + display_name: "Geranoaetus albicaudatus" +} +item { + name: "61781" + id: 1503 + display_name: "Ochlodes sylvanus" +} +item { + name: "49133" + id: 1504 + display_name: "Vanessa atalanta" +} +item { + name: "94556" + id: 1505 + display_name: "Argia lugens" +} +item { + name: "94557" + id: 1506 + display_name: "Argia moesta" 
+} +item { + name: "61524" + id: 1507 + display_name: "Forficula auricularia" +} +item { + name: "4449" + id: 1508 + display_name: "Sterna paradisaea" +} +item { + name: "4450" + id: 1509 + display_name: "Sterna hirundo" +} +item { + name: "348515" + id: 1510 + display_name: "Nyctemera annulata" +} +item { + name: "110625" + id: 1511 + display_name: "Progomphus obscurus" +} +item { + name: "94566" + id: 1512 + display_name: "Argia plana" +} +item { + name: "4457" + id: 1513 + display_name: "Sterna forsteri" +} +item { + name: "94571" + id: 1514 + display_name: "Argia sedula" +} +item { + name: "61804" + id: 1515 + display_name: "Olivella biplicata" +} +item { + name: "204532" + id: 1516 + display_name: "Lanius excubitor" +} +item { + name: "29038" + id: 1517 + display_name: "Pituophis deppei" +} +item { + name: "143728" + id: 1518 + display_name: "Choristoneura rosaceana" +} +item { + name: "94577" + id: 1519 + display_name: "Argia translata" +} +item { + name: "130451" + id: 1520 + display_name: "Dione juno" +} +item { + name: "29044" + id: 1521 + display_name: "Pituophis catenifer" +} +item { + name: "70005" + id: 1522 + display_name: "Ilyanassa obsoleta" +} +item { + name: "143734" + id: 1523 + display_name: "Eupithecia miserulata" +} +item { + name: "20856" + id: 1524 + display_name: "Pharomachrus mocinno" +} +item { + name: "29049" + id: 1525 + display_name: "Pituophis catenifer deserticola" +} +item { + name: "29052" + id: 1526 + display_name: "Pituophis catenifer affinis" +} +item { + name: "29053" + id: 1527 + display_name: "Pituophis catenifer annectens" +} +item { + name: "4478" + id: 1528 + display_name: "Sterna striata" +} +item { + name: "407459" + id: 1529 + display_name: "Dolomedes minor" +} +item { + name: "4489" + id: 1530 + display_name: "Stercorarius parasiticus" +} +item { + name: "4491" + id: 1531 + display_name: "Stercorarius pomarinus" +} +item { + name: "6969" + id: 1532 + display_name: "Anas gracilis" +} +item { + name: "4494" + id: 1533 + display_name: "Rissa tridactyla" +} +item { + name: "4496" + id: 1534 + display_name: "Rynchops niger" +} +item { + name: "4501" + id: 1535 + display_name: "Alca torda" +} +item { + name: "4504" + id: 1536 + display_name: "Fratercula arctica" +} +item { + name: "4509" + id: 1537 + display_name: "Fratercula cirrhata" +} +item { + name: "26693" + id: 1538 + display_name: "Scaphiopus hurterii" +} +item { + name: "94624" + id: 1539 + display_name: "Arigomphus submedianus" +} +item { + name: "94625" + id: 1540 + display_name: "Arigomphus villosipes" +} +item { + name: "120720" + id: 1541 + display_name: "Pseudacris sierra" +} +item { + name: "70057" + id: 1542 + display_name: "Agrilus planipennis" +} +item { + name: "127402" + id: 1543 + display_name: "Grammia virgo" +} +item { + name: "51271" + id: 1544 + display_name: "Trachemys scripta elegans" +} +item { + name: "12716" + id: 1545 + display_name: "Turdus merula" +} +item { + name: "12718" + id: 1546 + display_name: "Turdus plumbeus" +} +item { + name: "12720" + id: 1547 + display_name: "Turdus grayi" +} +item { + name: "63697" + id: 1548 + display_name: "Metacarcinus magister" +} +item { + name: "12727" + id: 1549 + display_name: "Turdus migratorius" +} +item { + name: "26698" + id: 1550 + display_name: "Spea multiplicata" +} +item { + name: "12735" + id: 1551 + display_name: "Turdus viscivorus" +} +item { + name: "26699" + id: 1552 + display_name: "Spea bombifrons" +} +item { + name: "127431" + id: 1553 + display_name: "Emmelina monodactyla" +} +item { + name: "4553" + id: 1554 + 
display_name: "Cerorhinca monocerata" +} +item { + name: "12748" + id: 1555 + display_name: "Turdus philomelos" +} +item { + name: "233933" + id: 1556 + display_name: "Zale horrida" +} +item { + name: "1468" + id: 1557 + display_name: "Galbula ruficauda" +} +item { + name: "111055" + id: 1558 + display_name: "Pseudoleon superbus" +} +item { + name: "61908" + id: 1559 + display_name: "Orgyia vetusta" +} +item { + name: "43086" + id: 1560 + display_name: "Procavia capensis" +} +item { + name: "143830" + id: 1561 + display_name: "Eumorpha vitis" +} +item { + name: "67663" + id: 1562 + display_name: "Leptysma marginicollis" +} +item { + name: "127457" + id: 1563 + display_name: "Idia americalis" +} +item { + name: "4578" + id: 1564 + display_name: "Jacana spinosa" +} +item { + name: "127460" + id: 1565 + display_name: "Idia aemula" +} +item { + name: "201192" + id: 1566 + display_name: "Saxicola rubicola" +} +item { + name: "20969" + id: 1567 + display_name: "Upupa epops" +} +item { + name: "94699" + id: 1568 + display_name: "Aspidoscelis marmorata" +} +item { + name: "10322" + id: 1569 + display_name: "Euphagus carolinus" +} +item { + name: "53743" + id: 1570 + display_name: "Uca pugilator" +} +item { + name: "61256" + id: 1571 + display_name: "Leptoglossus phyllopus" +} +item { + name: "29438" + id: 1572 + display_name: "Coluber flagellum piceus" +} +item { + name: "53750" + id: 1573 + display_name: "Lottia gigantea" +} +item { + name: "143865" + id: 1574 + display_name: "Odocoileus hemionus hemionus" +} +item { + name: "143867" + id: 1575 + display_name: "Protoboarmia porcelaria" +} +item { + name: "209405" + id: 1576 + display_name: "Cenopis reticulatana" +} +item { + name: "49920" + id: 1577 + display_name: "Nymphalis californica" +} +item { + name: "53762" + id: 1578 + display_name: "Scolopendra polymorpha" +} +item { + name: "127492" + id: 1579 + display_name: "Megalographa biloba" +} +item { + name: "62470" + id: 1580 + display_name: "Limax maximus" +} +item { + name: "4621" + id: 1581 + display_name: "Gavia pacifica" +} +item { + name: "14884" + id: 1582 + display_name: "Mimus gilvus" +} +item { + name: "29200" + id: 1583 + display_name: "Opheodrys aestivus" +} +item { + name: "201233" + id: 1584 + display_name: "Passer italiae" +} +item { + name: "4626" + id: 1585 + display_name: "Gavia immer" +} +item { + name: "4627" + id: 1586 + display_name: "Gavia stellata" +} +item { + name: "12822" + id: 1587 + display_name: "Oenanthe oenanthe" +} +item { + name: "4631" + id: 1588 + display_name: "Fregata magnificens" +} +item { + name: "4636" + id: 1589 + display_name: "Fregata minor" +} +item { + name: "70174" + id: 1590 + display_name: "Hypolimnas bolina" +} +item { + name: "4643" + id: 1591 + display_name: "Falco subbuteo" +} +item { + name: "4644" + id: 1592 + display_name: "Falco mexicanus" +} +item { + name: "4645" + id: 1593 + display_name: "Falco femoralis" +} +item { + name: "4647" + id: 1594 + display_name: "Falco peregrinus" +} +item { + name: "119340" + id: 1595 + display_name: "Amphipyra pyramidoides" +} +item { + name: "61997" + id: 1596 + display_name: "Steatoda grossa" +} +item { + name: "70191" + id: 1597 + display_name: "Ischnura ramburii" +} +item { + name: "53809" + id: 1598 + display_name: "Phidippus audax" +} +item { + name: "143213" + id: 1599 + display_name: "Frontinella communis" +} +item { + name: "4664" + id: 1600 + display_name: "Falco rufigularis" +} +item { + name: "4665" + id: 1601 + display_name: "Falco sparverius" +} +item { + name: "19893" + id: 1602 + 
display_name: "Strix varia" +} +item { + name: "4672" + id: 1603 + display_name: "Falco columbarius" +} +item { + name: "201281" + id: 1604 + display_name: "Phyllodesma americana" +} +item { + name: "201282" + id: 1605 + display_name: "Gallinula chloropus" +} +item { + name: "152131" + id: 1606 + display_name: "Bagrada hilaris" +} +item { + name: "145276" + id: 1607 + display_name: "Cardellina pusilla" +} +item { + name: "12878" + id: 1608 + display_name: "Catharus ustulatus" +} +item { + name: "4690" + id: 1609 + display_name: "Falco novaeseelandiae" +} +item { + name: "53843" + id: 1610 + display_name: "Brephidium exilis" +} +item { + name: "36281" + id: 1611 + display_name: "Sceloporus clarkii" +} +item { + name: "12890" + id: 1612 + display_name: "Catharus guttatus" +} +item { + name: "62045" + id: 1613 + display_name: "Lygaeus kalmii" +} +item { + name: "47075" + id: 1614 + display_name: "Dasypus novemcinctus" +} +item { + name: "12901" + id: 1615 + display_name: "Catharus fuscescens" +} +item { + name: "4714" + id: 1616 + display_name: "Caracara cheriway" +} +item { + name: "53867" + id: 1617 + display_name: "Erythemis plebeja" +} +item { + name: "62060" + id: 1618 + display_name: "Palomena prasina" +} +item { + name: "53869" + id: 1619 + display_name: "Ocypus olens" +} +item { + name: "4719" + id: 1620 + display_name: "Herpetotheres cachinnans" +} +item { + name: "116840" + id: 1621 + display_name: "Calcarius lapponicus" +} +item { + name: "4726" + id: 1622 + display_name: "Milvago chimachima" +} +item { + name: "29304" + id: 1623 + display_name: "Nerodia taxispilota" +} +item { + name: "29305" + id: 1624 + display_name: "Nerodia sipedon" +} +item { + name: "29306" + id: 1625 + display_name: "Nerodia sipedon sipedon" +} +item { + name: "142783" + id: 1626 + display_name: "Myodocha serripes" +} +item { + name: "4733" + id: 1627 + display_name: "Ciconia ciconia" +} +item { + name: "29310" + id: 1628 + display_name: "Nerodia rhombifer" +} +item { + name: "201343" + id: 1629 + display_name: "Lithacodes fasciola" +} +item { + name: "21121" + id: 1630 + display_name: "Dendrobates auratus" +} +item { + name: "127618" + id: 1631 + display_name: "Epirrhoe alternata" +} +item { + name: "43115" + id: 1632 + display_name: "Sylvilagus audubonii" +} +item { + name: "29317" + id: 1633 + display_name: "Nerodia fasciata" +} +item { + name: "4742" + id: 1634 + display_name: "Mycteria americana" +} +item { + name: "53895" + id: 1635 + display_name: "Stenopelmatus fuscus" +} +item { + name: "4744" + id: 1636 + display_name: "Mycteria ibis" +} +item { + name: "12937" + id: 1637 + display_name: "Sialia mexicana" +} +item { + name: "29322" + id: 1638 + display_name: "Nerodia fasciata confluens" +} +item { + name: "29324" + id: 1639 + display_name: "Nerodia clarkii clarkii" +} +item { + name: "29327" + id: 1640 + display_name: "Nerodia cyclopion" +} +item { + name: "29328" + id: 1641 + display_name: "Nerodia erythrogaster" +} +item { + name: "53905" + id: 1642 + display_name: "Mantis religiosa" +} +item { + name: "4754" + id: 1643 + display_name: "Ephippiorhynchus senegalensis" +} +item { + name: "127635" + id: 1644 + display_name: "Plecia nearctica" +} +item { + name: "4756" + id: 1645 + display_name: "Cathartes aura" +} +item { + name: "29334" + id: 1646 + display_name: "Nerodia erythrogaster flavigaster" +} +item { + name: "12951" + id: 1647 + display_name: "Myadestes townsendi" +} +item { + name: "4761" + id: 1648 + display_name: "Cathartes burrovianus" +} +item { + name: "4763" + id: 1649 + 
display_name: "Sarcoramphus papa" +} +item { + name: "4765" + id: 1650 + display_name: "Coragyps atratus" +} +item { + name: "19890" + id: 1651 + display_name: "Strix nebulosa" +} +item { + name: "26736" + id: 1652 + display_name: "Ambystoma opacum" +} +item { + name: "66331" + id: 1653 + display_name: "Pelophylax perezi" +} +item { + name: "4776" + id: 1654 + display_name: "Anastomus lamelligerus" +} +item { + name: "4892" + id: 1655 + display_name: "Pluvialis squatarola" +} +item { + name: "4778" + id: 1656 + display_name: "Gymnogyps californianus" +} +item { + name: "12971" + id: 1657 + display_name: "Muscicapa striata" +} +item { + name: "56776" + id: 1658 + display_name: "Glaucopsyche lygdamus" +} +item { + name: "127669" + id: 1659 + display_name: "Jadera haematoloma" +} +item { + name: "4793" + id: 1660 + display_name: "Charadrius vociferus" +} +item { + name: "209594" + id: 1661 + display_name: "Scantius aegyptius" +} +item { + name: "4795" + id: 1662 + display_name: "Charadrius wilsonia" +} +item { + name: "48586" + id: 1663 + display_name: "Cepaea nemoralis" +} +item { + name: "4798" + id: 1664 + display_name: "Charadrius melodus" +} +item { + name: "12992" + id: 1665 + display_name: "Phoenicurus phoenicurus" +} +item { + name: "45763" + id: 1666 + display_name: "Ondatra zibethicus" +} +item { + name: "119492" + id: 1667 + display_name: "Smerinthus cerisyi" +} +item { + name: "13000" + id: 1668 + display_name: "Phoenicurus ochruros" +} +item { + name: "4811" + id: 1669 + display_name: "Charadrius dubius" +} +item { + name: "64973" + id: 1670 + display_name: "Anaxyrus cognatus" +} +item { + name: "2168" + id: 1671 + display_name: "Eumomota superciliosa" +} +item { + name: "6980" + id: 1672 + display_name: "Anas querquedula" +} +item { + name: "64975" + id: 1673 + display_name: "Anaxyrus debilis" +} +item { + name: "43130" + id: 1674 + display_name: "Lepus californicus" +} +item { + name: "67707" + id: 1675 + display_name: "Argiope aurantia" +} +item { + name: "4836" + id: 1676 + display_name: "Himantopus mexicanus" +} +item { + name: "4838" + id: 1677 + display_name: "Haematopus bachmani" +} +item { + name: "43132" + id: 1678 + display_name: "Lepus americanus" +} +item { + name: "144106" + id: 1679 + display_name: "Pica pica" +} +item { + name: "4843" + id: 1680 + display_name: "Haematopus ostralegus" +} +item { + name: "67709" + id: 1681 + display_name: "Antrodiaetus riversi" +} +item { + name: "4848" + id: 1682 + display_name: "Haematopus unicolor" +} +item { + name: "4857" + id: 1683 + display_name: "Vanellus vanellus" +} +item { + name: "29435" + id: 1684 + display_name: "Coluber flagellum testaceus" +} +item { + name: "119550" + id: 1685 + display_name: "Feltia jaculifera" +} +item { + name: "4866" + id: 1686 + display_name: "Vanellus spinosus" +} +item { + name: "4870" + id: 1687 + display_name: "Vanellus armatus" +} +item { + name: "54024" + id: 1688 + display_name: "Satyrium californica" +} +item { + name: "13071" + id: 1689 + display_name: "Luscinia svecica" +} +item { + name: "3544" + id: 1690 + display_name: "Columbina inca" +} +item { + name: "4883" + id: 1691 + display_name: "Recurvirostra avosetta" +} +item { + name: "204701" + id: 1692 + display_name: "Melanchra adjuncta" +} +item { + name: "56083" + id: 1693 + display_name: "Armadillidium vulgare" +} +item { + name: "981" + id: 1694 + display_name: "Phasianus colchicus" +} +item { + name: "4893" + id: 1695 + display_name: "Pluvialis dominica" +} +item { + name: "103200" + id: 1696 + display_name: "Hypsiglena jani" 
+} +item { + name: "127777" + id: 1697 + display_name: "Vespula vulgaris" +} +item { + name: "7643" + id: 1698 + display_name: "Cinclus mexicanus" +} +item { + name: "13094" + id: 1699 + display_name: "Erithacus rubecula" +} +item { + name: "41777" + id: 1700 + display_name: "Lontra canadensis" +} +item { + name: "64988" + id: 1701 + display_name: "Anaxyrus terrestris" +} +item { + name: "18167" + id: 1702 + display_name: "Melanerpes aurifrons" +} +item { + name: "54064" + id: 1703 + display_name: "Polygonia comma" +} +item { + name: "209713" + id: 1704 + display_name: "Phigalia titea" +} +item { + name: "54068" + id: 1705 + display_name: "Boloria selene" +} +item { + name: "104585" + id: 1706 + display_name: "Libellula semifasciata" +} +item { + name: "119608" + id: 1707 + display_name: "Theba pisana" +} +item { + name: "4801" + id: 1708 + display_name: "Charadrius hiaticula" +} +item { + name: "104586" + id: 1709 + display_name: "Libellula vibrans" +} +item { + name: "4935" + id: 1710 + display_name: "Egretta gularis" +} +item { + name: "4937" + id: 1711 + display_name: "Egretta caerulea" +} +item { + name: "4938" + id: 1712 + display_name: "Egretta tricolor" +} +item { + name: "4940" + id: 1713 + display_name: "Egretta thula" +} +item { + name: "340813" + id: 1714 + display_name: "Hyalymenus tarsatus" +} +item { + name: "4943" + id: 1715 + display_name: "Egretta garzetta" +} +item { + name: "4947" + id: 1716 + display_name: "Egretta sacra" +} +item { + name: "13141" + id: 1717 + display_name: "Monticola solitarius" +} +item { + name: "4952" + id: 1718 + display_name: "Ardea cocoi" +} +item { + name: "4954" + id: 1719 + display_name: "Ardea cinerea" +} +item { + name: "67727" + id: 1720 + display_name: "Aeshna umbrosa" +} +item { + name: "4956" + id: 1721 + display_name: "Ardea herodias" +} +item { + name: "144223" + id: 1722 + display_name: "Chlosyne theona" +} +item { + name: "201568" + id: 1723 + display_name: "Diabrotica undecimpunctata undecimpunctata" +} +item { + name: "47383" + id: 1724 + display_name: "Latrodectus geometricus" +} +item { + name: "119664" + id: 1725 + display_name: "Cacyreus marshalli" +} +item { + name: "62321" + id: 1726 + display_name: "Rutpela maculata" +} +item { + name: "217970" + id: 1727 + display_name: "Cyclophora pendulinaria" +} +item { + name: "4981" + id: 1728 + display_name: "Nycticorax nycticorax" +} +item { + name: "12714" + id: 1729 + display_name: "Turdus rufopalliatus" +} +item { + name: "4994" + id: 1730 + display_name: "Ardeola ralloides" +} +item { + name: "4999" + id: 1731 + display_name: "Nyctanassa violacea" +} +item { + name: "37769" + id: 1732 + display_name: "Plestiodon skiltonianus" +} +item { + name: "213826" + id: 1733 + display_name: "Apamea amputatrix" +} +item { + name: "67736" + id: 1734 + display_name: "Rhionaeschna californica" +} +item { + name: "155380" + id: 1735 + display_name: "Andricus crystallinus" +} +item { + name: "144280" + id: 1736 + display_name: "Aramides cajaneus" +} +item { + name: "5017" + id: 1737 + display_name: "Bubulcus ibis" +} +item { + name: "5020" + id: 1738 + display_name: "Butorides virescens" +} +item { + name: "144285" + id: 1739 + display_name: "Porphyrio martinicus" +} +item { + name: "81729" + id: 1740 + display_name: "Feniseca tarquinius" +} +item { + name: "127905" + id: 1741 + display_name: "Bombus ternarius" +} +item { + name: "5034" + id: 1742 + display_name: "Botaurus lentiginosus" +} +item { + name: "29330" + id: 1743 + display_name: "Nerodia erythrogaster transversa" +} +item { + name: 
"5036" + id: 1744 + display_name: "Cochlearius cochlearius" +} +item { + name: "46001" + id: 1745 + display_name: "Sciurus vulgaris" +} +item { + name: "46005" + id: 1746 + display_name: "Sciurus variegatoides" +} +item { + name: "127928" + id: 1747 + display_name: "Autochton cellus" +} +item { + name: "340923" + id: 1748 + display_name: "Scolypopa australis" +} +item { + name: "46017" + id: 1749 + display_name: "Sciurus carolinensis" +} +item { + name: "46018" + id: 1750 + display_name: "Sciurus aberti" +} +item { + name: "447427" + id: 1751 + display_name: "Neverita lewisii" +} +item { + name: "46020" + id: 1752 + display_name: "Sciurus niger" +} +item { + name: "5061" + id: 1753 + display_name: "Anhinga novaehollandiae" +} +item { + name: "46023" + id: 1754 + display_name: "Sciurus griseus" +} +item { + name: "122375" + id: 1755 + display_name: "Carterocephalus palaemon" +} +item { + name: "5066" + id: 1756 + display_name: "Anhinga rufa" +} +item { + name: "145289" + id: 1757 + display_name: "Melozone fusca" +} +item { + name: "5074" + id: 1758 + display_name: "Aquila chrysaetos" +} +item { + name: "49998" + id: 1759 + display_name: "Thamnophis sirtalis infernalis" +} +item { + name: "13270" + id: 1760 + display_name: "Hylocichla mustelina" +} +item { + name: "62423" + id: 1761 + display_name: "Cimbex americana" +} +item { + name: "62424" + id: 1762 + display_name: "Sitochroa palealis" +} +item { + name: "111578" + id: 1763 + display_name: "Regina grahamii" +} +item { + name: "144207" + id: 1764 + display_name: "Aphelocoma wollweberi" +} +item { + name: "62429" + id: 1765 + display_name: "Pyronia tithonus" +} +item { + name: "47934" + id: 1766 + display_name: "Libellula luctuosa" +} +item { + name: "50000" + id: 1767 + display_name: "Clemmys guttata" +} +item { + name: "5097" + id: 1768 + display_name: "Accipiter striatus" +} +item { + name: "119789" + id: 1769 + display_name: "Cisseps fulvicollis" +} +item { + name: "5106" + id: 1770 + display_name: "Accipiter nisus" +} +item { + name: "5108" + id: 1771 + display_name: "Accipiter gentilis" +} +item { + name: "62456" + id: 1772 + display_name: "Rhagonycha fulva" +} +item { + name: "4948" + id: 1773 + display_name: "Egretta rufescens" +} +item { + name: "46082" + id: 1774 + display_name: "Marmota marmota" +} +item { + name: "6990" + id: 1775 + display_name: "Bucephala clangula" +} +item { + name: "4535" + id: 1776 + display_name: "Anous stolidus" +} +item { + name: "46087" + id: 1777 + display_name: "Marmota caligata" +} +item { + name: "72458" + id: 1778 + display_name: "Actitis macularius" +} +item { + name: "4951" + id: 1779 + display_name: "Ardea purpurea" +} +item { + name: "128012" + id: 1780 + display_name: "Eumorpha fasciatus" +} +item { + name: "472078" + id: 1781 + display_name: "Todiramphus chloris" +} +item { + name: "46095" + id: 1782 + display_name: "Marmota monax" +} +item { + name: "34" + id: 1783 + display_name: "Grus americana" +} +item { + name: "4835" + id: 1784 + display_name: "Himantopus himantopus" +} +item { + name: "122374" + id: 1785 + display_name: "Eurema mexicana" +} +item { + name: "19812" + id: 1786 + display_name: "Glaucidium gnoma" +} +item { + name: "73823" + id: 1787 + display_name: "Hierophis viridiflavus" +} +item { + name: "5168" + id: 1788 + display_name: "Circus approximans" +} +item { + name: "143110" + id: 1789 + display_name: "Hypagyrtis unipunctata" +} +item { + name: "65976" + id: 1790 + display_name: "Lithobates blairi" +} +item { + name: "5173" + id: 1791 + display_name: "Circus aeruginosus" 
+} +item { + name: "54327" + id: 1792 + display_name: "Vespa crabro" +} +item { + name: "4273" + id: 1793 + display_name: "Phalacrocorax sulcirostris" +} +item { + name: "5180" + id: 1794 + display_name: "Buteo albonotatus" +} +item { + name: "103485" + id: 1795 + display_name: "Ischnura denticollis" +} +item { + name: "62528" + id: 1796 + display_name: "Butorides striata" +} +item { + name: "62529" + id: 1797 + display_name: "Platalea ajaja" +} +item { + name: "5186" + id: 1798 + display_name: "Buteo brachyurus" +} +item { + name: "103494" + id: 1799 + display_name: "Ischnura hastata" +} +item { + name: "144455" + id: 1800 + display_name: "Ardea alba" +} +item { + name: "103497" + id: 1801 + display_name: "Ischnura perparva" +} +item { + name: "103498" + id: 1802 + display_name: "Ischnura posita" +} +item { + name: "5196" + id: 1803 + display_name: "Buteo swainsoni" +} +item { + name: "128079" + id: 1804 + display_name: "Grammia ornata" +} +item { + name: "29777" + id: 1805 + display_name: "Lampropeltis triangulum" +} +item { + name: "867" + id: 1806 + display_name: "Alectoris rufa" +} +item { + name: "5206" + id: 1807 + display_name: "Buteo lineatus" +} +item { + name: "29783" + id: 1808 + display_name: "Lampropeltis triangulum triangulum" +} +item { + name: "122383" + id: 1809 + display_name: "Plebejus melissa" +} +item { + name: "5212" + id: 1810 + display_name: "Buteo jamaicensis" +} +item { + name: "81495" + id: 1811 + display_name: "Libellula pulchella" +} +item { + name: "35003" + id: 1812 + display_name: "Heloderma suspectum" +} +item { + name: "46180" + id: 1813 + display_name: "Cynomys gunnisoni" +} +item { + name: "144485" + id: 1814 + display_name: "Charadrius nivosus" +} +item { + name: "144490" + id: 1815 + display_name: "Tringa incana" +} +item { + name: "144491" + id: 1816 + display_name: "Tringa semipalmata" +} +item { + name: "25185" + id: 1817 + display_name: "Hypopachus variolosus" +} +item { + name: "5231" + id: 1818 + display_name: "Terathopius ecaudatus" +} +item { + name: "144496" + id: 1819 + display_name: "Gallinago delicata" +} +item { + name: "5233" + id: 1820 + display_name: "Buteogallus anthracinus" +} +item { + name: "211035" + id: 1821 + display_name: "Speranza pustularia" +} +item { + name: "29813" + id: 1822 + display_name: "Lampropeltis getula" +} +item { + name: "144502" + id: 1823 + display_name: "Chroicocephalus philadelphia" +} +item { + name: "5242" + id: 1824 + display_name: "Circaetus gallicus" +} +item { + name: "144507" + id: 1825 + display_name: "Chroicocephalus novaehollandiae" +} +item { + name: "144510" + id: 1826 + display_name: "Chroicocephalus ridibundus" +} +item { + name: "52757" + id: 1827 + display_name: "Polistes fuscatus" +} +item { + name: "144514" + id: 1828 + display_name: "Leucophaeus atricilla" +} +item { + name: "144515" + id: 1829 + display_name: "Leucophaeus pipixcan" +} +item { + name: "46217" + id: 1830 + display_name: "Tamias striatus" +} +item { + name: "144525" + id: 1831 + display_name: "Onychoprion fuscatus" +} +item { + name: "46222" + id: 1832 + display_name: "Tamias minimus" +} +item { + name: "144530" + id: 1833 + display_name: "Sternula antillarum" +} +item { + name: "46230" + id: 1834 + display_name: "Tamias merriami" +} +item { + name: "144537" + id: 1835 + display_name: "Hydroprogne caspia" +} +item { + name: "144539" + id: 1836 + display_name: "Thalasseus maximus" +} +item { + name: "144540" + id: 1837 + display_name: "Thalasseus bergii" +} +item { + name: "5277" + id: 1838 + display_name: "Elanus leucurus" 
+} +item { + name: "324766" + id: 1839 + display_name: "Epicallima argenticinctella" +} +item { + name: "72486" + id: 1840 + display_name: "Alopochen aegyptiaca" +} +item { + name: "62229" + id: 1841 + display_name: "Ischnura cervula" +} +item { + name: "144550" + id: 1842 + display_name: "Streptopelia senegalensis" +} +item { + name: "46256" + id: 1843 + display_name: "Ammospermophilus harrisii" +} +item { + name: "94559" + id: 1844 + display_name: "Argia nahuana" +} +item { + name: "46259" + id: 1845 + display_name: "Tamiasciurus douglasii" +} +item { + name: "46260" + id: 1846 + display_name: "Tamiasciurus hudsonicus" +} +item { + name: "119989" + id: 1847 + display_name: "Stagmomantis carolina" +} +item { + name: "13494" + id: 1848 + display_name: "Gerygone igata" +} +item { + name: "5305" + id: 1849 + display_name: "Haliaeetus leucocephalus" +} +item { + name: "7596" + id: 1850 + display_name: "Cistothorus platensis" +} +item { + name: "5308" + id: 1851 + display_name: "Haliaeetus vocifer" +} +item { + name: "218301" + id: 1852 + display_name: "Diacme elealis" +} +item { + name: "95422" + id: 1853 + display_name: "Basiaeschna janata" +} +item { + name: "46272" + id: 1854 + display_name: "Glaucomys volans" +} +item { + name: "120010" + id: 1855 + display_name: "Polistes metricus" +} +item { + name: "144594" + id: 1856 + display_name: "Bubo scandiacus" +} +item { + name: "52771" + id: 1857 + display_name: "Gonepteryx rhamni" +} +item { + name: "144597" + id: 1858 + display_name: "Ciccaba virgata" +} +item { + name: "890" + id: 1859 + display_name: "Bonasa umbellus" +} +item { + name: "52773" + id: 1860 + display_name: "Poanes zabulon" +} +item { + name: "120033" + id: 1861 + display_name: "Lapara bombycoides" +} +item { + name: "5346" + id: 1862 + display_name: "Busarellus nigricollis" +} +item { + name: "5349" + id: 1863 + display_name: "Rostrhamus sociabilis" +} +item { + name: "36391" + id: 1864 + display_name: "Anolis equestris" +} +item { + name: "46316" + id: 1865 + display_name: "Trichechus manatus" +} +item { + name: "5267" + id: 1866 + display_name: "Milvus milvus" +} +item { + name: "128241" + id: 1867 + display_name: "Darapsa choerilus" +} +item { + name: "128242" + id: 1868 + display_name: "Palthis angulalis" +} +item { + name: "5366" + id: 1869 + display_name: "Gyps fulvus" +} +item { + name: "204512" + id: 1870 + display_name: "Ficedula hypoleuca" +} +item { + name: "54526" + id: 1871 + display_name: "Crassadoma gigantea" +} +item { + name: "144642" + id: 1872 + display_name: "Momotus coeruliceps" +} +item { + name: "120070" + id: 1873 + display_name: "Strongylocentrotus droebachiensis" +} +item { + name: "54538" + id: 1874 + display_name: "Syngnathus leptorhynchus" +} +item { + name: "81746" + id: 1875 + display_name: "Necrophila americana" +} +item { + name: "300301" + id: 1876 + display_name: "Pseudomyrmex gracilis" +} +item { + name: "202003" + id: 1877 + display_name: "Apiomerus spissipes" +} +item { + name: "41860" + id: 1878 + display_name: "Enhydra lutris" +} +item { + name: "4817" + id: 1879 + display_name: "Charadrius semipalmatus" +} +item { + name: "36145" + id: 1880 + display_name: "Sceloporus variabilis" +} +item { + name: "202012" + id: 1881 + display_name: "Steatoda capensis" +} +item { + name: "62749" + id: 1882 + display_name: "Iphiclides podalirius" +} +item { + name: "5406" + id: 1883 + display_name: "Haliastur indus" +} +item { + name: "62751" + id: 1884 + display_name: "Andricus kingi" +} +item { + name: "5363" + id: 1885 + display_name: "Gyps 
africanus" +} +item { + name: "5416" + id: 1886 + display_name: "Ictinia mississippiensis" +} +item { + name: "62766" + id: 1887 + display_name: "Issoria lathonia" +} +item { + name: "62768" + id: 1888 + display_name: "Scolia dubia" +} +item { + name: "126206" + id: 1889 + display_name: "Dissosteira carolina" +} +item { + name: "269875" + id: 1890 + display_name: "Mallodon dasystomus" +} +item { + name: "155030" + id: 1891 + display_name: "Limenitis reducta" +} +item { + name: "62345" + id: 1892 + display_name: "Duttaphrynus melanostictus" +} +item { + name: "52519" + id: 1893 + display_name: "Aeshna cyanea" +} +item { + name: "10001" + id: 1894 + display_name: "Dives dives" +} +item { + name: "460365" + id: 1895 + display_name: "Tegula funebralis" +} +item { + name: "13631" + id: 1896 + display_name: "Baeolophus atricristatus" +} +item { + name: "13632" + id: 1897 + display_name: "Baeolophus bicolor" +} +item { + name: "13633" + id: 1898 + display_name: "Baeolophus inornatus" +} +item { + name: "9100" + id: 1899 + display_name: "Melospiza melodia" +} +item { + name: "62796" + id: 1900 + display_name: "Crotaphytus bicinctores" +} +item { + name: "62797" + id: 1901 + display_name: "Gambelia wislizenii" +} +item { + name: "46009" + id: 1902 + display_name: "Sciurus aureogaster" +} +item { + name: "112867" + id: 1903 + display_name: "Sparisoma viride" +} +item { + name: "70997" + id: 1904 + display_name: "Pelecinus polyturator" +} +item { + name: "62806" + id: 1905 + display_name: "Mytilus californianus" +} +item { + name: "120156" + id: 1906 + display_name: "Musca domestica" +} +item { + name: "136548" + id: 1907 + display_name: "Euclea delphinii" +} +item { + name: "50065" + id: 1908 + display_name: "Danaus eresimus" +} +item { + name: "43239" + id: 1909 + display_name: "Tachyglossus aculeatus" +} +item { + name: "145303" + id: 1910 + display_name: "Spinus spinus" +} +item { + name: "120183" + id: 1911 + display_name: "Araneus marmoreus" +} +item { + name: "71032" + id: 1912 + display_name: "Crotalus scutulatus scutulatus" +} +item { + name: "71034" + id: 1913 + display_name: "Tenodera sinensis" +} +item { + name: "143121" + id: 1914 + display_name: "Ochropleura implecta" +} +item { + name: "13695" + id: 1915 + display_name: "Motacilla alba" +} +item { + name: "7458" + id: 1916 + display_name: "Certhia americana" +} +item { + name: "38293" + id: 1917 + display_name: "Lampropholis delicata" +} +item { + name: "144281" + id: 1918 + display_name: "Bucorvus leadbeateri" +} +item { + name: "120217" + id: 1919 + display_name: "Halysidota tessellaris" +} +item { + name: "226718" + id: 1920 + display_name: "Otiorhynchus sulcatus" +} +item { + name: "464287" + id: 1921 + display_name: "Anteaeolidiella oliviae" +} +item { + name: "226720" + id: 1922 + display_name: "Oxychilus draparnaudi" +} +item { + name: "13729" + id: 1923 + display_name: "Anthus pratensis" +} +item { + name: "13732" + id: 1924 + display_name: "Anthus rubescens" +} +item { + name: "11930" + id: 1925 + display_name: "Tachycineta albilinea" +} +item { + name: "71085" + id: 1926 + display_name: "Varanus niloticus" +} +item { + name: "144814" + id: 1927 + display_name: "Poecile carolinensis" +} +item { + name: "144815" + id: 1928 + display_name: "Poecile atricapillus" +} +item { + name: "144816" + id: 1929 + display_name: "Poecile gambeli" +} +item { + name: "144820" + id: 1930 + display_name: "Poecile rufescens" +} +item { + name: "144823" + id: 1931 + display_name: "Periparus ater" +} +item { + name: "10485" + id: 1932 + 
display_name: "Chlorophanes spiza" +} +item { + name: "40523" + id: 1933 + display_name: "Lasiurus cinereus" +} +item { + name: "47719" + id: 1934 + display_name: "Datana ministra" +} +item { + name: "13770" + id: 1935 + display_name: "Estrilda astrild" +} +item { + name: "144849" + id: 1936 + display_name: "Cyanistes caeruleus" +} +item { + name: "218587" + id: 1937 + display_name: "Discus rotundatus" +} +item { + name: "47105" + id: 1938 + display_name: "Tamandua mexicana" +} +item { + name: "18463" + id: 1939 + display_name: "Sphyrapicus varius" +} +item { + name: "11858" + id: 1940 + display_name: "Petrochelidon pyrrhonota" +} +item { + name: "144882" + id: 1941 + display_name: "Troglodytes pacificus" +} +item { + name: "144883" + id: 1942 + display_name: "Troglodytes hiemalis" +} +item { + name: "153076" + id: 1943 + display_name: "Nephelodes minians" +} +item { + name: "62978" + id: 1944 + display_name: "Chlosyne nycteis" +} +item { + name: "128517" + id: 1945 + display_name: "Catocala ilia" +} +item { + name: "153102" + id: 1946 + display_name: "Dysphania militaris" +} +item { + name: "59651" + id: 1947 + display_name: "Aquarius remigis" +} +item { + name: "13851" + id: 1948 + display_name: "Passer montanus" +} +item { + name: "13858" + id: 1949 + display_name: "Passer domesticus" +} +item { + name: "39742" + id: 1950 + display_name: "Kinosternon flavescens" +} +item { + name: "506118" + id: 1951 + display_name: "Aphelocoma californica" +} +item { + name: "5672" + id: 1952 + display_name: "Amazilia yucatanensis" +} +item { + name: "5676" + id: 1953 + display_name: "Amazilia tzacatl" +} +item { + name: "204503" + id: 1954 + display_name: "Dicrurus adsimilis" +} +item { + name: "52785" + id: 1955 + display_name: "Megachile sculpturalis" +} +item { + name: "126905" + id: 1956 + display_name: "Harrisina americana" +} +item { + name: "55773" + id: 1957 + display_name: "Promachus hinei" +} +item { + name: "84752" + id: 1958 + display_name: "Microcentrum rhombifolium" +} +item { + name: "5698" + id: 1959 + display_name: "Amazilia violiceps" +} +item { + name: "145539" + id: 1960 + display_name: "Ovis canadensis nelsoni" +} +item { + name: "104004" + id: 1961 + display_name: "Lampropeltis splendida" +} +item { + name: "13893" + id: 1962 + display_name: "Lonchura punctulata" +} +item { + name: "63048" + id: 1963 + display_name: "Nuttallina californica" +} +item { + name: "226901" + id: 1964 + display_name: "Panopoda rufimargo" +} +item { + name: "194134" + id: 1965 + display_name: "Anthanassa tulcis" +} +item { + name: "5049" + id: 1966 + display_name: "Tigrisoma mexicanum" +} +item { + name: "407130" + id: 1967 + display_name: "Porphyrio melanotus melanotus" +} +item { + name: "226910" + id: 1968 + display_name: "Panthea furcilla" +} +item { + name: "130661" + id: 1969 + display_name: "Catasticta nimbice" +} +item { + name: "120215" + id: 1970 + display_name: "Bombus griseocollis" +} +item { + name: "144220" + id: 1971 + display_name: "Melanitta americana" +} +item { + name: "9148" + id: 1972 + display_name: "Spizella pallida" +} +item { + name: "320610" + id: 1973 + display_name: "Sceloporus magister" +} +item { + name: "54900" + id: 1974 + display_name: "Papilio polyxenes asterius" +} +item { + name: "36080" + id: 1975 + display_name: "Callisaurus draconoides" +} +item { + name: "5758" + id: 1976 + display_name: "Amazilia rutila" +} +item { + name: "3465" + id: 1977 + display_name: "Zenaida aurita" +} +item { + name: "116461" + id: 1978 + display_name: "Anolis sagrei" +} +item { + name: 
"61295" + id: 1979 + display_name: "Aporia crataegi" +} +item { + name: "131673" + id: 1980 + display_name: "Tetracis cachexiata" +} +item { + name: "63113" + id: 1981 + display_name: "Blarina brevicauda" +} +item { + name: "26904" + id: 1982 + display_name: "Coronella austriaca" +} +item { + name: "94575" + id: 1983 + display_name: "Argia tibialis" +} +item { + name: "237166" + id: 1984 + display_name: "Lycaena phlaeas hypophlaeas" +} +item { + name: "129305" + id: 1985 + display_name: "Melanoplus bivittatus" +} +item { + name: "63128" + id: 1986 + display_name: "Speyeria atlantis" +} +item { + name: "113514" + id: 1987 + display_name: "Sympetrum internum" +} +item { + name: "48757" + id: 1988 + display_name: "Echinothrix calamaris" +} +item { + name: "128670" + id: 1989 + display_name: "Bombus vagans" +} +item { + name: "13988" + id: 1990 + display_name: "Prunella modularis" +} +item { + name: "54951" + id: 1991 + display_name: "Anartia fatima" +} +item { + name: "54952" + id: 1992 + display_name: "Cardisoma guanhumi" +} +item { + name: "325295" + id: 1993 + display_name: "Cydalima perspectalis" +} +item { + name: "63160" + id: 1994 + display_name: "Celithemis elisa" +} +item { + name: "210615" + id: 1995 + display_name: "Pyrausta volupialis" +} +item { + name: "472766" + id: 1996 + display_name: "Falco tinnunculus" +} +item { + name: "29927" + id: 1997 + display_name: "Heterodon nasicus" +} +item { + name: "145088" + id: 1998 + display_name: "Ixoreus naevius" +} +item { + name: "6432" + id: 1999 + display_name: "Archilochus colubris" +} +item { + name: "5827" + id: 2000 + display_name: "Lampornis clemenciae" +} +item { + name: "15990" + id: 2001 + display_name: "Myiarchus tuberculifer" +} +item { + name: "128712" + id: 2002 + display_name: "Coccinella californica" +} +item { + name: "67559" + id: 2003 + display_name: "Adelpha eulalia" +} +item { + name: "128719" + id: 2004 + display_name: "Echinometra mathaei" +} +item { + name: "10247" + id: 2005 + display_name: "Setophaga ruticilla" +} +item { + name: "202451" + id: 2006 + display_name: "Copaeodes minima" +} +item { + name: "95958" + id: 2007 + display_name: "Boyeria vinosa" +} +item { + name: "16016" + id: 2008 + display_name: "Myiarchus tyrannulus" +} +item { + name: "36202" + id: 2009 + display_name: "Sceloporus olivaceus" +} +item { + name: "95982" + id: 2010 + display_name: "Brachymesia furcata" +} +item { + name: "126589" + id: 2011 + display_name: "Calycopis isobeon" +} +item { + name: "120578" + id: 2012 + display_name: "Micrathena sagittata" +} +item { + name: "194690" + id: 2013 + display_name: "Pogonomyrmex barbatus" +} +item { + name: "120583" + id: 2014 + display_name: "Parasteatoda tepidariorum" +} +item { + name: "202505" + id: 2015 + display_name: "Zosterops lateralis" +} +item { + name: "38671" + id: 2016 + display_name: "Aspidoscelis tigris" +} +item { + name: "38672" + id: 2017 + display_name: "Aspidoscelis tigris stejnegeri" +} +item { + name: "9176" + id: 2018 + display_name: "Zonotrichia leucophrys" +} +item { + name: "120596" + id: 2019 + display_name: "Aphonopelma hentzi" +} +item { + name: "9744" + id: 2020 + display_name: "Agelaius phoeniceus" +} +item { + name: "38684" + id: 2021 + display_name: "Aspidoscelis tigris mundus" +} +item { + name: "62426" + id: 2022 + display_name: "Aphantopus hyperantus" +} +item { + name: "30494" + id: 2023 + display_name: "Micrurus tener" +} +item { + name: "58578" + id: 2024 + display_name: "Euphydryas phaeton" +} +item { + name: "96036" + id: 2025 + display_name: 
"Brechmorhoga mendax" +} +item { + name: "333608" + id: 2026 + display_name: "Leukoma staminea" +} +item { + name: "38703" + id: 2027 + display_name: "Aspidoscelis sexlineata sexlineata" +} +item { + name: "126600" + id: 2028 + display_name: "Chortophaga viridifasciata" +} +item { + name: "63287" + id: 2029 + display_name: "Megalorchestia californiana" +} +item { + name: "128824" + id: 2030 + display_name: "Lucilia sericata" +} +item { + name: "104249" + id: 2031 + display_name: "Lepisosteus oculatus" +} +item { + name: "203153" + id: 2032 + display_name: "Parus major" +} +item { + name: "9183" + id: 2033 + display_name: "Zonotrichia capensis" +} +item { + name: "82201" + id: 2034 + display_name: "Hypena baltimoralis" +} +item { + name: "145217" + id: 2035 + display_name: "Oreothlypis peregrina" +} +item { + name: "145218" + id: 2036 + display_name: "Oreothlypis celata" +} +item { + name: "145221" + id: 2037 + display_name: "Oreothlypis ruficapilla" +} +item { + name: "145224" + id: 2038 + display_name: "Geothlypis philadelphia" +} +item { + name: "145225" + id: 2039 + display_name: "Geothlypis formosa" +} +item { + name: "448331" + id: 2040 + display_name: "Ambigolimax valentianus" +} +item { + name: "128845" + id: 2041 + display_name: "Copestylum mexicanum" +} +item { + name: "145231" + id: 2042 + display_name: "Setophaga tigrina" +} +item { + name: "145233" + id: 2043 + display_name: "Setophaga americana" +} +item { + name: "145235" + id: 2044 + display_name: "Setophaga magnolia" +} +item { + name: "145236" + id: 2045 + display_name: "Setophaga castanea" +} +item { + name: "145237" + id: 2046 + display_name: "Setophaga fusca" +} +item { + name: "145238" + id: 2047 + display_name: "Setophaga petechia" +} +item { + name: "145240" + id: 2048 + display_name: "Setophaga striata" +} +item { + name: "145242" + id: 2049 + display_name: "Setophaga palmarum" +} +item { + name: "179855" + id: 2050 + display_name: "Polites vibex" +} +item { + name: "145244" + id: 2051 + display_name: "Setophaga pinus" +} +item { + name: "145245" + id: 2052 + display_name: "Setophaga coronata" +} +item { + name: "145246" + id: 2053 + display_name: "Setophaga dominica" +} +item { + name: "5987" + id: 2054 + display_name: "Campylopterus hemileucurus" +} +item { + name: "17382" + id: 2055 + display_name: "Vireo cassinii" +} +item { + name: "145254" + id: 2056 + display_name: "Setophaga nigrescens" +} +item { + name: "145255" + id: 2057 + display_name: "Setophaga townsendi" +} +item { + name: "145256" + id: 2058 + display_name: "Setophaga occidentalis" +} +item { + name: "145257" + id: 2059 + display_name: "Setophaga chrysoparia" +} +item { + name: "145258" + id: 2060 + display_name: "Setophaga virens" +} +item { + name: "48786" + id: 2061 + display_name: "Pollicipes polymerus" +} +item { + name: "36207" + id: 2062 + display_name: "Sceloporus occidentalis longipes" +} +item { + name: "22392" + id: 2063 + display_name: "Eleutherodactylus marnockii" +} +item { + name: "22393" + id: 2064 + display_name: "Eleutherodactylus cystignathoides" +} +item { + name: "145275" + id: 2065 + display_name: "Cardellina canadensis" +} +item { + name: "145277" + id: 2066 + display_name: "Cardellina rubra" +} +item { + name: "7829" + id: 2067 + display_name: "Aphelocoma coerulescens" +} +item { + name: "41963" + id: 2068 + display_name: "Panthera pardus" +} +item { + name: "142998" + id: 2069 + display_name: "Pyrausta acrionalis" +} +item { + name: "18204" + id: 2070 + display_name: "Melanerpes erythrocephalus" +} +item { + name: "47425" + 
id: 2071 + display_name: "Tonicella lineata" +} +item { + name: "148460" + id: 2072 + display_name: "Charadra deridens" +} +item { + name: "145291" + id: 2073 + display_name: "Emberiza calandra" +} +item { + name: "52523" + id: 2074 + display_name: "Carcinus maenas" +} +item { + name: "46994" + id: 2075 + display_name: "Scapanus latimanus" +} +item { + name: "114314" + id: 2076 + display_name: "Tramea onusta" +} +item { + name: "145300" + id: 2077 + display_name: "Acanthis flammea" +} +item { + name: "63382" + id: 2078 + display_name: "Dermasterias imbricata" +} +item { + name: "126772" + id: 2079 + display_name: "Ursus americanus californiensis" +} +item { + name: "145304" + id: 2080 + display_name: "Spinus pinus" +} +item { + name: "10294" + id: 2081 + display_name: "Thraupis abbas" +} +item { + name: "145308" + id: 2082 + display_name: "Spinus psaltria" +} +item { + name: "145309" + id: 2083 + display_name: "Spinus lawrencei" +} +item { + name: "145310" + id: 2084 + display_name: "Spinus tristis" +} +item { + name: "3739" + id: 2085 + display_name: "Threskiornis aethiopicus" +} +item { + name: "47014" + id: 2086 + display_name: "Scalopus aquaticus" +} +item { + name: "4566" + id: 2087 + display_name: "Gygis alba" +} +item { + name: "43335" + id: 2088 + display_name: "Equus quagga" +} +item { + name: "41970" + id: 2089 + display_name: "Panthera onca" +} +item { + name: "128950" + id: 2090 + display_name: "Lycomorpha pholus" +} +item { + name: "11935" + id: 2091 + display_name: "Tachycineta bicolor" +} +item { + name: "333759" + id: 2092 + display_name: "Larus dominicanus dominicanus" +} +item { + name: "143008" + id: 2093 + display_name: "Herpetogramma pertextalis" +} +item { + name: "235341" + id: 2094 + display_name: "Coenonympha tullia california" +} +item { + name: "44705" + id: 2095 + display_name: "Mus musculus" +} +item { + name: "145352" + id: 2096 + display_name: "Lonchura oryzivora" +} +item { + name: "4840" + id: 2097 + display_name: "Haematopus palliatus" +} +item { + name: "244845" + id: 2098 + display_name: "Apiomerus californicus" +} +item { + name: "145360" + id: 2099 + display_name: "Chloris chloris" +} +item { + name: "5112" + id: 2100 + display_name: "Accipiter cooperii" +} +item { + name: "30675" + id: 2101 + display_name: "Agkistrodon piscivorus" +} +item { + name: "341972" + id: 2102 + display_name: "Crocodylus niloticus" +} +item { + name: "30677" + id: 2103 + display_name: "Agkistrodon piscivorus conanti" +} +item { + name: "30678" + id: 2104 + display_name: "Agkistrodon contortrix" +} +item { + name: "52900" + id: 2105 + display_name: "Caenurgina crassiuscula" +} +item { + name: "30682" + id: 2106 + display_name: "Agkistrodon contortrix laticinctus" +} +item { + name: "47067" + id: 2107 + display_name: "Bradypus variegatus" +} +item { + name: "55260" + id: 2108 + display_name: "Erythemis vesiculosa" +} +item { + name: "17402" + id: 2109 + display_name: "Vireo solitarius" +} +item { + name: "6369" + id: 2110 + display_name: "Selasphorus platycercus" +} +item { + name: "104416" + id: 2111 + display_name: "Lestes alacer" +} +item { + name: "128993" + id: 2112 + display_name: "Narceus annularus" +} +item { + name: "104422" + id: 2113 + display_name: "Lestes congener" +} +item { + name: "227307" + id: 2114 + display_name: "Patalene olyzonaria" +} +item { + name: "104429" + id: 2115 + display_name: "Lestes dryas" +} +item { + name: "194542" + id: 2116 + display_name: "Phyciodes graphica" +} +item { + name: "52904" + id: 2117 + display_name: "Microcrambus elegans" +} 
+item { + name: "129363" + id: 2118 + display_name: "Calephelis nemesis" +} +item { + name: "144506" + id: 2119 + display_name: "Chroicocephalus scopulinus" +} +item { + name: "30713" + id: 2120 + display_name: "Crotalus oreganus helleri" +} +item { + name: "47101" + id: 2121 + display_name: "Choloepus hoffmanni" +} +item { + name: "210942" + id: 2122 + display_name: "Caedicia simplex" +} +item { + name: "30719" + id: 2123 + display_name: "Crotalus scutulatus" +} +item { + name: "30724" + id: 2124 + display_name: "Crotalus ruber" +} +item { + name: "47110" + id: 2125 + display_name: "Triopha maculata" +} +item { + name: "4235" + id: 2126 + display_name: "Aechmophorus occidentalis" +} +item { + name: "30731" + id: 2127 + display_name: "Crotalus molossus" +} +item { + name: "30733" + id: 2128 + display_name: "Crotalus molossus nigrescens" +} +item { + name: "30735" + id: 2129 + display_name: "Crotalus mitchellii" +} +item { + name: "30740" + id: 2130 + display_name: "Crotalus lepidus" +} +item { + name: "30746" + id: 2131 + display_name: "Crotalus horridus" +} +item { + name: "63518" + id: 2132 + display_name: "Melanoplus differentialis" +} +item { + name: "30751" + id: 2133 + display_name: "Crotalus cerastes" +} +item { + name: "126640" + id: 2134 + display_name: "Caenurgina erechtea" +} +item { + name: "46086" + id: 2135 + display_name: "Marmota flaviventris" +} +item { + name: "194599" + id: 2136 + display_name: "Heliomata cycladata" +} +item { + name: "30764" + id: 2137 + display_name: "Crotalus atrox" +} +item { + name: "204520" + id: 2138 + display_name: "Hemiphaga novaeseelandiae" +} +item { + name: "128141" + id: 2139 + display_name: "Crepidula adunca" +} +item { + name: "121183" + id: 2140 + display_name: "Mythimna unipuncta" +} +item { + name: "40827" + id: 2141 + display_name: "Eidolon helvum" +} +item { + name: "4571" + id: 2142 + display_name: "Xema sabini" +} +item { + name: "211007" + id: 2143 + display_name: "Nepytia canosaria" +} +item { + name: "47171" + id: 2144 + display_name: "Flabellina iodinea" +} +item { + name: "211012" + id: 2145 + display_name: "Maliattha synochitis" +} +item { + name: "30798" + id: 2146 + display_name: "Bothrops asper" +} +item { + name: "47188" + id: 2147 + display_name: "Pachygrapsus crassipes" +} +item { + name: "55387" + id: 2148 + display_name: "Esox lucius" +} +item { + name: "58583" + id: 2149 + display_name: "Limenitis arthemis arthemis" +} +item { + name: "104548" + id: 2150 + display_name: "Leucorrhinia frigida" +} +item { + name: "104550" + id: 2151 + display_name: "Leucorrhinia hudsonica" +} +item { + name: "104551" + id: 2152 + display_name: "Leucorrhinia intacta" +} +item { + name: "47209" + id: 2153 + display_name: "Hermissenda crassicornis" +} +item { + name: "55655" + id: 2154 + display_name: "Lycaena phlaeas" +} +item { + name: "202861" + id: 2155 + display_name: "Otala lactea" +} +item { + name: "143037" + id: 2156 + display_name: "Lineodes integra" +} +item { + name: "47219" + id: 2157 + display_name: "Apis mellifera" +} +item { + name: "24254" + id: 2158 + display_name: "Pseudacris cadaverina" +} +item { + name: "47226" + id: 2159 + display_name: "Papilio rutulus" +} +item { + name: "104572" + id: 2160 + display_name: "Libellula comanche" +} +item { + name: "104574" + id: 2161 + display_name: "Libellula croceipennis" +} +item { + name: "104575" + id: 2162 + display_name: "Libellula cyanea" +} +item { + name: "145538" + id: 2163 + display_name: "Ovis canadensis canadensis" +} +item { + name: "104580" + id: 2164 + display_name: 
"Libellula incesta" +} +item { + name: "24257" + id: 2165 + display_name: "Pseudacris streckeri" +} +item { + name: "53866" + id: 2166 + display_name: "Calpodes ethlius" +} +item { + name: "18796" + id: 2167 + display_name: "Ramphastos sulfuratus" +} +item { + name: "2413" + id: 2168 + display_name: "Dacelo novaeguineae" +} +item { + name: "482" + id: 2169 + display_name: "Fulica atra" +} +item { + name: "47251" + id: 2170 + display_name: "Sphyraena barracuda" +} +item { + name: "358549" + id: 2171 + display_name: "Hemaris diffinis" +} +item { + name: "81526" + id: 2172 + display_name: "Crotalus viridis" +} +item { + name: "342169" + id: 2173 + display_name: "Hirundo rustica erythrogaster" +} +item { + name: "39280" + id: 2174 + display_name: "Leiocephalus carinatus" +} +item { + name: "47269" + id: 2175 + display_name: "Dasyatis americana" +} +item { + name: "55467" + id: 2176 + display_name: "Sabulodes aegrotata" +} +item { + name: "6316" + id: 2177 + display_name: "Calypte costae" +} +item { + name: "6317" + id: 2178 + display_name: "Calypte anna" +} +item { + name: "47280" + id: 2179 + display_name: "Pterois volitans" +} +item { + name: "81608" + id: 2180 + display_name: "Geukensia demissa" +} +item { + name: "121012" + id: 2181 + display_name: "Euglandina rosea" +} +item { + name: "236980" + id: 2182 + display_name: "Colaptes auratus cafer" +} +item { + name: "38673" + id: 2183 + display_name: "Aspidoscelis tigris tigris" +} +item { + name: "3786" + id: 2184 + display_name: "Sula nebouxii" +} +item { + name: "55487" + id: 2185 + display_name: "Diabrotica undecimpunctata" +} +item { + name: "243904" + id: 2186 + display_name: "Phrynosoma platyrhinos" +} +item { + name: "55489" + id: 2187 + display_name: "Cycloneda munda" +} +item { + name: "204491" + id: 2188 + display_name: "Copsychus saularis" +} +item { + name: "55492" + id: 2189 + display_name: "Cycloneda polita" +} +item { + name: "129222" + id: 2190 + display_name: "Heterophleps triguttaria" +} +item { + name: "129223" + id: 2191 + display_name: "Pasiphila rectangulata" +} +item { + name: "28365" + id: 2192 + display_name: "Thamnophis sirtalis sirtalis" +} +item { + name: "47316" + id: 2193 + display_name: "Chaetodon lunula" +} +item { + name: "6359" + id: 2194 + display_name: "Selasphorus sasin" +} +item { + name: "62500" + id: 2195 + display_name: "Leptophobia aripa" +} +item { + name: "6363" + id: 2196 + display_name: "Selasphorus rufus" +} +item { + name: "96480" + id: 2197 + display_name: "Calopteryx aequabilis" +} +item { + name: "55521" + id: 2198 + display_name: "Papilio eurymedon" +} +item { + name: "6371" + id: 2199 + display_name: "Calothorax lucifer" +} +item { + name: "129263" + id: 2200 + display_name: "Syrbula admirabilis" +} +item { + name: "28371" + id: 2201 + display_name: "Thamnophis sirtalis fitchi" +} +item { + name: "243962" + id: 2202 + display_name: "Charina bottae" +} +item { + name: "145659" + id: 2203 + display_name: "Acronicta americana" +} +item { + name: "14588" + id: 2204 + display_name: "Pycnonotus barbatus" +} +item { + name: "480298" + id: 2205 + display_name: "Cornu aspersum" +} +item { + name: "51584" + id: 2206 + display_name: "Melanitis leda" +} +item { + name: "243970" + id: 2207 + display_name: "Larus glaucescens \303\227 occidentalis" +} +item { + name: "55556" + id: 2208 + display_name: "Oncopeltus fasciatus" +} +item { + name: "506117" + id: 2209 + display_name: "Aphelocoma woodhouseii" +} +item { + name: "63750" + id: 2210 + display_name: "Anavitrinella pampinaria" +} +item { + name: 
"30983" + id: 2211 + display_name: "Sistrurus miliarius" +} +item { + name: "211210" + id: 2212 + display_name: "Holocnemus pluchei" +} +item { + name: "49587" + id: 2213 + display_name: "Micropterus salmoides" +} +item { + name: "6417" + id: 2214 + display_name: "Florisuga mellivora" +} +item { + name: "47381" + id: 2215 + display_name: "Latrodectus mactans" +} +item { + name: "47382" + id: 2216 + display_name: "Latrodectus hesperus" +} +item { + name: "4851" + id: 2217 + display_name: "Haematopus finschi" +} +item { + name: "51588" + id: 2218 + display_name: "Papilio polytes" +} +item { + name: "144431" + id: 2219 + display_name: "Falcipennis canadensis" +} +item { + name: "118490" + id: 2220 + display_name: "Haematopis grataria" +} +item { + name: "6433" + id: 2221 + display_name: "Archilochus alexandri" +} +item { + name: "52956" + id: 2222 + display_name: "Chaetodon capistratus" +} +item { + name: "203050" + id: 2223 + display_name: "Junonia genoveva" +} +item { + name: "5170" + id: 2224 + display_name: "Circus cyaneus" +} +item { + name: "84332" + id: 2225 + display_name: "Panorpa nuptialis" +} +item { + name: "47414" + id: 2226 + display_name: "Emerita analoga" +} +item { + name: "129335" + id: 2227 + display_name: "Gibbifer californicus" +} +item { + name: "55610" + id: 2228 + display_name: "Pyrrhocoris apterus" +} +item { + name: "58421" + id: 2229 + display_name: "Phidippus johnsoni" +} +item { + name: "208608" + id: 2230 + display_name: "Trachymela sloanei" +} +item { + name: "68138" + id: 2231 + display_name: "Sympetrum corruptum" +} +item { + name: "129350" + id: 2232 + display_name: "Photinus pyralis" +} +item { + name: "55625" + id: 2233 + display_name: "Sympetrum striolatum" +} +item { + name: "55626" + id: 2234 + display_name: "Pieris rapae" +} +item { + name: "203084" + id: 2235 + display_name: "Ardea alba modesta" +} +item { + name: "129362" + id: 2236 + display_name: "Zerene cesonia" +} +item { + name: "55638" + id: 2237 + display_name: "Anania hortulata" +} +item { + name: "148537" + id: 2238 + display_name: "Astraptes fulgerator" +} +item { + name: "55640" + id: 2239 + display_name: "Celastrina argiolus" +} +item { + name: "55641" + id: 2240 + display_name: "Polyommatus icarus" +} +item { + name: "16028" + id: 2241 + display_name: "Myiarchus crinitus" +} +item { + name: "55643" + id: 2242 + display_name: "Araschnia levana" +} +item { + name: "121180" + id: 2243 + display_name: "Megastraea undosa" +} +item { + name: "47454" + id: 2244 + display_name: "Triopha catalinae" +} +item { + name: "28389" + id: 2245 + display_name: "Thamnophis ordinoides" +} +item { + name: "68139" + id: 2246 + display_name: "Sympetrum vicinum" +} +item { + name: "55651" + id: 2247 + display_name: "Autographa gamma" +} +item { + name: "55653" + id: 2248 + display_name: "Maniola jurtina" +} +item { + name: "84369" + id: 2249 + display_name: "Libellula forensis" +} +item { + name: "47135" + id: 2250 + display_name: "Badumna longinqua" +} +item { + name: "48213" + id: 2251 + display_name: "Ariolimax californicus" +} +item { + name: "121196" + id: 2252 + display_name: "Acanthurus coeruleus" +} +item { + name: "47469" + id: 2253 + display_name: "Doris montereyensis" +} +item { + name: "5181" + id: 2254 + display_name: "Buteo regalis" +} +item { + name: "47472" + id: 2255 + display_name: "Acanthodoris lutea" +} +item { + name: "129415" + id: 2256 + display_name: "Copaeodes aurantiaca" +} +item { + name: "47505" + id: 2257 + display_name: "Geitodoris heathi" +} +item { + name: "28398" + id: 2258 + 
display_name: "Thamnophis elegans" +} +item { + name: "6553" + id: 2259 + display_name: "Aeronautes saxatalis" +} +item { + name: "47516" + id: 2260 + display_name: "Oncorhynchus mykiss" +} +item { + name: "6557" + id: 2261 + display_name: "Chaetura vauxi" +} +item { + name: "47518" + id: 2262 + display_name: "Salmo trutta" +} +item { + name: "55711" + id: 2263 + display_name: "Ladona depressa" +} +item { + name: "55719" + id: 2264 + display_name: "Eristalis tenax" +} +item { + name: "6571" + id: 2265 + display_name: "Chaetura pelagica" +} +item { + name: "119881" + id: 2266 + display_name: "Chrysochus cobaltinus" +} +item { + name: "145239" + id: 2267 + display_name: "Setophaga pensylvanica" +} +item { + name: "154043" + id: 2268 + display_name: "Bombus huntii" +} +item { + name: "41955" + id: 2269 + display_name: "Acinonyx jubatus" +} +item { + name: "55746" + id: 2270 + display_name: "Misumena vatia" +} +item { + name: "12024" + id: 2271 + display_name: "Lanius ludovicianus" +} +item { + name: "5063" + id: 2272 + display_name: "Anhinga anhinga" +} +item { + name: "59892" + id: 2273 + display_name: "Prionus californicus" +} +item { + name: "52986" + id: 2274 + display_name: "Largus californicus" +} +item { + name: "204454" + id: 2275 + display_name: "Acridotheres tristis" +} +item { + name: "14816" + id: 2276 + display_name: "Sitta pygmaea" +} +item { + name: "148560" + id: 2277 + display_name: "Mestra amymone" +} +item { + name: "4585" + id: 2278 + display_name: "Actophilornis africanus" +} +item { + name: "47590" + id: 2279 + display_name: "Phloeodes diabolicus" +} +item { + name: "14823" + id: 2280 + display_name: "Sitta canadensis" +} +item { + name: "14824" + id: 2281 + display_name: "Sitta europaea" +} +item { + name: "14825" + id: 2282 + display_name: "Sitta pusilla" +} +item { + name: "67598" + id: 2283 + display_name: "Solenopsis invicta" +} +item { + name: "6638" + id: 2284 + display_name: "Apus apus" +} +item { + name: "301557" + id: 2285 + display_name: "Euphoria basalis" +} +item { + name: "132070" + id: 2286 + display_name: "Phaneroptera nana" +} +item { + name: "14850" + id: 2287 + display_name: "Sturnus vulgaris" +} +item { + name: "62550" + id: 2288 + display_name: "Seiurus aurocapilla" +} +item { + name: "64006" + id: 2289 + display_name: "Corbicula fluminea" +} +item { + name: "204545" + id: 2290 + display_name: "Motacilla flava" +} +item { + name: "47632" + id: 2291 + display_name: "Katharina tunicata" +} +item { + name: "325309" + id: 2292 + display_name: "Chortophaga viridifasciata viridifasciata" +} +item { + name: "104993" + id: 2293 + display_name: "Macrodiplax balteata" +} +item { + name: "17408" + id: 2294 + display_name: "Vireo griseus" +} +item { + name: "14895" + id: 2295 + display_name: "Toxostoma longirostre" +} +item { + name: "47664" + id: 2296 + display_name: "Henricia leviuscula" +} +item { + name: "31281" + id: 2297 + display_name: "Calotes versicolor" +} +item { + name: "119086" + id: 2298 + display_name: "Agrius cingulata" +} +item { + name: "3849" + id: 2299 + display_name: "Calidris alba" +} +item { + name: "14906" + id: 2300 + display_name: "Toxostoma redivivum" +} +item { + name: "144479" + id: 2301 + display_name: "Gallinula galeata" +} +item { + name: "3850" + id: 2302 + display_name: "Calidris himantopus" +} +item { + name: "117520" + id: 2303 + display_name: "Enhydra lutris nereis" +} +item { + name: "51491" + id: 2304 + display_name: "Myliobatis californica" +} +item { + name: "121612" + id: 2305 + display_name: "Estigmene acrea" +} +item { 
+ name: "105034" + id: 2306 + display_name: "Macromia illinoiensis" +} +item { + name: "6498" + id: 2307 + display_name: "Eugenes fulgens" +} +item { + name: "46179" + id: 2308 + display_name: "Cynomys ludovicianus" +} +item { + name: "105049" + id: 2309 + display_name: "Macromia taeniolata" +} +item { + name: "94045" + id: 2310 + display_name: "Anax longipes" +} +item { + name: "143119" + id: 2311 + display_name: "Galgula partita" +} +item { + name: "9317" + id: 2312 + display_name: "Icterus wagleri" +} +item { + name: "122704" + id: 2313 + display_name: "Nucella ostrina" +} +item { + name: "146709" + id: 2314 + display_name: "Grylloprociphilus imbricator" +} +item { + name: "9318" + id: 2315 + display_name: "Icterus parisorum" +} +item { + name: "85333" + id: 2316 + display_name: "Micrathena gracilis" +} +item { + name: "126737" + id: 2317 + display_name: "Anania funebris" +} +item { + name: "49053" + id: 2318 + display_name: "Cryptochiton stelleri" +} +item { + name: "47721" + id: 2319 + display_name: "Parastichopus californicus" +} +item { + name: "34050" + id: 2320 + display_name: "Phelsuma laticauda" +} +item { + name: "154219" + id: 2321 + display_name: "Notarctia proxima" +} +item { + name: "51781" + id: 2322 + display_name: "Tyria jacobaeae" +} +item { + name: "24230" + id: 2323 + display_name: "Acris crepitans" +} +item { + name: "146032" + id: 2324 + display_name: "Coluber flagellum" +} +item { + name: "146033" + id: 2325 + display_name: "Coluber flagellum flagellum" +} +item { + name: "244340" + id: 2326 + display_name: "Hordnia atropunctata" +} +item { + name: "146037" + id: 2327 + display_name: "Coluber taeniatus" +} +item { + name: "244344" + id: 2328 + display_name: "Scopula rubraria" +} +item { + name: "47737" + id: 2329 + display_name: "Harpaphe haydeniana" +} +item { + name: "5227" + id: 2330 + display_name: "Buteo platypterus" +} +item { + name: "39556" + id: 2331 + display_name: "Apalone spinifera" +} +item { + name: "39560" + id: 2332 + display_name: "Apalone spinifera emoryi" +} +item { + name: "318836" + id: 2333 + display_name: "Gallinago gallinago" +} +item { + name: "105098" + id: 2334 + display_name: "Magicicada septendecim" +} +item { + name: "96907" + id: 2335 + display_name: "Celithemis fasciata" +} +item { + name: "9325" + id: 2336 + display_name: "Icterus spurius" +} +item { + name: "3864" + id: 2337 + display_name: "Calidris minutilla" +} +item { + name: "14995" + id: 2338 + display_name: "Dumetella carolinensis" +} +item { + name: "424597" + id: 2339 + display_name: "Porphyrio hochstetteri" +} +item { + name: "47768" + id: 2340 + display_name: "Doriopsilla albopunctata" +} +item { + name: "498116" + id: 2341 + display_name: "Aeolidia papillosa" +} +item { + name: "244378" + id: 2342 + display_name: "Mallophora fautrix" +} +item { + name: "3866" + id: 2343 + display_name: "Calidris fuscicollis" +} +item { + name: "47776" + id: 2344 + display_name: "Ariolimax columbianus" +} +item { + name: "144497" + id: 2345 + display_name: "Phalaropus tricolor" +} +item { + name: "39824" + id: 2346 + display_name: "Pseudemys nelsoni" +} +item { + name: "236979" + id: 2347 + display_name: "Colaptes auratus auratus" +} +item { + name: "55990" + id: 2348 + display_name: "Podarcis muralis" +} +item { + name: "244407" + id: 2349 + display_name: "Zelus renardii" +} +item { + name: "47802" + id: 2350 + display_name: "Lymantria dispar" +} +item { + name: "15035" + id: 2351 + display_name: "Melanotis caerulescens" +} +item { + name: "51658" + id: 2352 + display_name: "Anthopleura 
artemisia" +} +item { + name: "121534" + id: 2353 + display_name: "Oreta rosea" +} +item { + name: "73504" + id: 2354 + display_name: "Tiaris olivaceus" +} +item { + name: "15045" + id: 2355 + display_name: "Oreoscoptes montanus" +} +item { + name: "3873" + id: 2356 + display_name: "Limnodromus scolopaceus" +} +item { + name: "47673" + id: 2357 + display_name: "Pycnopodia helianthoides" +} +item { + name: "47817" + id: 2358 + display_name: "Libellula saturata" +} +item { + name: "56644" + id: 2359 + display_name: "Polygonia satyrus" +} +item { + name: "47826" + id: 2360 + display_name: "Cancer productus" +} +item { + name: "3875" + id: 2361 + display_name: "Tringa solitaria" +} +item { + name: "39782" + id: 2362 + display_name: "Trachemys scripta" +} +item { + name: "143140" + id: 2363 + display_name: "Cyllopsis gemma" +} +item { + name: "29818" + id: 2364 + display_name: "Lampropeltis holbrooki" +} +item { + name: "56293" + id: 2365 + display_name: "Macroglossum stellatarum" +} +item { + name: "154340" + id: 2366 + display_name: "Gryllodes sigillatus" +} +item { + name: "14801" + id: 2367 + display_name: "Sitta carolinensis" +} +item { + name: "121578" + id: 2368 + display_name: "Ovis aries" +} +item { + name: "3879" + id: 2369 + display_name: "Tringa totanus" +} +item { + name: "6893" + id: 2370 + display_name: "Dendrocygna autumnalis" +} +item { + name: "154353" + id: 2371 + display_name: "Sunira bicolorago" +} +item { + name: "6898" + id: 2372 + display_name: "Dendrocygna viduata" +} +item { + name: "6899" + id: 2373 + display_name: "Dendrocygna bicolor" +} +item { + name: "9342" + id: 2374 + display_name: "Icterus abeillei" +} +item { + name: "39670" + id: 2375 + display_name: "Lepidochelys olivacea" +} +item { + name: "4867" + id: 2376 + display_name: "Vanellus chilensis" +} +item { + name: "39677" + id: 2377 + display_name: "Dermochelys coriacea" +} +item { + name: "113407" + id: 2378 + display_name: "Stylurus plagiatus" +} +item { + name: "39682" + id: 2379 + display_name: "Chelydra serpentina" +} +item { + name: "6915" + id: 2380 + display_name: "Cygnus buccinator" +} +item { + name: "6916" + id: 2381 + display_name: "Cygnus cygnus" +} +item { + name: "6917" + id: 2382 + display_name: "Cygnus columbianus" +} +item { + name: "29825" + id: 2383 + display_name: "Lampropeltis calligaster calligaster" +} +item { + name: "6921" + id: 2384 + display_name: "Cygnus olor" +} +item { + name: "146186" + id: 2385 + display_name: "Intellagama lesueurii" +} +item { + name: "9346" + id: 2386 + display_name: "Icterus galbula" +} +item { + name: "126765" + id: 2387 + display_name: "Plutella xylostella" +} +item { + name: "71154" + id: 2388 + display_name: "Aphis nerii" +} +item { + name: "6930" + id: 2389 + display_name: "Anas platyrhynchos" +} +item { + name: "6933" + id: 2390 + display_name: "Anas acuta" +} +item { + name: "39703" + id: 2391 + display_name: "Sternotherus odoratus" +} +item { + name: "6937" + id: 2392 + display_name: "Anas crecca" +} +item { + name: "64287" + id: 2393 + display_name: "Lottia digitalis" +} +item { + name: "6944" + id: 2394 + display_name: "Anas cyanoptera" +} +item { + name: "39713" + id: 2395 + display_name: "Kinosternon subrubrum" +} +item { + name: "26691" + id: 2396 + display_name: "Scaphiopus couchii" +} +item { + name: "6948" + id: 2397 + display_name: "Anas fulvigula" +} +item { + name: "6953" + id: 2398 + display_name: "Anas discors" +} +item { + name: "47914" + id: 2399 + display_name: "Eumorpha pandorus" +} +item { + name: "47916" + id: 2400 + 
display_name: "Actias luna" +} +item { + name: "6957" + id: 2401 + display_name: "Anas strepera" +} +item { + name: "47919" + id: 2402 + display_name: "Antheraea polyphemus" +} +item { + name: "119953" + id: 2403 + display_name: "Hypoprepia fucosa" +} +item { + name: "6961" + id: 2404 + display_name: "Anas clypeata" +} +item { + name: "134119" + id: 2405 + display_name: "Anisomorpha buprestoides" +} +item { + name: "51678" + id: 2406 + display_name: "Coenagrion puella" +} +item { + name: "72502" + id: 2407 + display_name: "Anas chlorotis" +} +item { + name: "49060" + id: 2408 + display_name: "Epiactis prolifera" +} +item { + name: "42122" + id: 2409 + display_name: "Phacochoerus africanus" +} +item { + name: "58507" + id: 2410 + display_name: "Poanes hobomok" +} +item { + name: "121669" + id: 2411 + display_name: "Stenopus hispidus" +} +item { + name: "8143" + id: 2412 + display_name: "Rhipidura leucophrys" +} +item { + name: "6985" + id: 2413 + display_name: "Anas americana" +} +item { + name: "6993" + id: 2414 + display_name: "Bucephala albeola" +} +item { + name: "121682" + id: 2415 + display_name: "Tetraclita rubescens" +} +item { + name: "6996" + id: 2416 + display_name: "Mergus serrator" +} +item { + name: "113498" + id: 2417 + display_name: "Sympetrum ambiguum" +} +item { + name: "39771" + id: 2418 + display_name: "Chrysemys picta" +} +item { + name: "7004" + id: 2419 + display_name: "Mergus merganser" +} +item { + name: "39773" + id: 2420 + display_name: "Chrysemys picta bellii" +} +item { + name: "113503" + id: 2421 + display_name: "Sympetrum danae" +} +item { + name: "113507" + id: 2422 + display_name: "Sympetrum fonscolombii" +} +item { + name: "154469" + id: 2423 + display_name: "Isa textula" +} +item { + name: "47975" + id: 2424 + display_name: "Argia apicalis" +} +item { + name: "7018" + id: 2425 + display_name: "Anser anser" +} +item { + name: "7019" + id: 2426 + display_name: "Anser albifrons" +} +item { + name: "47980" + id: 2427 + display_name: "Speyeria cybele" +} +item { + name: "58514" + id: 2428 + display_name: "Euphyes vestris" +} +item { + name: "113519" + id: 2429 + display_name: "Sympetrum obtrusum" +} +item { + name: "7024" + id: 2430 + display_name: "Somateria mollissima" +} +item { + name: "39793" + id: 2431 + display_name: "Trachemys scripta scripta" +} +item { + name: "367475" + id: 2432 + display_name: "Rallus obsoletus" +} +item { + name: "121716" + id: 2433 + display_name: "Uresiphita reversalis" +} +item { + name: "113525" + id: 2434 + display_name: "Sympetrum sanguineum" +} +item { + name: "113526" + id: 2435 + display_name: "Sympetrum semicinctum" +} +item { + name: "18921" + id: 2436 + display_name: "Platycercus elegans" +} +item { + name: "7032" + id: 2437 + display_name: "Melanitta fusca" +} +item { + name: "5268" + id: 2438 + display_name: "Milvus migrans" +} +item { + name: "144536" + id: 2439 + display_name: "Gelochelidon nilotica" +} +item { + name: "413503" + id: 2440 + display_name: "Ninox novaeseelandiae novaeseelandiae" +} +item { + name: "7036" + id: 2441 + display_name: "Melanitta perspicillata" +} +item { + name: "64382" + id: 2442 + display_name: "Lissotriton vulgaris" +} +item { + name: "39807" + id: 2443 + display_name: "Terrapene ornata" +} +item { + name: "39808" + id: 2444 + display_name: "Terrapene ornata luteola" +} +item { + name: "7044" + id: 2445 + display_name: "Aythya collaris" +} +item { + name: "7045" + id: 2446 + display_name: "Aythya ferina" +} +item { + name: "7046" + id: 2447 + display_name: "Aythya fuligula" +} +item { 
+ name: "146314" + id: 2448 + display_name: "Opheodrys vernalis" +} +item { + name: "3906" + id: 2449 + display_name: "Numenius americanus" +} +item { + name: "39823" + id: 2450 + display_name: "Pseudemys gorzugi" +} +item { + name: "178991" + id: 2451 + display_name: "Sypharochiton pelliserpentis" +} +item { + name: "7061" + id: 2452 + display_name: "Chen caerulescens" +} +item { + name: "39830" + id: 2453 + display_name: "Pseudemys concinna" +} +item { + name: "127490" + id: 2454 + display_name: "Parrhasius m-album" +} +item { + name: "15256" + id: 2455 + display_name: "Chamaea fasciata" +} +item { + name: "39836" + id: 2456 + display_name: "Malaclemys terrapin" +} +item { + name: "133764" + id: 2457 + display_name: "Trichopoda pennipes" +} +item { + name: "334753" + id: 2458 + display_name: "Hypselonotus punctiventris" +} +item { + name: "58611" + id: 2459 + display_name: "Amia calva" +} +item { + name: "56240" + id: 2460 + display_name: "Argia vivida" +} +item { + name: "7089" + id: 2461 + display_name: "Branta canadensis" +} +item { + name: "146354" + id: 2462 + display_name: "Phrynosoma blainvillii" +} +item { + name: "56243" + id: 2463 + display_name: "Plebejus acmon" +} +item { + name: "144542" + id: 2464 + display_name: "Thalasseus elegans" +} +item { + name: "121783" + id: 2465 + display_name: "Lithobates clamitans melanota" +} +item { + name: "39865" + id: 2466 + display_name: "Glyptemys insculpta" +} +item { + name: "39867" + id: 2467 + display_name: "Emys orbicularis" +} +item { + name: "7104" + id: 2468 + display_name: "Branta sandvicensis" +} +item { + name: "50336" + id: 2469 + display_name: "Siproeta stelenes" +} +item { + name: "7056" + id: 2470 + display_name: "Aythya americana" +} +item { + name: "7107" + id: 2471 + display_name: "Aix sponsa" +} +item { + name: "7109" + id: 2472 + display_name: "Lophodytes cucullatus" +} +item { + name: "7111" + id: 2473 + display_name: "Histrionicus histrionicus" +} +item { + name: "367562" + id: 2474 + display_name: "Aratinga nenday" +} +item { + name: "39885" + id: 2475 + display_name: "Emydoidea blandingii" +} +item { + name: "367566" + id: 2476 + display_name: "Psittacara holochlorus" +} +item { + name: "143181" + id: 2477 + display_name: "Marimatha nigrofimbria" +} +item { + name: "7120" + id: 2478 + display_name: "Cairina moschata" +} +item { + name: "7122" + id: 2479 + display_name: "Netta rufina" +} +item { + name: "130003" + id: 2480 + display_name: "Phaeoura quernaria" +} +item { + name: "367572" + id: 2481 + display_name: "Psittacara erythrogenys" +} +item { + name: "17009" + id: 2482 + display_name: "Sayornis saya" +} +item { + name: "154582" + id: 2483 + display_name: "Ennomos magnaria" +} +item { + name: "58532" + id: 2484 + display_name: "Colias eurytheme" +} +item { + name: "121821" + id: 2485 + display_name: "Sceliphron caementarium" +} +item { + name: "48094" + id: 2486 + display_name: "Dryocampa rubicunda" +} +item { + name: "7057" + id: 2487 + display_name: "Aythya valisineria" +} +item { + name: "17646" + id: 2488 + display_name: "Picoides albolarvatus" +} +item { + name: "201551" + id: 2489 + display_name: "Procyon lotor lotor" +} +item { + name: "58534" + id: 2490 + display_name: "Lycaena hyllus" +} +item { + name: "73553" + id: 2491 + display_name: "Vermivora cyanoptera" +} +item { + name: "359401" + id: 2492 + display_name: "Exomala orientalis" +} +item { + name: "8018" + id: 2493 + display_name: "Corvus caurinus" +} +item { + name: "490478" + id: 2494 + display_name: "Tegula brunnea" +} +item { + name: "20307" 
+ id: 2495 + display_name: "Asio otus" +} +item { + name: "227466" + id: 2496 + display_name: "Peridea ferruginea" +} +item { + name: "122172" + id: 2497 + display_name: "Pyrisitia lisa" +} +item { + name: "133631" + id: 2498 + display_name: "Polites peckius" +} +item { + name: "8021" + id: 2499 + display_name: "Corvus brachyrhynchos" +} +item { + name: "7170" + id: 2500 + display_name: "Clangula hyemalis" +} +item { + name: "58539" + id: 2501 + display_name: "Satyrium calanus" +} +item { + name: "27137" + id: 2502 + display_name: "Coluber constrictor" +} +item { + name: "7176" + id: 2503 + display_name: "Chenonetta jubata" +} +item { + name: "42157" + id: 2504 + display_name: "Giraffa camelopardalis" +} +item { + name: "144541" + id: 2505 + display_name: "Thalasseus sandvicensis" +} +item { + name: "23572" + id: 2506 + display_name: "Litoria aurea" +} +item { + name: "354820" + id: 2507 + display_name: "Patiriella regularis" +} +item { + name: "55887" + id: 2508 + display_name: "Andricus quercuscalifornicus" +} +item { + name: "46255" + id: 2509 + display_name: "Ammospermophilus leucurus" +} +item { + name: "334341" + id: 2510 + display_name: "Oryctolagus cuniculus domesticus" +} +item { + name: "144560" + id: 2511 + display_name: "Eolophus roseicapilla" +} +item { + name: "94043" + id: 2512 + display_name: "Anax imperator" +} +item { + name: "425004" + id: 2513 + display_name: "Dryas iulia moderata" +} +item { + name: "269359" + id: 2514 + display_name: "Cactophagus spinolae" +} +item { + name: "72755" + id: 2515 + display_name: "Colaptes rubiginosus" +} +item { + name: "319123" + id: 2516 + display_name: "Meleagris gallopavo silvestris" +} +item { + name: "130846" + id: 2517 + display_name: "Lyssa zampa" +} +item { + name: "203831" + id: 2518 + display_name: "Nemoria bistriaria" +} +item { + name: "367678" + id: 2519 + display_name: "Ptiliogonys cinereus" +} +item { + name: "5301" + id: 2520 + display_name: "Elanoides forficatus" +} +item { + name: "9398" + id: 2521 + display_name: "Carduelis carduelis" +} +item { + name: "143201" + id: 2522 + display_name: "Coryphista meadii" +} +item { + name: "104419" + id: 2523 + display_name: "Lestes australis" +} +item { + name: "367693" + id: 2524 + display_name: "Cassiculus melanicterus" +} +item { + name: "143452" + id: 2525 + display_name: "Deidamia inscriptum" +} +item { + name: "466003" + id: 2526 + display_name: "Romalea microptera" +} +item { + name: "84494" + id: 2527 + display_name: "Paraphidippus aurantius" +} +item { + name: "203866" + id: 2528 + display_name: "Rabdophaga strobiloides" +} +item { + name: "72797" + id: 2529 + display_name: "Dendragapus fuliginosus" +} +item { + name: "7266" + id: 2530 + display_name: "Psaltriparus minimus" +} +item { + name: "120920" + id: 2531 + display_name: "Odocoileus virginianus clavium" +} +item { + name: "7278" + id: 2532 + display_name: "Aegithalos caudatus" +} +item { + name: "30681" + id: 2533 + display_name: "Agkistrodon contortrix mokasen" +} +item { + name: "413547" + id: 2534 + display_name: "Zosterops lateralis lateralis" +} +item { + name: "48262" + id: 2535 + display_name: "Apatelodes torrefacta" +} +item { + name: "121993" + id: 2536 + display_name: "Lampides boeticus" +} +item { + name: "48267" + id: 2537 + display_name: "Crotalus oreganus oreganus" +} +item { + name: "48268" + id: 2538 + display_name: "Crotalus oreganus" +} +item { + name: "147309" + id: 2539 + display_name: "Feltia herilis" +} +item { + name: "146413" + id: 2540 + display_name: "Sceloporus consobrinus" +} +item { + 
name: "326764" + id: 2541 + display_name: "Cyprinus carpio haematopterus" +} +item { + name: "5315" + id: 2542 + display_name: "Haliaeetus leucogaster" +} +item { + name: "4519" + id: 2543 + display_name: "Uria aalge" +} +item { + name: "40085" + id: 2544 + display_name: "Gopherus polyphemus" +} +item { + name: "23702" + id: 2545 + display_name: "Agalychnis callidryas" +} +item { + name: "210116" + id: 2546 + display_name: "Tringa semipalmata inornatus" +} +item { + name: "40092" + id: 2547 + display_name: "Stigmochelys pardalis" +} +item { + name: "59931" + id: 2548 + display_name: "Acanthurus triostegus" +} +item { + name: "48292" + id: 2549 + display_name: "Philoscia muscorum" +} +item { + name: "146601" + id: 2550 + display_name: "Scolopendra heros" +} +item { + name: "244906" + id: 2551 + display_name: "Panchlora nivea" +} +item { + name: "48302" + id: 2552 + display_name: "Limulus polyphemus" +} +item { + name: "180008" + id: 2553 + display_name: "Otospermophilus variegatus" +} +item { + name: "7347" + id: 2554 + display_name: "Alauda arvensis" +} +item { + name: "43459" + id: 2555 + display_name: "Macaca fascicularis" +} +item { + name: "113846" + id: 2556 + display_name: "Telebasis salva" +} +item { + name: "7356" + id: 2557 + display_name: "Galerida cristata" +} +item { + name: "64705" + id: 2558 + display_name: "Delichon urbicum" +} +item { + name: "145932" + id: 2559 + display_name: "Aspidoscelis hyperythra beldingi" +} +item { + name: "72912" + id: 2560 + display_name: "Helmitheros vermivorum" +} +item { + name: "69805" + id: 2561 + display_name: "Octogomphus specularis" +} +item { + name: "129572" + id: 2562 + display_name: "Aphomia sociella" +} +item { + name: "31964" + id: 2563 + display_name: "Barisia imbricata" +} +item { + name: "244625" + id: 2564 + display_name: "Halmus chalybeus" +} +item { + name: "58576" + id: 2565 + display_name: "Phyciodes cocyta" +} +item { + name: "72931" + id: 2566 + display_name: "Hylocharis leucotis" +} +item { + name: "104449" + id: 2567 + display_name: "Lestes rectangularis" +} +item { + name: "14886" + id: 2568 + display_name: "Mimus polyglottos" +} +item { + name: "23783" + id: 2569 + display_name: "Hyla versicolor" +} +item { + name: "23784" + id: 2570 + display_name: "Hyla plicata" +} +item { + name: "8575" + id: 2571 + display_name: "Gymnorhina tibicen" +} +item { + name: "2599" + id: 2572 + display_name: "Alcedo atthis" +} +item { + name: "61152" + id: 2573 + display_name: "Pyrrhosoma nymphula" +} +item { + name: "58579" + id: 2574 + display_name: "Polygonia interrogationis" +} +item { + name: "31993" + id: 2575 + display_name: "Ophisaurus attenuatus attenuatus" +} +item { + name: "53985" + id: 2576 + display_name: "Odocoileus hemionus californicus" +} +item { + name: "144549" + id: 2577 + display_name: "Streptopelia chinensis" +} +item { + name: "105730" + id: 2578 + display_name: "Micrathyria hagenii" +} +item { + name: "7428" + id: 2579 + display_name: "Bombycilla cedrorum" +} +item { + name: "7429" + id: 2580 + display_name: "Bombycilla garrulus" +} +item { + name: "50391" + id: 2581 + display_name: "Polygonia gracilis" +} +item { + name: "7067" + id: 2582 + display_name: "Tadorna tadorna" +} +item { + name: "413513" + id: 2583 + display_name: "Petroica australis australis" +} +item { + name: "39469" + id: 2584 + display_name: "Varanus varius" +} +item { + name: "58479" + id: 2585 + display_name: "Pholisora catullus" +} +item { + name: "127929" + id: 2586 + display_name: "Achalarus lyciades" +} +item { + name: "48403" + id: 2587 + 
display_name: "Gasterosteus aculeatus" +} +item { + name: "18990" + id: 2588 + display_name: "Amazona autumnalis" +} +item { + name: "1241" + id: 2589 + display_name: "Dendragapus obscurus" +} +item { + name: "228634" + id: 2590 + display_name: "Ponometia erastrioides" +} +item { + name: "64806" + id: 2591 + display_name: "Pelophylax" +} +item { + name: "51761" + id: 2592 + display_name: "Hetaerina americana" +} +item { + name: "7464" + id: 2593 + display_name: "Catherpes mexicanus" +} +item { + name: "318761" + id: 2594 + display_name: "Sceloporus uniformis" +} +item { + name: "7068" + id: 2595 + display_name: "Tadorna ferruginea" +} +item { + name: "204077" + id: 2596 + display_name: "Achyra rantalis" +} +item { + name: "7470" + id: 2597 + display_name: "Campylorhynchus brunneicapillus" +} +item { + name: "32048" + id: 2598 + display_name: "Gerrhonotus infernalis" +} +item { + name: "204081" + id: 2599 + display_name: "Pyrausta laticlavia" +} +item { + name: "7476" + id: 2600 + display_name: "Campylorhynchus rufinucha" +} +item { + name: "32055" + id: 2601 + display_name: "Elgaria multicarinata" +} +item { + name: "244276" + id: 2602 + display_name: "Rhipidura fuliginosa" +} +item { + name: "144187" + id: 2603 + display_name: "Pyrisitia proterpia" +} +item { + name: "32059" + id: 2604 + display_name: "Elgaria multicarinata multicarinata" +} +item { + name: "32061" + id: 2605 + display_name: "Elgaria kingii" +} +item { + name: "146750" + id: 2606 + display_name: "Lascoria ambigualis" +} +item { + name: "32064" + id: 2607 + display_name: "Elgaria coerulea" +} +item { + name: "23873" + id: 2608 + display_name: "Hyla squirella" +} +item { + name: "48450" + id: 2609 + display_name: "Peltodoris nobilis" +} +item { + name: "64146" + id: 2610 + display_name: "Fissurella volcano" +} +item { + name: "48259" + id: 2611 + display_name: "Pelidnota punctata" +} +item { + name: "122185" + id: 2612 + display_name: "Pantherophis alleghaniensis quadrivittata" +} +item { + name: "7498" + id: 2613 + display_name: "Polioptila melanura" +} +item { + name: "56652" + id: 2614 + display_name: "Haliotis rufescens" +} +item { + name: "122191" + id: 2615 + display_name: "Pelecanus occidentalis carolinensis" +} +item { + name: "73041" + id: 2616 + display_name: "Melozone aberti" +} +item { + name: "199381" + id: 2617 + display_name: "Homalodisca vitripennis" +} +item { + name: "73044" + id: 2618 + display_name: "Melozone crissalis" +} +item { + name: "83290" + id: 2619 + display_name: "Zanclus cornutus" +} +item { + name: "7513" + id: 2620 + display_name: "Thryothorus ludovicianus" +} +item { + name: "28559" + id: 2621 + display_name: "Storeria occipitomaculata occipitomaculata" +} +item { + name: "24255" + id: 2622 + display_name: "Pseudacris maculata" +} +item { + name: "130398" + id: 2623 + display_name: "Melanargia galathea" +} +item { + name: "29925" + id: 2624 + display_name: "Heterodon platirhinos" +} +item { + name: "48484" + id: 2625 + display_name: "Harmonia axyridis" +} +item { + name: "122214" + id: 2626 + display_name: "Odontotaenius disjunctus" +} +item { + name: "39484" + id: 2627 + display_name: "Xantusia vigilis" +} +item { + name: "73919" + id: 2628 + display_name: "Podarcis sicula" +} +item { + name: "154553" + id: 2629 + display_name: "Leptoglossus clypealis" +} +item { + name: "23922" + id: 2630 + display_name: "Hyla intermedia" +} +item { + name: "122228" + id: 2631 + display_name: "Acharia stimulea" +} +item { + name: "108344" + id: 2632 + display_name: "Pantala flavescens" +} +item { + name: 
"118538" + id: 2633 + display_name: "Cotinis nitida" +} +item { + name: "23930" + id: 2634 + display_name: "Hyla chrysoscelis" +} +item { + name: "23933" + id: 2635 + display_name: "Hyla arenicolor" +} +item { + name: "122238" + id: 2636 + display_name: "Porcellio scaber" +} +item { + name: "479803" + id: 2637 + display_name: "Dioprosopa clavata" +} +item { + name: "5355" + id: 2638 + display_name: "Parabuteo unicinctus" +} +item { + name: "146822" + id: 2639 + display_name: "Texola elada" +} +item { + name: "236935" + id: 2640 + display_name: "Anas platyrhynchos domesticus" +} +item { + name: "7562" + id: 2641 + display_name: "Troglodytes aedon" +} +item { + name: "339444" + id: 2642 + display_name: "Buteo lineatus elegans" +} +item { + name: "42221" + id: 2643 + display_name: "Odocoileus hemionus columbianus" +} +item { + name: "15764" + id: 2644 + display_name: "Thamnophilus doliatus" +} +item { + name: "122261" + id: 2645 + display_name: "Cucullia convexipennis" +} +item { + name: "122262" + id: 2646 + display_name: "Brachystola magna" +} +item { + name: "7576" + id: 2647 + display_name: "Thryomanes bewickii" +} +item { + name: "143015" + id: 2648 + display_name: "Eubaphe mendica" +} +item { + name: "73592" + id: 2649 + display_name: "Actinemys marmorata" +} +item { + name: "84549" + id: 2650 + display_name: "Plathemis lydia" +} +item { + name: "23969" + id: 2651 + display_name: "Hyla cinerea" +} +item { + name: "318882" + id: 2652 + display_name: "Ancistrocerus gazella" +} +item { + name: "7072" + id: 2653 + display_name: "Tadorna variegata" +} +item { + name: "48548" + id: 2654 + display_name: "Vanessa cardui" +} +item { + name: "48549" + id: 2655 + display_name: "Vanessa virginiensis" +} +item { + name: "122278" + id: 2656 + display_name: "Pomacea canaliculata" +} +item { + name: "9457" + id: 2657 + display_name: "Myioborus miniatus" +} +item { + name: "122280" + id: 2658 + display_name: "Pyrgus albescens" +} +item { + name: "122281" + id: 2659 + display_name: "Calycopis cecrops" +} +item { + name: "130474" + id: 2660 + display_name: "Achlyodes pallida" +} +item { + name: "338503" + id: 2661 + display_name: "Phalacrocorax varius varius" +} +item { + name: "9458" + id: 2662 + display_name: "Myioborus pictus" +} +item { + name: "73629" + id: 2663 + display_name: "Anolis nebulosus" +} +item { + name: "122291" + id: 2664 + display_name: "Larus argentatus smithsonianus" +} +item { + name: "56756" + id: 2665 + display_name: "Murgantia histrionica" +} +item { + name: "73148" + id: 2666 + display_name: "Parkesia motacilla" +} +item { + name: "48575" + id: 2667 + display_name: "Okenia rosacea" +} +item { + name: "56768" + id: 2668 + display_name: "Sula granti" +} +item { + name: "48578" + id: 2669 + display_name: "Anteos maerula" +} +item { + name: "64968" + id: 2670 + display_name: "Anaxyrus americanus" +} +item { + name: "64970" + id: 2671 + display_name: "Anaxyrus boreas" +} +item { + name: "115549" + id: 2672 + display_name: "Crotalus lepidus lepidus" +} +item { + name: "64977" + id: 2673 + display_name: "Anaxyrus fowleri" +} +item { + name: "19022" + id: 2674 + display_name: "Ara macao" +} +item { + name: "24259" + id: 2675 + display_name: "Pseudacris regilla" +} +item { + name: "64984" + id: 2676 + display_name: "Anaxyrus punctatus" +} +item { + name: "64985" + id: 2677 + display_name: "Anaxyrus quercicus" +} +item { + name: "73178" + id: 2678 + display_name: "Peucaea ruficauda" +} +item { + name: "64987" + id: 2679 + display_name: "Anaxyrus speciosus" +} +item { + name: "64989" + id: 
2680 + display_name: "Anaxyrus woodhousii" +} +item { + name: "339596" + id: 2681 + display_name: "Calidris subruficollis" +} +item { + name: "56552" + id: 2682 + display_name: "Carabus nemoralis" +} +item { + name: "84722" + id: 2683 + display_name: "Ischnura verticalis" +} +item { + name: "122356" + id: 2684 + display_name: "Eumorpha achemon" +} +item { + name: "318965" + id: 2685 + display_name: "Chrysolina bankii" +} +item { + name: "228855" + id: 2686 + display_name: "Protodeltote muscosula" +} +item { + name: "146940" + id: 2687 + display_name: "Agriphila vulgivagella" +} +item { + name: "56832" + id: 2688 + display_name: "Nymphalis antiopa" +} +item { + name: "61355" + id: 2689 + display_name: "Vespula pensylvanica" +} +item { + name: "48645" + id: 2690 + display_name: "Megathura crenulata" +} +item { + name: "73222" + id: 2691 + display_name: "Phoenicopterus roseus" +} +item { + name: "363354" + id: 2692 + display_name: "Lobatus gigas" +} +item { + name: "3802" + id: 2693 + display_name: "Morus bassanus" +} +item { + name: "62722" + id: 2694 + display_name: "Apalone spinifera spinifera" +} +item { + name: "48655" + id: 2695 + display_name: "Aplysia californica" +} +item { + name: "54468" + id: 2696 + display_name: "Aglais urticae" +} +item { + name: "48662" + id: 2697 + display_name: "Danaus plexippus" +} +item { + name: "49071" + id: 2698 + display_name: "Metridium senile" +} +item { + name: "228899" + id: 2699 + display_name: "Psamatodes abydata" +} +item { + name: "133102" + id: 2700 + display_name: "Oncometopia orbona" +} +item { + name: "39659" + id: 2701 + display_name: "Chelonia mydas" +} +item { + name: "121437" + id: 2702 + display_name: "Dolomedes triton" +} +item { + name: "94545" + id: 2703 + display_name: "Argia fumipennis" +} +item { + name: "56887" + id: 2704 + display_name: "Bombus pensylvanicus" +} +item { + name: "40509" + id: 2705 + display_name: "Eptesicus fuscus" +} +item { + name: "58635" + id: 2706 + display_name: "Lepomis megalotis" +} +item { + name: "100369" + id: 2707 + display_name: "Erpetogomphus designatus" +} +item { + name: "58636" + id: 2708 + display_name: "Lepomis cyanellus" +} +item { + name: "40522" + id: 2709 + display_name: "Lasiurus borealis" +} +item { + name: "102006" + id: 2710 + display_name: "Hagenius brevistylus" +} +item { + name: "50283" + id: 2711 + display_name: "Marpesia petreus" +} +item { + name: "123829" + id: 2712 + display_name: "Pelecanus occidentalis californicus" +} +item { + name: "62453" + id: 2713 + display_name: "Anthidium manicatum" +} +item { + name: "56925" + id: 2714 + display_name: "Graphocephala coccinea" +} +item { + name: "48738" + id: 2715 + display_name: "Sphex pensylvanicus" +} +item { + name: "43151" + id: 2716 + display_name: "Oryctolagus cuniculus" +} +item { + name: "19822" + id: 2717 + display_name: "Glaucidium brasilianum" +} +item { + name: "48750" + id: 2718 + display_name: "Lottia scabra" +} +item { + name: "335071" + id: 2719 + display_name: "Elophila obliteralis" +} +item { + name: "81521" + id: 2720 + display_name: "Vipera berus" +} +item { + name: "43697" + id: 2721 + display_name: "Elephas maximus" +} +item { + name: "7079" + id: 2722 + display_name: "Oxyura jamaicensis" +} +item { + name: "43042" + id: 2723 + display_name: "Erinaceus europaeus" +} +item { + name: "40086" + id: 2724 + display_name: "Gopherus agassizii" +} +item { + name: "81545" + id: 2725 + display_name: "Lumbricus terrestris" +} +item { + name: "16010" + id: 2726 + display_name: "Myiarchus cinerascens" +} +item { + name: 
"2669" + id: 2727 + display_name: "Chloroceryle americana" +} +item { + name: "9535" + id: 2728 + display_name: "Sturnella neglecta" +} +item { + name: "81554" + id: 2729 + display_name: "Ictalurus punctatus" +} +item { + name: "339907" + id: 2730 + display_name: "Ramphastos ambiguus" +} +item { + name: "39814" + id: 2731 + display_name: "Terrapene carolina" +} +item { + name: "10254" + id: 2732 + display_name: "Paroaria coronata" +} +item { + name: "40614" + id: 2733 + display_name: "Antrozous pallidus" +} +item { + name: "502385" + id: 2734 + display_name: "Probole amicaria" +} +item { + name: "24233" + id: 2735 + display_name: "Acris gryllus" +} +item { + name: "81579" + id: 2736 + display_name: "Steatoda triangulosa" +} +item { + name: "81580" + id: 2737 + display_name: "Callosamia promethea" +} +item { + name: "146034" + id: 2738 + display_name: "Coluber lateralis" +} +item { + name: "81582" + id: 2739 + display_name: "Hyalophora cecropia" +} +item { + name: "81583" + id: 2740 + display_name: "Anisota senatoria" +} +item { + name: "66002" + id: 2741 + display_name: "Lithobates palustris" +} +item { + name: "81586" + id: 2742 + display_name: "Citheronia regalis" +} +item { + name: "40629" + id: 2743 + display_name: "Lasionycteris noctivagans" +} +item { + name: "81590" + id: 2744 + display_name: "Eacles imperialis" +} +item { + name: "204472" + id: 2745 + display_name: "Buteo buteo" +} +item { + name: "65212" + id: 2746 + display_name: "Craugastor augusti" +} +item { + name: "48830" + id: 2747 + display_name: "Patiria miniata" +} +item { + name: "48833" + id: 2748 + display_name: "Pisaster giganteus" +} +item { + name: "16071" + id: 2749 + display_name: "Myiodynastes luteiventris" +} +item { + name: "81610" + id: 2750 + display_name: "Balanus glandula" +} +item { + name: "24268" + id: 2751 + display_name: "Pseudacris crucifer" +} +item { + name: "16079" + id: 2752 + display_name: "Contopus sordidulus" +} +item { + name: "204496" + id: 2753 + display_name: "Corvus corone" +} +item { + name: "204498" + id: 2754 + display_name: "Cyanoramphus novaezelandiae" +} +item { + name: "24277" + id: 2755 + display_name: "Smilisca baudinii" +} +item { + name: "22631" + id: 2756 + display_name: "Eleutherodactylus planirostris" +} +item { + name: "16100" + id: 2757 + display_name: "Contopus virens" +} +item { + name: "42278" + id: 2758 + display_name: "Aepyceros melampus" +} +item { + name: "16106" + id: 2759 + display_name: "Contopus pertinax" +} +item { + name: "16110" + id: 2760 + display_name: "Contopus cooperi" +} +item { + name: "42280" + id: 2761 + display_name: "Connochaetes taurinus" +} +item { + name: "47455" + id: 2762 + display_name: "Octopus rubescens" +} +item { + name: "204533" + id: 2763 + display_name: "Larus argentatus" +} +item { + name: "81656" + id: 2764 + display_name: "Nematocampa resistaria" +} +item { + name: "81657" + id: 2765 + display_name: "Lacinipolia renigera" +} +item { + name: "204519" + id: 2766 + display_name: "Halcyon smyrnensis" +} +item { + name: "62762" + id: 2767 + display_name: "Cordulegaster dorsalis" +} +item { + name: "81663" + id: 2768 + display_name: "Malacosoma disstria" +} +item { + name: "32512" + id: 2769 + display_name: "Rena dulcis" +} +item { + name: "81665" + id: 2770 + display_name: "Orgyia leucostigma" +} +item { + name: "130821" + id: 2771 + display_name: "Haploa confusa" +} +item { + name: "81672" + id: 2772 + display_name: "Clemensia albata" +} +item { + name: "204554" + id: 2773 + display_name: "Onychognathus morio" +} +item { + name: "81677" 
+ id: 2774 + display_name: "Euchaetes egle" +} +item { + name: "81680" + id: 2775 + display_name: "Scopula limboundata" +} +item { + name: "318497" + id: 2776 + display_name: "Hemipenthes sinuosa" +} +item { + name: "179987" + id: 2777 + display_name: "Ictidomys parvidens" +} +item { + name: "179988" + id: 2778 + display_name: "Ictidomys tridecemlineatus" +} +item { + name: "81685" + id: 2779 + display_name: "Evergestis pallidata" +} +item { + name: "81687" + id: 2780 + display_name: "Noctua pronuba" +} +item { + name: "179992" + id: 2781 + display_name: "Xerospermophilus spilosoma" +} +item { + name: "179994" + id: 2782 + display_name: "Urocitellus armatus" +} +item { + name: "9519" + id: 2783 + display_name: "Cyanocompsa parellina" +} +item { + name: "179998" + id: 2784 + display_name: "Urocitellus columbianus" +} +item { + name: "114463" + id: 2785 + display_name: "Trithemis annulata" +} +item { + name: "199169" + id: 2786 + display_name: "Catocala maestosa" +} +item { + name: "143323" + id: 2787 + display_name: "Tolype velleda" +} +item { + name: "120113" + id: 2788 + display_name: "Anthrenus verbasci" +} +item { + name: "7601" + id: 2789 + display_name: "Cistothorus palustris" +} +item { + name: "81706" + id: 2790 + display_name: "Alaus oculatus" +} +item { + name: "220974" + id: 2791 + display_name: "Harrisimemna trisignata" +} +item { + name: "20445" + id: 2792 + display_name: "Tyto alba" +} +item { + name: "73523" + id: 2793 + display_name: "Trogon caligatus" +} +item { + name: "49590" + id: 2794 + display_name: "Micropterus dolomieu" +} +item { + name: "41729" + id: 2795 + display_name: "Mirounga leonina" +} +item { + name: "48957" + id: 2796 + display_name: "Arilus cristatus" +} +item { + name: "81727" + id: 2797 + display_name: "Abaeis nicippe" +} +item { + name: "8000" + id: 2798 + display_name: "Corvus monedula" +} +item { + name: "8001" + id: 2799 + display_name: "Corvus ossifragus" +} +item { + name: "171843" + id: 2800 + display_name: "Rabdotus dealbatus" +} +item { + name: "81734" + id: 2801 + display_name: "Neophasia menapia" +} +item { + name: "258813" + id: 2802 + display_name: "Clogmia albipunctata" +} +item { + name: "332243" + id: 2803 + display_name: "Lepturobosca chrysocoma" +} +item { + name: "81744" + id: 2804 + display_name: "Heliconius erato" +} +item { + name: "218424" + id: 2805 + display_name: "Dicymolomia julianalis" +} +item { + name: "3813" + id: 2806 + display_name: "Spheniscus demersus" +} +item { + name: "81749" + id: 2807 + display_name: "Malacosoma americanum" +} +item { + name: "81752" + id: 2808 + display_name: "Pyrausta tyralis" +} +item { + name: "48987" + id: 2809 + display_name: "Hippodamia convergens" +} +item { + name: "8029" + id: 2810 + display_name: "Corvus frugilegus" +} +item { + name: "8031" + id: 2811 + display_name: "Corvus splendens" +} +item { + name: "147298" + id: 2812 + display_name: "Lasiommata megera" +} +item { + name: "7087" + id: 2813 + display_name: "Branta bernicla" +} +item { + name: "48550" + id: 2814 + display_name: "Phoebis sennae" +} +item { + name: "4349" + id: 2815 + display_name: "Larus hyperboreus" +} +item { + name: "84027" + id: 2816 + display_name: "Trigonopeltastes delta" +} +item { + name: "194762" + id: 2817 + display_name: "Vanessa itea" +} +item { + name: "311163" + id: 2818 + display_name: "Pseudomops septentrionalis" +} +item { + name: "55957" + id: 2819 + display_name: "Scudderia furcata" +} +item { + name: "39822" + id: 2820 + display_name: "Pseudemys texana" +} +item { + name: "204685" + id: 2821 + 
display_name: "Chlosyne ehrenbergii" +} +item { + name: "122767" + id: 2822 + display_name: "Columba livia domestica" +} +item { + name: "55960" + id: 2823 + display_name: "Sceloporus graciosus" +} +item { + name: "121823" + id: 2824 + display_name: "Autographa californica" +} +item { + name: "8088" + id: 2825 + display_name: "Garrulus glandarius" +} +item { + name: "65433" + id: 2826 + display_name: "Ecnomiohyla miotympanum" +} +item { + name: "49051" + id: 2827 + display_name: "Anthopleura sola" +} +item { + name: "125815" + id: 2828 + display_name: "Coenonympha arcania" +} +item { + name: "55963" + id: 2829 + display_name: "Malacosoma californicum" +} +item { + name: "120479" + id: 2830 + display_name: "Anser anser domesticus" +} +item { + name: "133788" + id: 2831 + display_name: "Xylocopa micans" +} +item { + name: "81559" + id: 2832 + display_name: "Epargyreus clarus" +} +item { + name: "81839" + id: 2833 + display_name: "Platycryptus undatus" +} +item { + name: "133791" + id: 2834 + display_name: "Polistes exclamans" +} +item { + name: "84640" + id: 2835 + display_name: "Polistes dominula" +} +item { + name: "73666" + id: 2836 + display_name: "Aspidoscelis exsanguis" +} +item { + name: "73669" + id: 2837 + display_name: "Aspidoscelis gularis" +} +item { + name: "16326" + id: 2838 + display_name: "Mitrephanes phaeocercus" +} +item { + name: "49095" + id: 2839 + display_name: "Pagurus samuelis" +} +item { + name: "73672" + id: 2840 + display_name: "Aspidoscelis hyperythra" +} +item { + name: "59192" + id: 2841 + display_name: "Polites sabuleti" +} +item { + name: "81561" + id: 2842 + display_name: "Anaea andria" +} +item { + name: "81881" + id: 2843 + display_name: "Amphipsalta zelandica" +} +item { + name: "73690" + id: 2844 + display_name: "Aspidoscelis sexlineata" +} +item { + name: "73694" + id: 2845 + display_name: "Aspidoscelis velox" +} +item { + name: "335840" + id: 2846 + display_name: "Pyrausta inornatalis" +} +item { + name: "49126" + id: 2847 + display_name: "Strongylocentrotus franciscanus" +} +item { + name: "204775" + id: 2848 + display_name: "Kricogonia lyside" +} +item { + name: "475115" + id: 2849 + display_name: "Ardenna creatopus" +} +item { + name: "475120" + id: 2850 + display_name: "Ardenna gravis" +} +item { + name: "62803" + id: 2851 + display_name: "Monadenia fidelis" +} +item { + name: "49150" + id: 2852 + display_name: "Agraulis vanillae" +} +item { + name: "83929" + id: 2853 + display_name: "Phanaeus vindex" +} +item { + name: "199839" + id: 2854 + display_name: "Haemorhous cassinii" +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/kitti_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/kitti_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..0afcc6936ebdb37ecbc7c3245929fcf178a02c0b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/kitti_label_map.pbtxt @@ -0,0 +1,9 @@ +item { + id: 1 + name: 'car' +} + +item { + id: 2 + name: 'pedestrian' +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/mscoco_complete_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/mscoco_complete_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..d73fc065a4bd4f024670f242acbc79c1fd8e82fe --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/mscoco_complete_label_map.pbtxt @@ -0,0 
+1,455 @@ +item { + name: "background" + id: 0 + display_name: "background" +} +item { + name: "/m/01g317" + id: 1 + display_name: "person" +} +item { + name: "/m/0199g" + id: 2 + display_name: "bicycle" +} +item { + name: "/m/0k4j" + id: 3 + display_name: "car" +} +item { + name: "/m/04_sv" + id: 4 + display_name: "motorcycle" +} +item { + name: "/m/05czz6l" + id: 5 + display_name: "airplane" +} +item { + name: "/m/01bjv" + id: 6 + display_name: "bus" +} +item { + name: "/m/07jdr" + id: 7 + display_name: "train" +} +item { + name: "/m/07r04" + id: 8 + display_name: "truck" +} +item { + name: "/m/019jd" + id: 9 + display_name: "boat" +} +item { + name: "/m/015qff" + id: 10 + display_name: "traffic light" +} +item { + name: "/m/01pns0" + id: 11 + display_name: "fire hydrant" +} +item { + name: "12" + id: 12 + display_name: "12" +} +item { + name: "/m/02pv19" + id: 13 + display_name: "stop sign" +} +item { + name: "/m/015qbp" + id: 14 + display_name: "parking meter" +} +item { + name: "/m/0cvnqh" + id: 15 + display_name: "bench" +} +item { + name: "/m/015p6" + id: 16 + display_name: "bird" +} +item { + name: "/m/01yrx" + id: 17 + display_name: "cat" +} +item { + name: "/m/0bt9lr" + id: 18 + display_name: "dog" +} +item { + name: "/m/03k3r" + id: 19 + display_name: "horse" +} +item { + name: "/m/07bgp" + id: 20 + display_name: "sheep" +} +item { + name: "/m/01xq0k1" + id: 21 + display_name: "cow" +} +item { + name: "/m/0bwd_0j" + id: 22 + display_name: "elephant" +} +item { + name: "/m/01dws" + id: 23 + display_name: "bear" +} +item { + name: "/m/0898b" + id: 24 + display_name: "zebra" +} +item { + name: "/m/03bk1" + id: 25 + display_name: "giraffe" +} +item { + name: "26" + id: 26 + display_name: "26" +} +item { + name: "/m/01940j" + id: 27 + display_name: "backpack" +} +item { + name: "/m/0hnnb" + id: 28 + display_name: "umbrella" +} +item { + name: "29" + id: 29 + display_name: "29" +} +item { + name: "30" + id: 30 + display_name: "30" +} +item { + name: "/m/080hkjn" + id: 31 + display_name: "handbag" +} +item { + name: "/m/01rkbr" + id: 32 + display_name: "tie" +} +item { + name: "/m/01s55n" + id: 33 + display_name: "suitcase" +} +item { + name: "/m/02wmf" + id: 34 + display_name: "frisbee" +} +item { + name: "/m/071p9" + id: 35 + display_name: "skis" +} +item { + name: "/m/06__v" + id: 36 + display_name: "snowboard" +} +item { + name: "/m/018xm" + id: 37 + display_name: "sports ball" +} +item { + name: "/m/02zt3" + id: 38 + display_name: "kite" +} +item { + name: "/m/03g8mr" + id: 39 + display_name: "baseball bat" +} +item { + name: "/m/03grzl" + id: 40 + display_name: "baseball glove" +} +item { + name: "/m/06_fw" + id: 41 + display_name: "skateboard" +} +item { + name: "/m/019w40" + id: 42 + display_name: "surfboard" +} +item { + name: "/m/0dv9c" + id: 43 + display_name: "tennis racket" +} +item { + name: "/m/04dr76w" + id: 44 + display_name: "bottle" +} +item { + name: "45" + id: 45 + display_name: "45" +} +item { + name: "/m/09tvcd" + id: 46 + display_name: "wine glass" +} +item { + name: "/m/08gqpm" + id: 47 + display_name: "cup" +} +item { + name: "/m/0dt3t" + id: 48 + display_name: "fork" +} +item { + name: "/m/04ctx" + id: 49 + display_name: "knife" +} +item { + name: "/m/0cmx8" + id: 50 + display_name: "spoon" +} +item { + name: "/m/04kkgm" + id: 51 + display_name: "bowl" +} +item { + name: "/m/09qck" + id: 52 + display_name: "banana" +} +item { + name: "/m/014j1m" + id: 53 + display_name: "apple" +} +item { + name: "/m/0l515" + id: 54 + display_name: "sandwich" +} +item { + 
name: "/m/0cyhj_" + id: 55 + display_name: "orange" +} +item { + name: "/m/0hkxq" + id: 56 + display_name: "broccoli" +} +item { + name: "/m/0fj52s" + id: 57 + display_name: "carrot" +} +item { + name: "/m/01b9xk" + id: 58 + display_name: "hot dog" +} +item { + name: "/m/0663v" + id: 59 + display_name: "pizza" +} +item { + name: "/m/0jy4k" + id: 60 + display_name: "donut" +} +item { + name: "/m/0fszt" + id: 61 + display_name: "cake" +} +item { + name: "/m/01mzpv" + id: 62 + display_name: "chair" +} +item { + name: "/m/02crq1" + id: 63 + display_name: "couch" +} +item { + name: "/m/03fp41" + id: 64 + display_name: "potted plant" +} +item { + name: "/m/03ssj5" + id: 65 + display_name: "bed" +} +item { + name: "66" + id: 66 + display_name: "66" +} +item { + name: "/m/04bcr3" + id: 67 + display_name: "dining table" +} +item { + name: "68" + id: 68 + display_name: "68" +} +item { + name: "69" + id: 69 + display_name: "69" +} +item { + name: "/m/09g1w" + id: 70 + display_name: "toilet" +} +item { + name: "71" + id: 71 + display_name: "71" +} +item { + name: "/m/07c52" + id: 72 + display_name: "tv" +} +item { + name: "/m/01c648" + id: 73 + display_name: "laptop" +} +item { + name: "/m/020lf" + id: 74 + display_name: "mouse" +} +item { + name: "/m/0qjjc" + id: 75 + display_name: "remote" +} +item { + name: "/m/01m2v" + id: 76 + display_name: "keyboard" +} +item { + name: "/m/050k8" + id: 77 + display_name: "cell phone" +} +item { + name: "/m/0fx9l" + id: 78 + display_name: "microwave" +} +item { + name: "/m/029bxz" + id: 79 + display_name: "oven" +} +item { + name: "/m/01k6s3" + id: 80 + display_name: "toaster" +} +item { + name: "/m/0130jx" + id: 81 + display_name: "sink" +} +item { + name: "/m/040b_t" + id: 82 + display_name: "refrigerator" +} +item { + name: "83" + id: 83 + display_name: "83" +} +item { + name: "/m/0bt_c3" + id: 84 + display_name: "book" +} +item { + name: "/m/01x3z" + id: 85 + display_name: "clock" +} +item { + name: "/m/02s195" + id: 86 + display_name: "vase" +} +item { + name: "/m/01lsmm" + id: 87 + display_name: "scissors" +} +item { + name: "/m/0kmg4" + id: 88 + display_name: "teddy bear" +} +item { + name: "/m/03wvsk" + id: 89 + display_name: "hair drier" +} +item { + name: "/m/012xff" + id: 90 + display_name: "toothbrush" +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/mscoco_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/mscoco_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..1f4872bd0c7f53e70beecf88af005c07a5df9e08 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/mscoco_label_map.pbtxt @@ -0,0 +1,400 @@ +item { + name: "/m/01g317" + id: 1 + display_name: "person" +} +item { + name: "/m/0199g" + id: 2 + display_name: "bicycle" +} +item { + name: "/m/0k4j" + id: 3 + display_name: "car" +} +item { + name: "/m/04_sv" + id: 4 + display_name: "motorcycle" +} +item { + name: "/m/05czz6l" + id: 5 + display_name: "airplane" +} +item { + name: "/m/01bjv" + id: 6 + display_name: "bus" +} +item { + name: "/m/07jdr" + id: 7 + display_name: "train" +} +item { + name: "/m/07r04" + id: 8 + display_name: "truck" +} +item { + name: "/m/019jd" + id: 9 + display_name: "boat" +} +item { + name: "/m/015qff" + id: 10 + display_name: "traffic light" +} +item { + name: "/m/01pns0" + id: 11 + display_name: "fire hydrant" +} +item { + name: "/m/02pv19" + id: 13 + display_name: "stop sign" +} +item { + name: "/m/015qbp" + id: 14 + 
display_name: "parking meter" +} +item { + name: "/m/0cvnqh" + id: 15 + display_name: "bench" +} +item { + name: "/m/015p6" + id: 16 + display_name: "bird" +} +item { + name: "/m/01yrx" + id: 17 + display_name: "cat" +} +item { + name: "/m/0bt9lr" + id: 18 + display_name: "dog" +} +item { + name: "/m/03k3r" + id: 19 + display_name: "horse" +} +item { + name: "/m/07bgp" + id: 20 + display_name: "sheep" +} +item { + name: "/m/01xq0k1" + id: 21 + display_name: "cow" +} +item { + name: "/m/0bwd_0j" + id: 22 + display_name: "elephant" +} +item { + name: "/m/01dws" + id: 23 + display_name: "bear" +} +item { + name: "/m/0898b" + id: 24 + display_name: "zebra" +} +item { + name: "/m/03bk1" + id: 25 + display_name: "giraffe" +} +item { + name: "/m/01940j" + id: 27 + display_name: "backpack" +} +item { + name: "/m/0hnnb" + id: 28 + display_name: "umbrella" +} +item { + name: "/m/080hkjn" + id: 31 + display_name: "handbag" +} +item { + name: "/m/01rkbr" + id: 32 + display_name: "tie" +} +item { + name: "/m/01s55n" + id: 33 + display_name: "suitcase" +} +item { + name: "/m/02wmf" + id: 34 + display_name: "frisbee" +} +item { + name: "/m/071p9" + id: 35 + display_name: "skis" +} +item { + name: "/m/06__v" + id: 36 + display_name: "snowboard" +} +item { + name: "/m/018xm" + id: 37 + display_name: "sports ball" +} +item { + name: "/m/02zt3" + id: 38 + display_name: "kite" +} +item { + name: "/m/03g8mr" + id: 39 + display_name: "baseball bat" +} +item { + name: "/m/03grzl" + id: 40 + display_name: "baseball glove" +} +item { + name: "/m/06_fw" + id: 41 + display_name: "skateboard" +} +item { + name: "/m/019w40" + id: 42 + display_name: "surfboard" +} +item { + name: "/m/0dv9c" + id: 43 + display_name: "tennis racket" +} +item { + name: "/m/04dr76w" + id: 44 + display_name: "bottle" +} +item { + name: "/m/09tvcd" + id: 46 + display_name: "wine glass" +} +item { + name: "/m/08gqpm" + id: 47 + display_name: "cup" +} +item { + name: "/m/0dt3t" + id: 48 + display_name: "fork" +} +item { + name: "/m/04ctx" + id: 49 + display_name: "knife" +} +item { + name: "/m/0cmx8" + id: 50 + display_name: "spoon" +} +item { + name: "/m/04kkgm" + id: 51 + display_name: "bowl" +} +item { + name: "/m/09qck" + id: 52 + display_name: "banana" +} +item { + name: "/m/014j1m" + id: 53 + display_name: "apple" +} +item { + name: "/m/0l515" + id: 54 + display_name: "sandwich" +} +item { + name: "/m/0cyhj_" + id: 55 + display_name: "orange" +} +item { + name: "/m/0hkxq" + id: 56 + display_name: "broccoli" +} +item { + name: "/m/0fj52s" + id: 57 + display_name: "carrot" +} +item { + name: "/m/01b9xk" + id: 58 + display_name: "hot dog" +} +item { + name: "/m/0663v" + id: 59 + display_name: "pizza" +} +item { + name: "/m/0jy4k" + id: 60 + display_name: "donut" +} +item { + name: "/m/0fszt" + id: 61 + display_name: "cake" +} +item { + name: "/m/01mzpv" + id: 62 + display_name: "chair" +} +item { + name: "/m/02crq1" + id: 63 + display_name: "couch" +} +item { + name: "/m/03fp41" + id: 64 + display_name: "potted plant" +} +item { + name: "/m/03ssj5" + id: 65 + display_name: "bed" +} +item { + name: "/m/04bcr3" + id: 67 + display_name: "dining table" +} +item { + name: "/m/09g1w" + id: 70 + display_name: "toilet" +} +item { + name: "/m/07c52" + id: 72 + display_name: "tv" +} +item { + name: "/m/01c648" + id: 73 + display_name: "laptop" +} +item { + name: "/m/020lf" + id: 74 + display_name: "mouse" +} +item { + name: "/m/0qjjc" + id: 75 + display_name: "remote" +} +item { + name: "/m/01m2v" + id: 76 + display_name: "keyboard" +} +item { + 
name: "/m/050k8" + id: 77 + display_name: "cell phone" +} +item { + name: "/m/0fx9l" + id: 78 + display_name: "microwave" +} +item { + name: "/m/029bxz" + id: 79 + display_name: "oven" +} +item { + name: "/m/01k6s3" + id: 80 + display_name: "toaster" +} +item { + name: "/m/0130jx" + id: 81 + display_name: "sink" +} +item { + name: "/m/040b_t" + id: 82 + display_name: "refrigerator" +} +item { + name: "/m/0bt_c3" + id: 84 + display_name: "book" +} +item { + name: "/m/01x3z" + id: 85 + display_name: "clock" +} +item { + name: "/m/02s195" + id: 86 + display_name: "vase" +} +item { + name: "/m/01lsmm" + id: 87 + display_name: "scissors" +} +item { + name: "/m/0kmg4" + id: 88 + display_name: "teddy bear" +} +item { + name: "/m/03wvsk" + id: 89 + display_name: "hair drier" +} +item { + name: "/m/012xff" + id: 90 + display_name: "toothbrush" +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/mscoco_minival_ids.txt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/mscoco_minival_ids.txt new file mode 100644 index 0000000000000000000000000000000000000000..5bbff3c18d4efed835bcf022f3a5fbc11da0e496 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/mscoco_minival_ids.txt @@ -0,0 +1,8059 @@ +25096 +251824 +35313 +546011 +524186 +205866 +511403 +313916 +47471 +258628 +233560 +576017 +404517 +410056 +178690 +248980 +511724 +429718 +163076 +244111 +126766 +313182 +191981 +139992 +325237 +248129 +214519 +175438 +493321 +174103 +563762 +536795 +289960 +473720 +515540 +292118 +360851 +267175 +532876 +171613 +581415 +259819 +441841 +381682 +58157 +4980 +473929 +70626 +93773 +283412 +36765 +495020 +278401 +329307 +192810 +491784 +506416 +225495 +553747 +86442 +242208 +132686 +385877 +290248 +525705 +5476 +486521 +332512 +138556 +348083 +284375 +40018 +296994 +38685 +432429 +183407 +434358 +472164 +530494 +570693 +193401 +392612 +98872 +445766 +532209 +98322 +285114 +267725 +51605 +314812 +91105 +535506 +540264 +375341 +449828 +277659 +68933 +76873 +217554 +213592 +190776 +516224 +474479 +343599 +578813 +128669 +546292 +475365 +377626 +128833 +427091 +547227 +11742 +80213 +462241 +374574 +121572 +29151 +13892 +262394 +303667 +198724 +7320 +448492 +419080 +460379 +483965 +556516 +139181 +1103 +308715 +207507 +213827 +216083 +445597 +240275 +379585 +116389 +138124 +559051 +326898 +419386 +503660 +519460 +23893 +24458 +518109 +462982 +151492 +514254 +2477 +147165 +570394 +548766 +250083 +364341 +351967 +386277 +328084 +511299 +499349 +315501 +234965 +428562 +219771 +288150 +136021 +168619 +298316 +75118 +189752 +243857 +296222 +554002 +533628 +384596 +202981 +498350 +391463 +183991 +528062 +451084 +7899 +408534 +329030 +318566 +22492 +361285 +226973 +213356 +417265 +105622 +161169 +261487 +167477 +233370 +142999 +256713 +305833 +103579 +352538 +135763 +392144 +61181 +200302 +456908 +286858 +179850 +488075 +174511 +194755 +317822 +2302 +304596 +172556 +548275 +341678 +55299 +134760 +352936 +545129 +377012 +141328 +103757 +552837 +28246 +125167 +328745 +278760 +337133 +403389 +146825 +502558 +265916 +428985 +492041 +113403 +372037 +306103 +287574 +187495 +479805 +336309 +162043 +95899 +43133 +464248 +149115 +247438 +74030 +130645 +282841 +127092 +101172 +536743 +179642 +58133 +49667 +170605 +11347 +365277 +201970 +292663 +217219 +463226 +41924 +281102 +357816 +490878 +100343 +525058 +133503 +416145 +29341 +415413 +125527 +507951 +262609 +240210 +581781 +345137 +526342 +268641 +328777 +32001 +137538 +39115 +415958 
+6771 +421865 +64909 +383601 +206907 +420840 +370980 +28452 +571893 +153520 +185890 +392991 +547013 +257359 +279879 +478614 +131919 +40937 +22874 +173375 +106344 +44801 +205401 +312870 +400886 +351530 +344013 +173500 +470423 +396729 +402499 +276585 +377097 +367619 +518908 +263866 +332292 +67805 +152211 +515025 +221350 +525247 +78490 +504342 +95908 +82668 +256199 +220270 +552065 +242379 +84866 +152281 +228464 +223122 +67537 +456968 +368349 +101985 +14681 +543551 +107558 +372009 +99054 +126540 +86877 +492785 +482585 +571564 +501116 +296871 +20395 +181518 +568041 +121154 +56187 +190018 +97156 +310325 +393274 +214574 +243222 +289949 +452121 +150508 +341752 +310757 +24040 +228551 +335589 +12020 +529597 +459884 +344888 +229713 +51948 +370929 +552061 +261072 +120070 +332067 +263014 +158993 +451714 +397327 +20965 +414340 +574946 +370266 +487534 +492246 +264771 +73702 +43997 +235124 +301093 +400048 +77681 +58472 +331386 +13783 +242513 +419158 +59325 +383033 +393258 +529041 +249276 +182775 +351793 +9727 +334069 +566771 +539355 +38662 +423617 +47559 +120592 +508303 +462565 +47916 +218208 +182362 +562101 +441442 +71239 +395378 +522637 +25603 +484450 +872 +171483 +527248 +323155 +240754 +15032 +419144 +313214 +250917 +333430 +242757 +221914 +283190 +194297 +228506 +550691 +172513 +312192 +530619 +113867 +323552 +374115 +35435 +160239 +62877 +441873 +196574 +62858 +557114 +427612 +242869 +356733 +304828 +24880 +490509 +407083 +457877 +402788 +536416 +385912 +544121 +500389 +451102 +12120 +483476 +70987 +482799 +542549 +49236 +424258 +435783 +182366 +438093 +501824 +232845 +53965 +223198 +288933 +450458 +285664 +196484 +408930 +519815 +290981 +398567 +315792 +490683 +257136 +75611 +302498 +332153 +82293 +416911 +558608 +564659 +536195 +370260 +57904 +527270 +6593 +145620 +551650 +470832 +515785 +251404 +287331 +150788 +334006 +266117 +10039 +579158 +328397 +468351 +550400 +31745 +405970 +16761 +323515 +459598 +558457 +570736 +476939 +472610 +72155 +112517 +13659 +530905 +458768 +43486 +560893 +493174 +31217 +262736 +412204 +142722 +151231 +480643 +197245 +398666 +444869 +110999 +191724 +479057 +492420 +170638 +277329 +301908 +395644 +537611 +141887 +47149 +403432 +34818 +372495 +67994 +337497 +478586 +249815 +533462 +281032 +289941 +151911 +271215 +407868 +360700 +508582 +103873 +353658 +369081 +406403 +331692 +26430 +105655 +572630 +37181 +91336 +484587 +318284 +113019 +33055 +25293 +229324 +374052 +384111 +213951 +315195 +319283 +539453 +17655 +308974 +326243 +539436 +417876 +526940 +356347 +221932 +73753 +292648 +262284 +304924 +558587 +374858 +253518 +311744 +539636 +40924 +136624 +334305 +365997 +63355 +191226 +526732 +367128 +575198 +500657 +50637 +17182 +424792 +565353 +563040 +383494 +74458 +155142 +197125 +223857 +428241 +440830 +371289 +437303 +330449 +93771 +82715 +499631 +381257 +563951 +192834 +528600 +404273 +270554 +208053 +188613 +484760 +432016 +129800 +91756 +523097 +317018 +487282 +444913 +159500 +126822 +540564 +105812 +560756 +306099 +471226 +123842 +513219 +154877 +497034 +283928 +564003 +238602 +194780 +462728 +558640 +524373 +455624 +3690 +560367 +316351 +455772 +223777 +161517 +243034 +250440 +239975 +441008 +324715 +152106 +246973 +462805 +296521 +412767 +530913 +370165 +292526 +107244 +217440 +330204 +220176 +577735 +197022 +127451 +518701 +212322 +204887 +27696 +348474 +119233 +282804 +230040 +425690 +409241 +296825 +296353 +375909 +123136 +573891 +338256 +198247 +373375 +151051 +500084 +557596 +120478 +44989 +283380 +149005 +522065 +626 +17198 +309633 +524245 +291589 +322714 
+455847 +248468 +371948 +444928 +20438 +481670 +147195 +95022 +548159 +553165 +395324 +391371 +86884 +561121 +219737 +38875 +338159 +377881 +185472 +359277 +114861 +378048 +126226 +10217 +320246 +15827 +178236 +370279 +352978 +408101 +77615 +337044 +223714 +20796 +352445 +263834 +156704 +377867 +119402 +399567 +1180 +257941 +560675 +390471 +209290 +258382 +466339 +56437 +195042 +384230 +203214 +36077 +283038 +38323 +158770 +532381 +395903 +375461 +397857 +326798 +371699 +369503 +495626 +464328 +462211 +397719 +434089 +424793 +476770 +531852 +303538 +525849 +480917 +419653 +265063 +48956 +5184 +279149 +396727 +374266 +124429 +36124 +240213 +147556 +339512 +577182 +288599 +257169 +178254 +393869 +122314 +28713 +48133 +540681 +100974 +368459 +500110 +73634 +460982 +203878 +578344 +443602 +502012 +399666 +103603 +22090 +257529 +176328 +536656 +408873 +116881 +460972 +33835 +460781 +51223 +46463 +89395 +407646 +337453 +461715 +16257 +426987 +234889 +3125 +165643 +517472 +451435 +206800 +112128 +331236 +163306 +94185 +498716 +532732 +146509 +458567 +153832 +105996 +353398 +546976 +283060 +247624 +110048 +243491 +154798 +543600 +149962 +355256 +352900 +203081 +372203 +284605 +516244 +190494 +150301 +326082 +64146 +402858 +413538 +399510 +460251 +94336 +458721 +57345 +424162 +423508 +69356 +567220 +509786 +37038 +111535 +341318 +372067 +358120 +244909 +180653 +39852 +438560 +357041 +67065 +51928 +171717 +520430 +552395 +431355 +528084 +20913 +309610 +262323 +573784 +449485 +154846 +283438 +430871 +199578 +516318 +563912 +348483 +485613 +143440 +94922 +168817 +74457 +45830 +66297 +514173 +99186 +296236 +230903 +452312 +476444 +568981 +100811 +237350 +194724 +453622 +49559 +270609 +113701 +415393 +92173 +137004 +188795 +148280 +448114 +575964 +163155 +518719 +219329 +214247 +363927 +65357 +87617 +552612 +457817 +124796 +47740 +560463 +513968 +273637 +354212 +95959 +261061 +307265 +316237 +191342 +463272 +169273 +396518 +93261 +572733 +407386 +202658 +446497 +420852 +229274 +432724 +34900 +352533 +49891 +66144 +146831 +467484 +97988 +561647 +301155 +507421 +173217 +577584 +451940 +99927 +350639 +178941 +485155 +175948 +360673 +92963 +361321 +48739 +577310 +517795 +93405 +506458 +394681 +167920 +16995 +519573 +270532 +527750 +563403 +494608 +557780 +178691 +8676 +186927 +550173 +361656 +575911 +281315 +534377 +57570 +340894 +37624 +143103 +538243 +425077 +376545 +108129 +170974 +7522 +408906 +264279 +79415 +344025 +186797 +234349 +226472 +123639 +225177 +237984 +38714 +223671 +358247 +152465 +521405 +453722 +361111 +557117 +235832 +309341 +268469 +108353 +532531 +357279 +537280 +437618 +122953 +7088 +36693 +127659 +431901 +57244 +567565 +568111 +202926 +504516 +555685 +322369 +347620 +110231 +568982 +295340 +529798 +300341 +158160 +73588 +119476 +387216 +154994 +259755 +211282 +433971 +263588 +299468 +570138 +123017 +355106 +540172 +406215 +8401 +548844 +161820 +396432 +495348 +222407 +53123 +491556 +108130 +440617 +448309 +22596 +346841 +213829 +135076 +56326 +233139 +487418 +227326 +137763 +383389 +47882 +207797 +167452 +112065 +150703 +421109 +171753 +158279 +240800 +66821 +152886 +163640 +475466 +301799 +106712 +470885 +536370 +420389 +396768 +281950 +18903 +357529 +33650 +168243 +201004 +389295 +557150 +185327 +181256 +557396 +182025 +61564 +301928 +332455 +199403 +18444 +177452 +204206 +38465 +215906 +153103 +445019 +324527 +299207 +429281 +574675 +157067 +241269 +100850 +502818 +576566 +296775 +873 +280363 +355240 +383445 +286182 +67327 +422778 +494855 +337246 +266853 +47516 +381991 +44081 
+403862 +381430 +370798 +173383 +387173 +22396 +484066 +349414 +262235 +492814 +65238 +209420 +336276 +453328 +407286 +420490 +360328 +158440 +398534 +489475 +477389 +297108 +69750 +507833 +198992 +99736 +546444 +514914 +482574 +54355 +63478 +191693 +61684 +412914 +267408 +424641 +56872 +318080 +30290 +33441 +199310 +337403 +26731 +453390 +506137 +188945 +185950 +239843 +357944 +290570 +523637 +551952 +513397 +357870 +523517 +277048 +259879 +186991 +521943 +21900 +281074 +187194 +526723 +568147 +513037 +177338 +243831 +203488 +208494 +188460 +289943 +399177 +404668 +160761 +271143 +76087 +478922 +440045 +449432 +61025 +331138 +227019 +147577 +548337 +444294 +458663 +236837 +6854 +444926 +484816 +516641 +397863 +188534 +64822 +213453 +66561 +43218 +514901 +322844 +498453 +488788 +391656 +298994 +64088 +464706 +193720 +199017 +186427 +15278 +350386 +342335 +372024 +550939 +35594 +381382 +235902 +26630 +213765 +550001 +129706 +577149 +353096 +376891 +28499 +427041 +314965 +231163 +5728 +347836 +184388 +27476 +284860 +476872 +301317 +99546 +147653 +529515 +311922 +20777 +2613 +59463 +430670 +560744 +60677 +332087 +296724 +353321 +103306 +363887 +76431 +423058 +120340 +119452 +6723 +462327 +163127 +402723 +489382 +183181 +107656 +375409 +355228 +430762 +512468 +409125 +270544 +559113 +495388 +529434 +38355 +422025 +379667 +131386 +183409 +573536 +581317 +425404 +350084 +472 +28532 +329717 +230220 +187196 +484166 +97434 +224595 +87483 +516998 +314876 +32610 +514586 +344816 +394418 +402330 +305993 +371497 +315790 +294908 +207431 +561014 +26584 +368671 +374990 +54747 +47571 +449424 +283761 +84735 +522127 +120473 +524656 +479659 +131627 +450959 +153300 +580908 +207785 +49115 +284991 +96505 +278306 +291655 +1404 +489304 +557459 +37740 +157465 +390475 +119166 +33871 +247428 +75905 +20779 +65035 +333556 +375415 +383676 +505243 +87327 +16451 +287235 +70190 +245067 +417520 +229234 +183786 +333018 +554156 +198915 +108021 +128262 +412443 +242543 +555050 +436511 +445233 +207886 +156397 +526257 +521357 +413043 +427189 +401614 +94823 +351130 +105945 +182314 +305879 +526197 +64409 +496800 +236461 +138175 +43816 +185904 +345711 +72536 +526737 +360400 +556537 +426053 +59044 +28290 +222548 +434915 +418623 +246454 +111801 +12448 +427133 +459117 +11262 +169045 +469996 +304390 +513096 +322822 +196371 +504977 +395364 +243950 +216218 +417217 +106736 +58194 +504101 +478522 +379314 +30432 +207027 +297146 +91844 +176031 +98287 +278095 +196053 +343692 +523137 +220224 +349485 +376193 +407067 +185781 +37871 +336464 +46331 +44244 +80274 +170147 +361106 +468499 +537864 +467457 +267343 +291528 +287828 +555648 +388284 +576085 +531973 +350122 +422253 +509811 +78093 +410019 +133090 +581205 +343976 +9007 +92478 +450674 +486306 +503978 +46378 +335578 +404071 +225558 +217923 +406217 +138054 +575815 +234990 +336257 +159240 +399516 +226408 +531126 +138599 +61693 +89861 +29504 +163296 +477906 +48419 +25595 +195594 +97592 +392555 +203849 +139248 +245651 +275755 +245426 +127279 +521359 +517623 +235747 +475906 +11198 +336101 +70134 +505447 +218996 +30080 +484457 +120441 +575643 +132703 +197915 +505576 +90956 +99741 +517819 +240918 +150834 +207306 +132682 +88250 +213599 +462584 +413321 +361521 +496081 +410583 +440027 +417284 +397069 +280498 +473171 +129739 +279774 +29370 +518899 +509867 +85556 +434930 +280710 +55077 +348793 +157756 +281111 +190689 +281447 +502854 +232894 +268742 +199553 +220808 +137330 +256903 +116017 +466416 +41635 +110906 +340934 +557501 +146767 +517617 +487159 +1561 +417281 +489014 +292463 +113533 +412247 +263973 
+515444 +343561 +310200 +293804 +225867 +150320 +183914 +9707 +89999 +177842 +296524 +287829 +68300 +363654 +465986 +159969 +313948 +522779 +219820 +198352 +12959 +266727 +8016 +175804 +497867 +307892 +287527 +309638 +205854 +114119 +23023 +322586 +383341 +134198 +553522 +70426 +329138 +105367 +175597 +187791 +17944 +366611 +93493 +242422 +41842 +558840 +32203 +19667 +124297 +383726 +252625 +234794 +498228 +102906 +287967 +69021 +51326 +243896 +509423 +440124 +122582 +344325 +34455 +442478 +23587 +236904 +185633 +349841 +44294 +112568 +186296 +71914 +3837 +135486 +223747 +557517 +385181 +265313 +404263 +26564 +516867 +497096 +332351 +345139 +444304 +510877 +356387 +561214 +311471 +408789 +561729 +291380 +174671 +45710 +435136 +388858 +361693 +50811 +531134 +573605 +340175 +534988 +382671 +327047 +348400 +547137 +401037 +490711 +499266 +236370 +449075 +334015 +107234 +232315 +462953 +252048 +186822 +410168 +28994 +45550 +453626 +417957 +468577 +106338 +391684 +375143 +217622 +357903 +347648 +142182 +213843 +299148 +352587 +436676 +161875 +144655 +304741 +235017 +181799 +211042 +335507 +553731 +412531 +229740 +437129 +423830 +561806 +337666 +52016 +138057 +70254 +494393 +73119 +262425 +565395 +305329 +489611 +377080 +569450 +549766 +332940 +235302 +53893 +203781 +38449 +114870 +18699 +396338 +449839 +423613 +379767 +369594 +375812 +359219 +229311 +291675 +224907 +416885 +32964 +573406 +17282 +103375 +81860 +576886 +461334 +35672 +243442 +217269 +445055 +211112 +455675 +412384 +88967 +550643 +24223 +504074 +9275 +155546 +329542 +172658 +331600 +315492 +194208 +162867 +324614 +432017 +140860 +157944 +406616 +486079 +361172 +258346 +494140 +315384 +451014 +242619 +413684 +386187 +408501 +121089 +343603 +232538 +558671 +551596 +32992 +406647 +435260 +11156 +40896 +175382 +110560 +252968 +189694 +63154 +564816 +72004 +164788 +434583 +453104 +111878 +268484 +290768 +473215 +450620 +32673 +277479 +529917 +315868 +562419 +378347 +398637 +84097 +120527 +134193 +431472 +400238 +86426 +208830 +524535 +22213 +516813 +526044 +386193 +246672 +386739 +559252 +153344 +236123 +246074 +323615 +92644 +408621 +323231 +499940 +296105 +578902 +150098 +145015 +131431 +318618 +68409 +497928 +362520 +467755 +112702 +163219 +277289 +192362 +497674 +525439 +56267 +465868 +407570 +551608 +345211 +179653 +55295 +97315 +534041 +505822 +411082 +132375 +25378 +272008 +536605 +123511 +148737 +577712 +493751 +29587 +468297 +528458 +491058 +558976 +181421 +209685 +147545 +486964 +570516 +168662 +19446 +395997 +242911 +232511 +317035 +354527 +5961 +513793 +124390 +370123 +113397 +195790 +252813 +326919 +432414 +409239 +458221 +115667 +212239 +279279 +375554 +546622 +317188 +260818 +286021 +377111 +209868 +243148 +132037 +560624 +459721 +193498 +22623 +254164 +112841 +383470 +62692 +227940 +471335 +44858 +213649 +179898 +102837 +474078 +44478 +256197 +309492 +182923 +421139 +275695 +104965 +480780 +449749 +76513 +578591 +336695 +247474 +320490 +246105 +53183 +485740 +575823 +510735 +290741 +37017 +348708 +279784 +453634 +567644 +434192 +482719 +435324 +544299 +106896 +569926 +301574 +492885 +103462 +487151 +513585 +219647 +303685 +459645 +76292 +188579 +154883 +207728 +425074 +310493 +27221 +371694 +119404 +399665 +273556 +454577 +580698 +267664 +295769 +423740 +22461 +22667 +508443 +390401 +369997 +524627 +193349 +132223 +576743 +130586 +487741 +107542 +501420 +520109 +308156 +540581 +231362 +86471 +472930 +351133 +463605 +575577 +159842 +39504 +223020 +63525 +298627 +139883 +375205 +303549 +16838 +495680 +408112 +394474 
+188044 +472143 +463751 +31481 +378139 +190853 +442614 +172006 +140270 +133051 +178028 +495090 +88455 +13232 +46323 +346275 +425905 +487013 +433136 +514402 +521906 +4157 +61418 +567205 +213351 +304008 +296492 +506561 +408120 +415961 +323186 +480379 +349199 +201918 +135023 +456483 +136173 +237917 +4972 +99081 +331569 +150007 +36450 +93400 +487461 +203629 +218093 +487181 +113935 +139512 +210981 +358883 +47419 +248382 +80357 +462663 +83097 +26159 +80429 +283055 +452676 +50159 +12326 +29430 +303264 +158122 +569070 +52925 +534876 +46975 +426376 +170293 +434417 +235517 +218476 +445008 +482774 +305632 +116848 +557252 +229270 +453485 +382214 +54759 +59171 +193328 +17152 +238071 +148531 +409725 +75434 +65358 +473057 +415408 +579415 +48636 +269606 +298784 +162799 +356400 +326854 +24601 +66499 +340247 +20992 +190218 +548464 +122203 +405306 +495376 +536028 +5713 +206831 +9395 +503939 +194440 +474253 +395849 +165141 +204935 +412621 +402922 +87141 +570664 +202622 +137362 +221737 +78947 +112129 +341957 +169562 +164780 +360216 +107641 +415015 +444955 +559102 +123070 +176592 +309366 +116461 +222075 +530470 +214363 +414487 +471567 +292123 +370210 +364243 +510254 +396350 +141524 +220310 +398604 +145436 +392476 +17482 +78032 +336171 +130812 +489743 +346638 +418854 +139072 +263860 +458240 +383443 +337533 +182334 +535608 +517946 +489924 +308117 +129945 +59973 +538364 +513458 +449433 +25165 +335851 +487688 +153834 +347612 +349689 +443688 +486008 +479149 +442286 +61108 +315338 +511546 +506444 +775 +121839 +291412 +497626 +387223 +367095 +557896 +196118 +530652 +447991 +215622 +232160 +296731 +272273 +473415 +364705 +235790 +479950 +141278 +547903 +66523 +353989 +121875 +237735 +100083 +348941 +288983 +390083 +168248 +120776 +489764 +219135 +551713 +256035 +309005 +112493 +579759 +114972 +458992 +295768 +158497 +309696 +363844 +507966 +313491 +280779 +327130 +292901 +127761 +183843 +456521 +164475 +224281 +443713 +72514 +567383 +476215 +565650 +17708 +474471 +248334 +196313 +164759 +212453 +319024 +332916 +35436 +113139 +172716 +7570 +161609 +144534 +137475 +561411 +45844 +332027 +36990 +190160 +421231 +283210 +365611 +511407 +400887 +485071 +481214 +347203 +153506 +397403 +229599 +357322 +76034 +101189 +567444 +92363 +526767 +218811 +362812 +339120 +579696 +399269 +10705 +549012 +410428 +105623 +535307 +419235 +119911 +236604 +515779 +188173 +66397 +549119 +478742 +256180 +128224 +440539 +112818 +315434 +97513 +171970 +433483 +226008 +83217 +424548 +343753 +350334 +479280 +208808 +43266 +399893 +444386 +47687 +499093 +565269 +465835 +167486 +433460 +169872 +299640 +158466 +241373 +50576 +161567 +73560 +349804 +181745 +352684 +450357 +532693 +88335 +256518 +94926 +541197 +14629 +276149 +539439 +498738 +25654 +291330 +146465 +160190 +513064 +75748 +499007 +164464 +134042 +422416 +543315 +34056 +303197 +394801 +293071 +44964 +529083 +414522 +331180 +227599 +581040 +382850 +159898 +176841 +205352 +540782 +406591 +184499 +14380 +350230 +458175 +528786 +314935 +111086 +2191 +20371 +337042 +558371 +296907 +539937 +511463 +574856 +87864 +403817 +152598 +169712 +533227 +173545 +478862 +19455 +258433 +373440 +460229 +525682 +176857 +525050 +277025 +156416 +206784 +415179 +183204 +210374 +312868 +514366 +65208 +376342 +515792 +383066 +85247 +119132 +338007 +88748 +206705 +495808 +532164 +150686 +35474 +207860 +111165 +391199 +346011 +537721 +11390 +487482 +360983 +400347 +92795 +347506 +324322 +371958 +101280 +222842 +563604 +210299 +150616 +96351 +330455 +273551 +228749 +248051 +495252 +372265 +52664 +191874 +157416 
+446428 +136681 +1228 +321811 +93791 +477867 +192520 +157124 +40620 +200541 +103904 +329494 +60093 +112573 +489125 +513115 +322968 +561619 +74309 +572462 +248252 +375376 +217312 +243213 +79878 +452218 +349754 +554291 +434043 +460373 +452591 +567787 +504711 +196007 +511153 +312416 +296056 +308849 +203667 +253223 +331230 +465545 +363048 +69392 +301506 +216198 +147979 +6005 +381870 +56983 +320972 +144122 +210855 +151480 +299288 +462486 +103931 +321079 +4134 +239861 +540006 +413805 +221222 +198943 +450790 +380597 +388298 +58737 +246197 +160726 +398554 +513946 +222235 +323851 +364703 +125643 +169800 +445662 +223764 +575372 +489207 +559474 +7155 +453819 +402720 +102355 +415076 +287436 +35705 +111076 +395865 +310862 +570834 +54728 +215778 +80053 +35148 +350488 +524140 +190097 +36661 +302110 +96884 +383397 +245462 +446958 +138937 +424712 +561814 +276964 +148034 +411068 +357824 +103257 +322149 +508899 +580294 +214386 +114419 +271429 +168260 +209835 +573072 +252269 +31980 +161308 +281508 +192714 +247599 +188948 +180563 +419601 +233660 +154804 +311846 +181499 +5535 +175082 +531018 +412338 +166995 +441411 +427820 +516846 +287366 +67959 +271266 +330845 +74209 +508167 +542699 +66485 +453756 +158412 +443784 +118097 +265050 +29074 +152623 +532493 +292988 +530384 +192660 +502336 +472648 +151657 +351626 +241010 +115070 +268356 +539557 +304698 +251140 +497158 +527445 +385428 +179200 +512394 +184978 +141910 +36311 +579457 +19129 +424960 +181714 +126216 +512911 +488360 +379533 +337551 +325410 +364587 +468885 +211107 +90062 +500446 +105960 +451951 +431431 +134178 +164548 +173826 +373988 +15157 +3091 +393557 +380011 +75372 +37403 +209995 +493610 +315899 +353299 +355040 +547000 +86133 +58174 +377326 +510230 +480583 +158588 +432529 +311206 +127626 +239980 +166340 +104185 +405174 +507211 +542782 +448078 +253477 +542694 +567308 +214853 +288824 +283268 +480757 +503200 +221089 +112388 +171539 +124452 +224200 +206362 +428754 +256192 +119414 +351620 +330050 +547504 +216398 +94261 +19916 +163242 +432588 +143824 +361103 +271138 +260150 +313627 +141086 +308263 +388453 +153217 +372794 +514787 +251910 +351335 +92683 +465836 +18442 +404128 +208476 +47873 +303219 +201622 +367489 +32760 +436174 +401926 +338419 +45248 +328464 +312216 +156282 +315702 +300701 +345401 +515350 +29094 +284296 +466449 +351057 +110672 +364853 +10014 +415828 +397522 +451412 +433124 +158277 +93476 +183387 +109889 +223326 +105547 +530061 +256301 +526778 +80974 +86650 +45835 +202154 +92678 +315991 +423919 +455044 +491168 +272253 +146627 +285349 +86001 +44171 +162332 +257328 +432820 +519275 +380639 +269436 +236016 +543215 +346752 +575970 +423498 +136926 +195648 +126634 +133078 +138656 +490012 +122388 +195165 +434900 +533625 +504167 +333697 +216576 +538775 +125072 +391154 +545007 +150292 +566717 +367362 +490991 +356623 +141271 +402795 +516786 +39499 +536716 +293324 +212853 +276381 +57124 +325992 +394659 +452178 +117674 +461172 +518586 +497021 +462345 +526570 +17328 +202928 +62566 +411277 +256983 +49473 +211206 +398031 +277955 +531178 +453959 +27946 +252844 +30273 +536933 +500298 +229111 +7977 +27642 +303726 +79927 +110313 +527691 +442205 +33345 +365851 +233236 +239157 +409221 +400803 +32947 +422516 +359727 +215872 +559454 +289716 +450247 +57827 +312298 +530383 +260048 +35857 +224222 +299533 +13296 +325907 +117869 +54088 +391011 +340478 +205344 +347823 +468604 +78701 +101414 +197499 +490871 +89273 +380343 +441974 +35974 +486114 +354398 +535536 +294030 +7276 +278742 +137028 +98721 +372764 +429802 +72105 +220307 +116845 +195406 +333000 +130401 +264382 +125458 
+363036 +286994 +531070 +113801 +4108 +47603 +130118 +573924 +302990 +237566 +21470 +577926 +139436 +425925 +36844 +63602 +399791 +35894 +347228 +225617 +504813 +245320 +466007 +553931 +166731 +164885 +19090 +457262 +247806 +502895 +167593 +352491 +520 +26386 +497348 +352000 +386164 +32901 +730 +30925 +333167 +150361 +231747 +462244 +504958 +260738 +313762 +346645 +486118 +202998 +541613 +183884 +230245 +83172 +126638 +51844 +421673 +118625 +377723 +229427 +371326 +104345 +361687 +114246 +397354 +104137 +120850 +260516 +389168 +234555 +26348 +78522 +409784 +303024 +377949 +69887 +546983 +113736 +298197 +476810 +137315 +376321 +410337 +492905 +119785 +158167 +185930 +354061 +106563 +328452 +506587 +536517 +480173 +570688 +376441 +252127 +247720 +132554 +41923 +400317 +170041 +151938 +198650 +6437 +49091 +221820 +455966 +309859 +300659 +15850 +388014 +253386 +65415 +238228 +548882 +302155 +93483 +371869 +397287 +315249 +360564 +448410 +21382 +477474 +144862 +517515 +230190 +322353 +231568 +14940 +132719 +498942 +182469 +113720 +168890 +94852 +246077 +117535 +52596 +419116 +522020 +255338 +125228 +564332 +106375 +249534 +220915 +177758 +293057 +222430 +196878 +554980 +375606 +173081 +84936 +418907 +562229 +457616 +125700 +66038 +239274 +574110 +305540 +98431 +167347 +53345 +438481 +286010 +5569 +343606 +168898 +191301 +236338 +291394 +715 +520237 +236954 +192212 +524002 +471625 +476029 +413124 +203455 +483328 +476417 +114389 +372428 +369221 +322654 +388157 +561314 +264540 +418680 +359540 +426182 +521613 +92248 +74478 +398905 +554273 +125909 +430583 +418959 +503522 +382999 +403145 +536375 +352618 +108193 +279696 +163253 +439007 +204536 +552186 +269926 +372147 +399921 +201418 +240565 +471483 +91619 +393971 +331648 +385856 +567440 +81922 +391722 +372894 +535997 +134096 +545958 +239943 +186929 +34222 +177714 +277812 +197111 +281878 +532003 +557172 +142890 +196116 +385454 +322845 +374987 +123137 +255112 +111207 +304819 +523526 +336046 +42893 +241273 +240049 +90659 +271364 +408008 +253282 +167067 +354278 +178317 +229653 +93333 +163666 +566920 +495199 +100329 +218119 +558864 +257382 +406152 +206587 +420339 +325919 +278853 +555763 +293200 +151000 +209664 +79380 +197177 +353953 +464522 +392260 +46144 +154202 +164366 +206025 +511236 +24921 +497907 +393226 +318138 +364125 +157321 +492395 +187857 +109939 +441500 +144251 +368581 +51403 +283498 +43555 +89356 +404601 +23272 +425762 +460682 +544629 +209829 +322029 +199247 +307262 +571242 +124236 +162393 +104829 +250766 +563938 +237399 +131516 +483001 +21994 +97958 +540187 +264497 +384808 +343187 +51277 +6712 +566103 +435384 +292082 +359039 +165157 +267972 +263796 +489313 +392722 +541924 +554433 +571034 +146112 +201934 +518716 +64116 +294992 +289586 +159970 +479617 +269006 +140465 +513260 +554805 +6579 +452696 +34445 +548296 +372983 +509656 +199339 +130030 +128372 +449454 +139306 +247914 +99024 +499134 +536653 +468917 +412813 +404338 +215303 +455414 +413497 +574988 +397117 +188631 +378701 +241867 +143129 +419884 +412749 +496954 +317732 +16977 +398309 +162363 +147576 +100016 +209018 +92660 +173302 +525732 +449198 +99734 +12733 +172946 +168032 +210988 +340697 +4795 +534887 +483553 +278323 +178175 +190095 +357542 +230432 +227460 +334609 +562121 +378126 +555357 +325666 +451859 +526837 +531710 +297249 +294839 +499785 +254976 +527220 +173057 +11760 +163012 +215998 +114420 +57812 +563712 +513887 +201859 +36333 +291990 +338375 +460621 +518889 +337502 +133050 +80172 +537007 +295270 +335644 +227852 +336044 +204137 +82259 +165675 +295713 +343937 +442567 +356002 +346932 
+62985 +180925 +525381 +13081 +377406 +159774 +462643 +359105 +185821 +390201 +84168 +128059 +80340 +481159 +491902 +306619 +353807 +390569 +541562 +292616 +64621 +439224 +96288 +449798 +160927 +496324 +90778 +126145 +97230 +572767 +11570 +539075 +350988 +3779 +208135 +551315 +216449 +169606 +502 +67765 +281414 +118594 +146127 +543985 +124927 +471394 +385508 +373783 +501315 +140974 +42757 +527054 +202387 +513056 +329931 +153973 +510152 +520812 +534601 +131282 +386638 +508538 +234779 +229329 +396568 +153568 +229478 +153574 +356299 +436694 +324139 +299409 +212462 +478155 +393266 +117836 +190760 +213605 +196 +444382 +445211 +363845 +433277 +521141 +464786 +169076 +301402 +4495 +177258 +328962 +183757 +452966 +416059 +113233 +559417 +280678 +481398 +328372 +234910 +30667 +343062 +383046 +370953 +258089 +404229 +456931 +535183 +300867 +60507 +262672 +7288 +81100 +575395 +539951 +347848 +437594 +352005 +14941 +196453 +528386 +466939 +482187 +293468 +494077 +217285 +362951 +435751 +411480 +517315 +480015 +60610 +353001 +376442 +430265 +478338 +303069 +525344 +437331 +389315 +8179 +31981 +313872 +330920 +515465 +258905 +142249 +323128 +389699 +565012 +124636 +488693 +376608 +309424 +370596 +261940 +39871 +226984 +152866 +515050 +116861 +412876 +120411 +550452 +565273 +273791 +181466 +183155 +293505 +336113 +569997 +303738 +331049 +147030 +74058 +198176 +23991 +198841 +79816 +85183 +261535 +566756 +386291 +318200 +569849 +57429 +36049 +420827 +519271 +24391 +172087 +158795 +133002 +522198 +133698 +499365 +79261 +258860 +457718 +179948 +421875 +558073 +206684 +529762 +456756 +65773 +425722 +53102 +294264 +416730 +38574 +176275 +404297 +127494 +242060 +272212 +189244 +510861 +421370 +208516 +206431 +248457 +39502 +375087 +130839 +308730 +572453 +263474 +544611 +255708 +412604 +390094 +578131 +234463 +493563 +9450 +381914 +148999 +32300 +423576 +569758 +347253 +92939 +112212 +13923 +39472 +363736 +289659 +269949 +88349 +188522 +488915 +129054 +573823 +316000 +440562 +408818 +539302 +199575 +122300 +340047 +322816 +472878 +313922 +228071 +265648 +400166 +169166 +10040 +125245 +148766 +31281 +172599 +431067 +208236 +441824 +175611 +15148 +431199 +521587 +50025 +443139 +349822 +515056 +27530 +571970 +82367 +7115 +424333 +157601 +537506 +447187 +115182 +547597 +5586 +143040 +31650 +196336 +279818 +206273 +403104 +514248 +243190 +558642 +548246 +16848 +391539 +89614 +284589 +191314 +259452 +208380 +209441 +465463 +385005 +321385 +223569 +11727 +87574 +566470 +210890 +323598 +427193 +425676 +401240 +94021 +259571 +447553 +456053 +84693 +14278 +119995 +234595 +408696 +136271 +143560 +357578 +28071 +36561 +157102 +293789 +392251 +356622 +180274 +48320 +475779 +301326 +100977 +413551 +574010 +404479 +80725 +552221 +575441 +197424 +124601 +215633 +359546 +25386 +73199 +334466 +156572 +124614 +34121 +460049 +327623 +441695 +292488 +476514 +464018 +348571 +113413 +125208 +129690 +446218 +493761 +383413 +460390 +343149 +374041 +525211 +451263 +333683 +385194 +107427 +102872 +517249 +475879 +575755 +147787 +297180 +343774 +112437 +142240 +384503 +511111 +51089 +145408 +143582 +408138 +162858 +71850 +126925 +222781 +314616 +425609 +203928 +337563 +223300 +52644 +272566 +232597 +374430 +469075 +267164 +265851 +28134 +308889 +465795 +47263 +233727 +42 +493117 +124621 +533378 +361259 +458750 +429033 +383289 +490927 +520964 +174420 +64425 +378859 +401850 +281475 +46508 +205300 +280736 +110961 +230679 +151956 +321497 +73665 +488736 +165353 +365983 +556230 +21465 +581226 +448861 +3793 +347335 +150726 +75319 +2521 +285894 
+133876 +104589 +346013 +63516 +83656 +491515 +326256 +49942 +28508 +475413 +270222 +235839 +48554 +327777 +111179 +507171 +425973 +449490 +205239 +82375 +459575 +432300 +91885 +340922 +270239 +195894 +121417 +344831 +439651 +232148 +391688 +480793 +534275 +260823 +469294 +8688 +255654 +191300 +383464 +81594 +21240 +478077 +517596 +555953 +294119 +402234 +459500 +564280 +106849 +167501 +98328 +267411 +145512 +272599 +50054 +414156 +161129 +418226 +11796 +502090 +390350 +440500 +240727 +104406 +163682 +437910 +143767 +358901 +527631 +500543 +28377 +231097 +227985 +556703 +421566 +73201 +478393 +280347 +15497 +131969 +515760 +295440 +462527 +42147 +120007 +212895 +425361 +454143 +5758 +366782 +213932 +229848 +458861 +132791 +476664 +150365 +343038 +529649 +180515 +499810 +329041 +15660 +419228 +396295 +502644 +321085 +245049 +34193 +217323 +446455 +528046 +375573 +15802 +147448 +407291 +84000 +280891 +150487 +510606 +163025 +249964 +126123 +233771 +118507 +97278 +357386 +23121 +10580 +2153 +176017 +371472 +373289 +173908 +296797 +334083 +301107 +577522 +125404 +278359 +575032 +273002 +266371 +108315 +255633 +503490 +250051 +143927 +117407 +198271 +447043 +329789 +399991 +458388 +87489 +228411 +494634 +260802 +454161 +446322 +231079 +438373 +395665 +244539 +212427 +356660 +347276 +183287 +498374 +21167 +544522 +418533 +288493 +245660 +406103 +406976 +367313 +455555 +117337 +384465 +185697 +160393 +463825 +276852 +181462 +176288 +452816 +102497 +54277 +225791 +361046 +197278 +9857 +227736 +398992 +55868 +170914 +181677 +467803 +560470 +264599 +540372 +559442 +201207 +137227 +267643 +355471 +245431 +555669 +344498 +84783 +193474 +102411 +401860 +119469 +448786 +449990 +568082 +340472 +307573 +231828 +307547 +82052 +15140 +493612 +503972 +386592 +473219 +495557 +159440 +355869 +311531 +209733 +240119 +415048 +296098 +249482 +15663 +151432 +263011 +488539 +463913 +502798 +174276 +495613 +407861 +229304 +146742 +545039 +161202 +295134 +162144 +453317 +52759 +335201 +222903 +20333 +559550 +336049 +346140 +491223 +306611 +102746 +455355 +449921 +477288 +77821 +289712 +452663 +147758 +129571 +490869 +345961 +94501 +160394 +432993 +178796 +372494 +316323 +383435 +194940 +74583 +148911 +518027 +431827 +32724 +158548 +227227 +500330 +54679 +321024 +471175 +252074 +476569 +573258 +337247 +294373 +558661 +148898 +563267 +163112 +411968 +193565 +455210 +349344 +337160 +160456 +255158 +553678 +123843 +549687 +381968 +579471 +100604 +379841 +357526 +197263 +14756 +412639 +210915 +47204 +539251 +166255 +490199 +260363 +91654 +170550 +187888 +97362 +285418 +176993 +292741 +361901 +296988 +223496 +493753 +114907 +151358 +316534 +472509 +499802 +348519 +347747 +58851 +104790 +396779 +130528 +2255 +19624 +526800 +233950 +505945 +131207 +290750 +114090 +196665 +8708 +134688 +394715 +115088 +492196 +530099 +518729 +291572 +421457 +445365 +78929 +415461 +551796 +210002 +207913 +344878 +303893 +149196 +353275 +122413 +553361 +519132 +467135 +431439 +17089 +322119 +228214 +35062 +105689 +366141 +285651 +60409 +472671 +401446 +492846 +21023 +421952 +374100 +265200 +506628 +62298 +243626 +212122 +350648 +409921 +428140 +399212 +388267 +198921 +429246 +202040 +570001 +261346 +61171 +131815 +455448 +82696 +554607 +102174 +386803 +188421 +191846 +209898 +380117 +321064 +119617 +188651 +132210 +244299 +174072 +542910 +378334 +118405 +543347 +183657 +581180 +395289 +64760 +265584 +29573 +493720 +94795 +315601 +416596 +260106 +244019 +463884 +579468 +112085 +300972 +238528 +382542 +57672 +165298 +46889 +289497 +337180 +481252 
+7913 +432150 +288161 +403758 +257336 +565331 +346589 +270785 +205670 +231580 +508580 +98871 +239997 +554579 +160057 +404922 +78771 +380756 +171199 +148077 +22892 +145378 +26967 +235200 +176007 +90349 +554377 +189744 +257053 +270515 +66508 +113890 +291983 +558927 +420916 +140908 +58384 +438226 +575776 +106935 +40602 +468993 +494810 +210408 +365685 +483722 +39430 +258793 +272615 +51476 +189919 +443887 +391648 +422670 +445135 +198959 +405529 +459757 +465489 +81827 +262576 +408289 +309237 +76249 +460091 +512630 +45959 +280320 +200492 +404652 +48475 +18480 +457097 +65889 +162256 +265950 +520752 +299082 +51500 +499313 +104906 +35438 +167647 +7274 +387824 +242139 +173166 +399830 +12014 +510642 +154053 +67785 +78170 +514118 +87998 +52703 +203539 +534533 +85926 +274438 +401653 +458790 +509262 +144481 +387515 +246649 +503207 +235131 +501531 +62025 +43286 +272323 +326128 +561889 +167529 +171067 +50778 +301282 +469719 +509388 +480317 +379055 +546428 +192763 +445602 +420882 +232790 +174332 +232865 +292822 +511145 +119502 +312591 +110330 +281353 +116244 +58778 +428079 +64902 +520840 +232054 +473214 +572574 +296684 +351590 +217997 +178761 +71618 +226496 +285212 +381195 +499903 +232849 +468997 +345559 +503097 +578570 +396404 +405223 +578752 +403500 +188958 +504498 +491623 +462929 +525762 +395550 +574227 +240751 +169356 +524694 +40886 +571635 +487774 +86220 +95677 +268987 +502599 +155270 +103855 +125100 +241355 +220214 +391774 +110618 +154587 +134483 +458781 +360877 +465963 +194595 +346934 +127153 +188078 +553869 +102665 +400547 +33759 +42779 +397587 +140295 +151807 +549136 +470288 +89738 +328368 +546934 +164255 +563683 +399988 +360951 +217303 +326781 +546133 +135399 +94666 +330037 +569839 +411070 +497466 +404805 +417854 +318442 +255036 +457230 +346863 +307438 +370448 +5124 +152582 +38118 +12179 +58462 +308420 +329456 +74920 +250368 +186428 +556073 +111806 +361244 +80273 +230964 +156754 +503101 +75173 +389404 +195538 +88848 +286018 +245481 +140929 +533721 +268378 +70048 +315467 +46269 +372807 +192403 +387328 +163033 +481314 +65306 +192529 +321107 +112232 +441216 +412399 +565391 +220670 +61471 +463290 +346707 +67587 +147624 +13031 +396754 +278601 +439426 +42834 +281829 +376209 +353148 +556562 +97579 +217989 +319530 +82551 +235319 +431799 +53892 +52853 +54533 +88897 +225093 +386777 +546742 +273684 +413900 +245447 +577995 +16249 +188414 +485142 +199602 +89258 +109679 +502397 +14494 +13632 +51674 +244999 +305050 +455956 +426795 +560700 +327306 +410301 +343803 +539422 +156740 +527845 +100582 +9941 +466585 +61515 +231895 +157052 +41271 +148128 +141172 +320232 +78565 +539883 +391300 +365182 +322194 +116517 +323496 +473783 +519874 +440706 +361587 +265153 +329946 +342814 +32258 +153510 +194555 +309317 +245006 +300303 +97767 +218224 +370170 +290477 +207178 +456730 +209480 +513775 +199516 +581542 +32524 +416337 +96241 +506279 +422893 +248911 +509855 +355183 +201220 +234914 +333436 +68198 +429074 +328430 +160531 +467854 +280688 +140661 +349525 +267315 +565543 +313162 +25751 +232574 +560358 +505213 +494427 +160308 +287335 +99182 +413260 +558808 +290839 +122954 +229221 +192007 +243189 +117645 +552824 +366111 +102056 +356949 +566298 +97899 +422545 +343769 +13127 +179273 +104486 +37660 +304099 +517570 +20207 +36484 +36492 +155974 +107257 +534019 +522371 +222825 +96183 +509227 +302260 +95078 +280918 +367582 +317033 +347982 +73209 +290521 +187243 +425151 +483723 +573796 +187249 +144114 +132992 +35887 +546067 +426532 +45626 +461805 +129989 +541478 +485489 +578498 +485483 +144784 +248224 +372362 +92050 +423519 +473118 
+177207 +105455 +276434 +157767 +384335 +509497 +338191 +224010 +327388 +96988 +43376 +67867 +320743 +555197 +104453 +14439 +512194 +396387 +252559 +108953 +461262 +66320 +97946 +238065 +306139 +572408 +577864 +81004 +464526 +89378 +193389 +259049 +85665 +381134 +412419 +308947 +557510 +502084 +288290 +254609 +188752 +439525 +13980 +140513 +240173 +305268 +38678 +394050 +402926 +364079 +159260 +293034 +55429 +289640 +291028 +211120 +48050 +93887 +361029 +486026 +388374 +207803 +540174 +530630 +430359 +36420 +120099 +199764 +492911 +84498 +200882 +139843 +4975 +421209 +259513 +520324 +211317 +236457 +419344 +3867 +287846 +50434 +26624 +507235 +16238 +103705 +497555 +440060 +175825 +245460 +308276 +178535 +391735 +206391 +201550 +400945 +194634 +262360 +554142 +407574 +225225 +246057 +498627 +486172 +226571 +461751 +459733 +345869 +503841 +286460 +45644 +22861 +285599 +580284 +569565 +286778 +150024 +542101 +484075 +538153 +20470 +128034 +544120 +357109 +450728 +550968 +326230 +558809 +76334 +555387 +47121 +523978 +11081 +378134 +116279 +364884 +488250 +551957 +322824 +545564 +255573 +286327 +355453 +361933 +434897 +32597 +226761 +166482 +557564 +208166 +232115 +283520 +137395 +555894 +103509 +174284 +458313 +316147 +344059 +370701 +548930 +89894 +373662 +572095 +19324 +574411 +45746 +480122 +63950 +92339 +201111 +157053 +401539 +427956 +339099 +274651 +159537 +556101 +323399 +564337 +514915 +556025 +66427 +322357 +173737 +369128 +420230 +45176 +509675 +374677 +272311 +109797 +384723 +383678 +453040 +91080 +301634 +533003 +40361 +221605 +216228 +104002 +161011 +146123 +214421 +496252 +264948 +9759 +138856 +316189 +145734 +50411 +325157 +259099 +516856 +529668 +135976 +467130 +367433 +385598 +520933 +102805 +30066 +436696 +216837 +380754 +350457 +126974 +565374 +73832 +214703 +110501 +380609 +135872 +140231 +251816 +133836 +398866 +230362 +426815 +2240 +51484 +546325 +224093 +221190 +525024 +238806 +99908 +165795 +109146 +537727 +496571 +183803 +211175 +433845 +168692 +526394 +368402 +256309 +468972 +139169 +398440 +171678 +547341 +64332 +533589 +483249 +406000 +330348 +439188 +572886 +252829 +242724 +139127 +404568 +45809 +52257 +458727 +334509 +559665 +60992 +290896 +503106 +27972 +536891 +410855 +31202 +457882 +403315 +87399 +395291 +322141 +226377 +202799 +420826 +553034 +212077 +97693 +266370 +101656 +504142 +342933 +87567 +342060 +268854 +437028 +20175 +198625 +405047 +382374 +338291 +403975 +527906 +322429 +545550 +140043 +107389 +74059 +315621 +110138 +78381 +295576 +494438 +106335 +472349 +15818 +162358 +366484 +44604 +66524 +118606 +366873 +270721 +556478 +350789 +298628 +163314 +262800 +459428 +491725 +285421 +406332 +498280 +34535 +524282 +315744 +226592 +218294 +459141 +242034 +114164 +293733 +248242 +452881 +441496 +54358 +177489 +372861 +349489 +483941 +572802 +356494 +193875 +146570 +58253 +21338 +6220 +341933 +533368 +1818 +428248 +293026 +227656 +193021 +326938 +512966 +226020 +343059 +249720 +540106 +375278 +300023 +126512 +517135 +472540 +361439 +132702 +503294 +109537 +540669 +332007 +245266 +313999 +10386 +225715 +311567 +103837 +302405 +248616 +102654 +155087 +124756 +379659 +569272 +160166 +428234 +422280 +174425 +133412 +174503 +216581 +345063 +52949 +69536 +216161 +272728 +200870 +120792 +193480 +493923 +445567 +558539 +51938 +422706 +416271 +244160 +437898 +327352 +305480 +349459 +522418 +485219 +225133 +361400 +546569 +190015 +348216 +421822 +457683 +178683 +40894 +234526 +465074 +518725 +168096 +210190 +139605 +35195 +463640 +286770 +141651 +112022 +532552 +325327 
+227224 +17272 +84163 +331475 +126065 +289309 +8583 +52952 +189427 +579693 +437947 +187565 +215982 +356424 +453731 +463522 +372316 +251797 +70187 +280515 +556608 +341635 +391067 +469480 +476298 +57917 +146672 +122747 +394328 +12209 +80013 +573291 +278449 +129659 +579560 +557190 +227468 +334782 +51157 +23774 +9426 +86582 +39211 +275751 +131597 +51250 +357255 +9041 +346482 +9647 +157019 +409016 +273416 +114414 +298172 +388854 +275025 +58079 +518034 +503518 +146710 +120632 +474680 +303713 +259097 +479630 +208318 +437298 +173704 +361831 +371638 +344279 +230175 +72507 +417980 +72621 +163057 +92894 +543525 +577364 +263696 +472732 +66027 +391584 +197745 +131019 +65604 +91318 +535934 +212646 +576354 +482071 +160556 +120129 +7260 +344881 +447548 +318193 +30383 +527002 +34904 +35677 +526222 +105261 +401897 +399452 +25660 +524595 +384512 +117543 +514600 +268944 +112664 +222340 +569058 +495332 +192153 +75591 +286711 +174888 +577065 +25508 +169972 +401820 +425475 +290700 +173091 +559101 +122418 +244124 +198645 +325519 +276437 +528276 +146614 +45574 +417804 +326420 +250594 +27353 +310407 +370103 +274957 +561160 +167598 +397166 +257458 +404546 +148392 +373396 +62230 +493522 +563665 +274240 +269815 +79024 +527427 +84674 +486788 +267690 +443347 +149304 +412285 +207041 +412916 +10764 +151338 +299000 +17882 +475510 +398188 +558213 +70493 +180779 +347210 +280211 +58146 +379022 +504125 +537604 +464858 +329573 +568623 +228309 +454444 +552775 +557884 +435671 +168706 +142257 +571437 +574845 +387773 +321008 +574208 +405811 +375426 +321887 +256852 +433554 +517029 +125870 +80395 +497139 +490008 +405279 +571857 +225738 +514913 +456239 +499402 +96440 +487607 +370999 +319617 +370233 +60760 +352703 +478575 +84170 +134112 +77689 +185036 +73738 +547502 +104782 +213276 +136908 +436273 +442149 +355000 +374061 +249884 +105711 +136464 +146997 +76351 +388487 +99115 +124135 +24721 +132931 +1149 +182403 +386089 +81691 +480657 +441522 +60989 +268000 +55840 +514321 +577959 +359638 +457986 +533596 +60332 +367082 +772 +535842 +473541 +270677 +409009 +259216 +302318 +117036 +331372 +231125 +384486 +405214 +20760 +579760 +172995 +359110 +83110 +410068 +109916 +328757 +299261 +19028 +515660 +40757 +10256 +442695 +553097 +185903 +74388 +425120 +241326 +299609 +29397 +328728 +283881 +344029 +367336 +27075 +163628 +127263 +488979 +460147 +473050 +405762 +221547 +131581 +561187 +406489 +140696 +452721 +530466 +118965 +398803 +218365 +298738 +19441 +521550 +120157 +498687 +4754 +365866 +70865 +235156 +133386 +142742 +221183 +262391 +567053 +520982 +121349 +448779 +440354 +3983 +578993 +519691 +160703 +103307 +300408 +137106 +488377 +523660 +318022 +132578 +302520 +153040 +408817 +145227 +311190 +159662 +202923 +256775 +359864 +384848 +336404 +185303 +421703 +362682 +464622 +246590 +422729 +165500 +42563 +219216 +520232 +95063 +265547 +532686 +290558 +112591 +448211 +315281 +545475 +225850 +232460 +82740 +272880 +347254 +122047 +352151 +541486 +97249 +200252 +544782 +499571 +379014 +303534 +479909 +305464 +323682 +181524 +273855 +190783 +567801 +119752 +241503 +536429 +327323 +128756 +349868 +500495 +372260 +315824 +484986 +364993 +124759 +300124 +329319 +68628 +14549 +121897 +506595 +115709 +199610 +230150 +31717 +139549 +222332 +534161 +360393 +541664 +507167 +286523 +158660 +66926 +195750 +80022 +589 +252220 +47255 +247014 +49881 +455005 +232453 +445722 +516805 +544122 +541917 +469356 +370042 +130522 +502163 +307866 +408894 +524247 +52233 +177861 +348881 +357943 +295303 +475389 +431691 +61316 +143998 +503483 +340155 +488785 +133636 
+133567 +251627 +470095 +34873 +88815 +261178 +468612 +127477 +157960 +15687 +303089 +572331 +456708 +190515 +126131 +239194 +332074 +129765 +107167 +478184 +421833 +359715 +112440 +331317 +74492 +505386 +247839 +534210 +134503 +422700 +352111 +98674 +546219 +520508 +503008 +461953 +101913 +362092 +22103 +359128 +316666 +335579 +414750 +297980 +365652 +53635 +547601 +97589 +570515 +7125 +99828 +321437 +80671 +426275 +294883 +212605 +424293 +338108 +25005 +6949 +234291 +428399 +7149 +343076 +575287 +431848 +307611 +293909 +542511 +564739 +573843 +356878 +472864 +336793 +121904 +161060 +254004 +269873 +216428 +77172 +346517 +498555 +203690 +348973 +117704 +552672 +275270 +208107 +314016 +427518 +278134 +53420 +318777 +238980 +350614 +467315 +61233 +272188 +550797 +125051 +553965 +187286 +282912 +102532 +156076 +467848 +130875 +531585 +523470 +507684 +332582 +438989 +489209 +125944 +127474 +371957 +570349 +283286 +541635 +547106 +253630 +388677 +572525 +542302 +554537 +367205 +228300 +443498 +356432 +123946 +490441 +211063 +224542 +116574 +434510 +33116 +353136 +134167 +128291 +542510 +433963 +147453 +365766 +374806 +336600 +38238 +165476 +535578 +127788 +157099 +173640 +114348 +496722 +58141 +467296 +235864 +5154 +22775 +422536 +136820 +453438 +446359 +41990 +422240 +39267 +391392 +233825 +308504 +478250 +87328 +4079 +127074 +267709 +377635 +353231 +185768 +487897 +124215 +249757 +341681 +557552 +280733 +374734 +281601 +456420 +222266 +491947 +432732 +467157 +94025 +410328 +428291 +397639 +163528 +234697 +557573 +208363 +515962 +358658 +373075 +438995 +425672 +450169 +216103 +254638 +288591 +53626 +43417 +372252 +5038 +218357 +120860 +399349 +485509 +530261 +477087 +352302 +96075 +495443 +133928 +197175 +134074 +212553 +448181 +152000 +254277 +105734 +75481 +343662 +479350 +554347 +71090 +297426 +22176 +277622 +469235 +163041 +221272 +154263 +89296 +68411 +192871 +183217 +258141 +53058 +540529 +566414 +560948 +254535 +246076 +135972 +420069 +431023 +343643 +32682 +515176 +222635 +377155 +547041 +513283 +26017 +366096 +252133 +138078 +25685 +321798 +549361 +14088 +423048 +570810 +374974 +447501 +492544 +554046 +575357 +420791 +6019 +340451 +66800 +565575 +148055 +330432 +483038 +455004 +288765 +11034 +86988 +347142 +450559 +543581 +293757 +556901 +533032 +333020 +260266 +22420 +13948 +512657 +214124 +231236 +177149 +560879 +491793 +35767 +312878 +118542 +450596 +423773 +48653 +224523 +509577 +462677 +75405 +350023 +452122 +42008 +302555 +382309 +468483 +368684 +372580 +31333 +153697 +124876 +330023 +315672 +53990 +136533 +82815 +356836 +414821 +268717 +7333 +77544 +525373 +371042 +227048 +576327 +419309 +239773 +8119 +424135 +297425 +222711 +489909 +393995 +31019 +539326 +517612 +102461 +199989 +483374 +44952 +103863 +528980 +441543 +85381 +247234 +50924 +483994 +87456 +424271 +356091 +534669 +378831 +560662 +298773 +257896 +498274 +305800 +40517 +183949 +276840 +84442 +297620 +298252 +119088 +233315 +283977 +345154 +287649 +427311 +63399 +4700 +463611 +224104 +209388 +431655 +364190 +28864 +412455 +283290 +228541 +422200 +985 +133596 +323853 +503081 +130732 +224675 +199688 +230862 +21396 +485390 +1532 +125778 +235541 +370478 +522478 +514292 +384338 +531707 +178746 +532747 +62915 +519491 +140691 +112093 +358024 +263687 +297595 +506085 +102446 +325768 +29558 +222054 +466965 +316254 +546500 +216785 +194184 +464390 +348371 +231582 +208995 +464339 +308856 +340946 +214604 +570586 +182227 +248441 +89078 +376310 +73450 +115924 +308235 +15994 +8749 +429679 +37751 +122040 +284286 +388707 +248163 
+11320 +427997 +282062 +237600 +376751 +223314 +86215 +12443 +163255 +564940 +462640 +522713 +306303 +460675 +126833 +26201 +224757 +357899 +546782 +96427 +480944 +479556 +569273 +520528 +190690 +344832 +462466 +270354 +559776 +279259 +280909 +227781 +163798 +491098 +439658 +416088 +107375 +74132 +379800 +511654 +346687 +226161 +578849 +544272 +146149 +570624 +178299 +126671 +356380 +530766 +175954 +158798 +422095 +55780 +512276 +560626 +187329 +513125 +347216 +306486 +161840 +180917 +188192 +421437 +93120 +324891 +252216 +488476 +578347 +101959 +10693 +170038 +213586 +210439 +469202 +381463 +343248 +127785 +287328 +538690 +16382 +293022 +112378 +435785 +56092 +381504 +284365 +406129 +233119 +53629 +188509 +191053 +81056 +82252 +538319 +38439 +181948 +439710 +529344 +434035 +342958 +563882 +37734 +364743 +330986 +546226 +463211 +62210 +442724 +232241 +293858 +119345 +61953 +577033 +522015 +381587 +350107 +4936 +511307 +228771 +177811 +231450 +176168 +84540 +259408 +264238 +539738 +255827 +459382 +221105 +431742 +204337 +227741 +336356 +37655 +167159 +59352 +165937 +53956 +378712 +88462 +495786 +542938 +566498 +367228 +157577 +442661 +62363 +390689 +480664 +521540 +414249 +20571 +160855 +451683 +156832 +570045 +326542 +568276 +568717 +563311 +113579 +218268 +546095 +160661 +341118 +150649 +462632 +198972 +220025 +61720 +430681 +524011 +457217 +40064 +285583 +314493 +78023 +470882 +298722 +555597 +489829 +314779 +367818 +138503 +243737 +580255 +444565 +386677 +190841 +493074 +234347 +466988 +227033 +519039 +351554 +390585 +443303 +140983 +81079 +538005 +169757 +368780 +457322 +341804 +409116 +181805 +284292 +551358 +344548 +503569 +336587 +417055 +522315 +58705 +148955 +375530 +474934 +577893 +28881 +360772 +445267 +244737 +355777 +72811 +190788 +54513 +243075 +518551 +487530 +292169 +69293 +397303 +129285 +429996 +109532 +53802 +340573 +91280 +535602 +270908 +381925 +549220 +488573 +47131 +32735 +117525 +279085 +43961 +188906 +394677 +395 +185201 +189365 +127596 +32712 +504810 +3703 +182874 +146981 +306755 +453093 +520503 +169808 +225670 +91063 +348584 +461802 +572555 +185922 +131497 +46736 +536006 +256505 +214975 +13445 +350736 +98115 +50304 +361180 +511333 +564820 +429717 +222500 +40083 +538230 +349438 +371250 +528578 +240418 +302380 +261758 +535809 +308388 +578878 +509451 +46919 +562592 +499950 +90374 +318146 +195353 +355325 +314515 +237277 +203024 +238911 +32039 +145591 +16030 +135411 +229350 +421757 +48034 +183704 +307292 +97974 +275999 +448256 +451915 +119113 +143503 +494141 +50124 +306553 +35526 +255279 +560908 +247264 +367599 +192782 +511324 +574350 +67569 +204360 +111907 +2839 +513971 +245201 +185240 +339468 +540101 +539673 +194425 +22168 +520150 +301595 +96006 +68286 +131280 +356662 +182441 +284749 +107108 +49761 +386718 +55244 +187990 +248678 +147721 +425727 +360350 +310797 +76765 +400489 +247639 +279864 +44699 +356145 +69138 +445041 +560598 +165464 +536343 +7818 +322831 +334760 +451463 +348730 +285967 +286353 +201887 +166165 +359 +465591 +519359 +550444 +402711 +3661 +132706 +534983 +306281 +150317 +15978 +580029 +496090 +267127 +210980 +384015 +222559 +2235 +255649 +278168 +440840 +27326 +202562 +230268 +362712 +1573 +107661 +464515 +373132 +447242 +547440 +43613 +200143 +260883 +250901 +64693 +408480 +204757 +319933 +147471 +381332 +518197 +27656 +260257 +434580 +159203 +568630 +497441 +499597 +60179 +574804 +343254 +501762 +220704 +524536 +86946 +456046 +62937 +49633 +144305 +475593 +478553 +574145 +63648 +3794 +303177 +1340 +82835 +371427 +156747 +448694 +219567 +75095 
+242615 +492077 +132776 +199125 +349622 +195754 +455548 +181873 +138185 +338044 +362797 +180953 +505826 +69773 +304834 +162580 +154090 +519853 +319687 +132328 +27969 +52166 +100547 +568131 +415218 +348045 +478159 +402869 +10211 +26547 +551692 +105432 +313340 +182348 +383419 +570947 +345353 +226883 +255784 +214199 +262262 +283261 +449708 +299970 +392391 +245997 +330410 +343571 +519542 +37470 +42144 +342521 +498537 +10935 +443860 +512648 +146099 +98599 +123932 +489861 +262895 +184700 +218587 +363581 +21001 +481404 +249356 +64240 +492349 +199236 +481064 +353405 +116479 +132024 +138768 +524665 +434511 +326970 +138784 +340368 +312081 +366615 +171942 +21232 +473850 +93686 +295574 +51054 +162692 +174091 +20070 +270066 +492816 +20904 +484500 +147140 +242972 +420081 +63563 +261712 +316396 +49413 +520787 +510955 +393840 +142487 +19817 +261180 +413736 +230619 +484614 +337011 +496575 +4338 +552545 +5601 +75426 +568863 +184227 +170629 +438567 +505132 +541353 +284674 +322567 +182423 +312051 +18896 +40471 +321725 +188850 +37119 +95569 +187362 +397133 +528972 +487131 +174989 +370325 +223554 +385633 +103485 +537574 +63240 +256566 +86467 +401092 +486968 +308441 +280017 +527464 +131965 +310479 +125556 +220160 +532963 +310052 +107963 +293841 +388534 +45603 +368949 +391825 +5107 +569705 +231549 +250108 +152933 +206433 +358817 +434006 +283904 +152808 +539975 +24629 +410231 +13465 +502318 +51961 +445594 +209062 +38726 +295420 +430079 +240147 +561512 +35795 +102589 +505619 +565469 +271772 +520561 +372300 +178807 +492805 +1083 +303704 +125635 +217521 +278032 +208688 +335325 +140435 +313990 +143822 +320857 +549230 +76844 +424219 +463876 +243199 +2988 +215170 +30012 +377738 +408568 +490624 +404839 +138316 +157206 +404461 +122934 +263346 +21327 +99913 +67975 +339676 +391891 +365305 +337055 +233834 +125524 +46869 +32577 +304744 +104176 +167356 +210404 +307989 +217223 +196046 +454414 +16356 +244487 +543660 +197461 +199681 +476787 +455085 +307074 +260547 +107468 +334769 +29437 +166837 +53838 +502979 +82678 +288860 +535523 +311950 +237723 +98656 +223123 +273930 +58057 +544334 +324857 +198043 +535326 +316505 +12991 +576820 +43611 +107839 +275749 +456695 +78188 +375786 +466239 +184830 +537128 +434513 +244344 +374576 +69140 +434247 +555009 +510857 +220819 +20598 +99416 +74967 +533129 +515577 +213361 +330974 +548848 +431557 +503278 +130043 +402570 +320554 +559884 +252629 +364596 +423484 +271230 +105552 +143143 +285751 +49994 +204162 +80646 +381393 +123415 +118417 +30932 +425412 +388130 +551243 +468337 +484893 +25014 +174390 +463781 +124647 +60823 +361964 +425702 +575110 +532390 +230881 +84592 +189997 +221307 +361472 +32364 +71918 +316365 +492378 +234251 +48504 +418070 +89884 +562045 +506552 +66360 +122962 +262605 +529939 +345229 +294853 +344397 +56091 +8599 +459823 +175785 +226128 +259983 +354515 +379144 +384995 +205253 +116786 +441432 +448810 +83452 +465129 +506906 +90616 +551959 +406404 +157891 +362090 +439630 +45099 +61960 +478430 +489605 +127050 +579872 +475798 +64510 +447733 +33066 +102848 +538819 +323760 +200401 +179765 +251317 +239376 +83836 +578092 +522452 +393056 +278848 +27787 +377239 +473427 +83065 +377005 +576539 +248019 +473370 +536369 +92648 +332461 +437609 +274800 +388846 +323048 +193407 +541898 +480140 +46526 +26432 +339738 +325991 +37705 +528033 +542922 +313420 +190463 +531000 +454907 +26448 +238199 +476652 +457147 +364256 +72632 +430380 +315448 +353320 +18158 +91527 +454252 +546987 +386370 +38064 +19763 +64152 +453216 +55223 +361860 +522566 +509531 +438432 +31164 +163290 +389197 +333440 +173464 +447842 
+381615 +99961 +156126 +103134 +394940 +165638 +261706 +378311 +534081 +373848 +401642 +338019 +378096 +289610 +547421 +174672 +133343 +191360 +293751 +520892 +145214 +167668 +37456 +460962 +465267 +292804 +347529 +203661 +10766 +27371 +203845 +155736 +136715 +463588 +26640 +547612 +131453 +184274 +442456 +265085 +223256 +129420 +23019 +536467 +194532 +127585 +392637 +330408 +524775 +31993 +433924 +502852 +553129 +559364 +297343 +71360 +225537 +271148 +345499 +475893 +237463 +5278 +501243 +413235 +444236 +541071 +380088 +468063 +94858 +225913 +295614 +210276 +170975 +205570 +422375 +550365 +308702 +484627 +565031 +98979 +480345 +579548 +272673 +436875 +287874 +16502 +274917 +281809 +442968 +289263 +347766 +160933 +84533 +266409 +122199 +396200 +30958 +504541 +1591 +89432 +387150 +306383 +15260 +154515 +50752 +166913 +102644 +100196 +160278 +349579 +442536 +17923 +310564 +62020 +152004 +578330 +126299 +527025 +83494 +226400 +268435 +445334 +310391 +505156 +19157 +44677 +318171 +447765 +354369 +527486 +329939 +184771 +134856 +467675 +517133 +89697 +447080 +70685 +144938 +519673 +485758 +454957 +564851 +189451 +408757 +192616 +280734 +305060 +243946 +99179 +303971 +170519 +48917 +549965 +300245 +384101 +576607 +186709 +516341 +241668 +133470 +134811 +500825 +464689 +29833 +343820 +213429 +387434 +279305 +444207 +210777 +372043 +189868 +572229 +8495 +370090 +450282 +277080 +199158 +109612 +567708 +245659 +485129 +268363 +23448 +5352 +235597 +6871 +348720 +94113 +314613 +63729 +114458 +215394 +460460 +240387 +398726 +135604 +571728 +415770 +286908 +138151 +146272 +344094 +345209 +241187 +282768 +113037 +545583 +219283 +145873 +285957 +489235 +157271 +197458 +502671 +499845 +334884 +79084 +505573 +115618 +561491 +354202 +279838 +190734 +134738 +269450 +482784 +144610 +52774 +290659 +440646 +25807 +442952 +159215 +318224 +73445 +211653 +527960 +401862 +431026 +488755 +292278 +400554 +272630 +382668 +470298 +166426 +129645 +28820 +161227 +417696 +560677 +283216 +28978 +310302 +154419 +230450 +328289 +73118 +104691 +15085 +405574 +510548 +470005 +102928 +569249 +413126 +77282 +96732 +359020 +42182 +250875 +106206 +354929 +320796 +453341 +237318 +254834 +137265 +399865 +292685 +152252 +319579 +81484 +16599 +162257 +351034 +396051 +502275 +308278 +34483 +13333 +320290 +321579 +349794 +99219 +200162 +369470 +487583 +62703 +251639 +138246 +157170 +477112 +283963 +74860 +307057 +364075 +295491 +34757 +400161 +170194 +120874 +492817 +3817 +183973 +135436 +512989 +114744 +379210 +201072 +293785 +578385 +237420 +7888 +18224 +155317 +522406 +441440 +110482 +173400 +183348 +552504 +475660 +166948 +147025 +443259 +578792 +245227 +546687 +474519 +393284 +249668 +87493 +151651 +100306 +540466 +546556 +212675 +282942 +21310 +385535 +7304 +303409 +386116 +574297 +514550 +217133 +533553 +447152 +578703 +45392 +166205 +180154 +25143 +338802 +330110 +261389 +343506 +442726 +285388 +554934 +421316 +479912 +85192 +34874 +487266 +226173 +20748 +360660 +574509 +543364 +1554 +125539 +566931 +312889 +466945 +444804 +257187 +568587 +427160 +71123 +563849 +138589 +162841 +129663 +107226 +140686 +321663 +437117 +179808 +321718 +62398 +16497 +468933 +219841 +355430 +293554 +293044 +109516 +485887 +490620 +579893 +427135 +31636 +217919 +432441 +314396 +119802 +393682 +201764 +146193 +116358 +84825 +208311 +419774 +177468 +72052 +142585 +519598 +464006 +556083 +412136 +169361 +442929 +84567 +549932 +75560 +74656 +93314 +393838 +383018 +372433 +431281 +556278 +5513 +108503 +500478 +148588 +138713 +368153 +22646 +303778 +270758 
+276706 +275429 +492025 +169111 +494328 +35891 +70258 +400528 +165229 +460494 +269311 +307658 +98283 +369294 +319345 +414578 +541550 +425388 +129855 +99477 +383073 +387906 +293124 +155873 +549224 +266021 +52869 +1584 +421902 +498535 +277235 +153013 +452013 +553561 +138040 +20820 +58483 +423506 +569001 +325153 +383039 +213421 +38825 +453283 +384661 +127702 +238147 +104893 +577826 +64974 +240655 +459153 +145665 +49810 +65008 +545385 +125070 +46433 +143329 +429174 +52947 +321314 +253341 +157365 +453162 +111910 +339019 +239575 +362219 +80652 +247317 +460286 +365724 +160875 +372220 +483389 +572181 +146190 +580975 +54761 +348488 +416104 +468778 +18833 +251537 +234366 +510078 +14723 +338595 +153797 +513098 +467138 +404618 +261982 +545730 +135846 +108244 +562557 +180524 +227370 +341856 +131743 +255691 +497878 +68878 +430640 +441473 +347664 +214369 +347018 +225238 +421762 +317024 +6180 +172004 +303101 +22488 +193494 +199346 +409627 +315350 +263463 +190722 +523292 +363902 +573778 +437290 +389812 +517082 +145073 +37907 +489763 +456261 +270386 +508917 +566823 +543897 +362482 +130966 +66632 +181962 +274613 +135708 +549746 +323766 +366714 +353295 +318813 +153307 +213693 +293378 +149446 +199927 +580543 +331727 +238488 +472833 +308645 +424225 +228746 +110435 +495377 +240646 +274491 +130921 +140006 +4688 +115241 +76962 +66650 +47718 +224991 +434187 +272048 +11169 +158222 +154000 +507436 +443499 +109937 +309692 +534018 +22797 +163339 +168683 +210098 +246069 +137954 +143320 +262587 +414795 +226938 +536831 +128791 +459590 +50514 +30067 +317479 +378655 +229968 +522702 +11122 +515266 +136600 +224509 +149912 +97656 +120747 +349480 +155199 +528731 +523807 +168544 +325664 +229981 +434410 +431208 +508996 +63791 +89225 +513690 +136740 +224364 +515424 +508302 +418175 +465552 +439907 +272097 +451087 +396304 +342273 +52507 +300066 +380089 +326248 +167906 +37846 +262993 +60090 +499249 +90432 +74456 +264660 +325598 +480985 +245411 +425644 +224724 +475439 +246478 +487438 +563731 +441854 +522665 +245915 +85747 +315162 +108761 +407521 +388528 +389453 +298331 +447791 +368820 +440034 +305677 +122208 +182369 +543531 +151820 +63650 +457580 +563381 +320899 +14869 +137260 +61925 +376307 +80367 +269089 +203705 +274835 +267321 +418106 +471273 +74037 +227855 +519758 +89045 +321217 +324203 +479129 +503431 +368528 +527718 +278579 +13525 +291582 +301837 +31667 +68120 +14007 +114158 +124262 +33626 +53949 +187585 +192247 +208844 +212766 +318671 +575012 +439339 +364073 +419624 +178078 +427783 +302159 +339368 +190680 +23807 +288579 +312720 +15778 +553558 +571834 +574376 +122161 +493815 +472376 +483432 +149123 +51628 +264628 +26609 +23696 +485081 +441323 +451679 +42055 +378795 +86439 +366493 +520996 +332869 +18014 +554523 +83476 +6040 +421834 +424392 +308160 +335233 +249809 +349098 +358090 +187349 +61782 +35498 +386514 +207108 +578418 +84447 +104108 +126107 +211674 +111909 +490708 +477025 +206757 +556205 +142484 +454296 +464366 +358254 +215482 +468548 +82680 +100909 +405432 +85764 +94651 +63973 +8131 +288592 +257470 +47597 +321557 +34520 +134066 +246701 +317797 +282365 +78176 +29577 +311075 +331937 +190395 +5802 +245112 +111032 +140556 +199127 +376491 +305253 +300375 +545903 +357782 +377911 +74963 +329336 +25057 +3244 +252020 +293474 +171050 +239306 +189772 +238090 +160031 +36761 +445675 +252716 +152214 +239466 +55155 +479829 +420281 +445812 +118106 +434576 +451104 +316708 +438535 +300322 +167952 +390072 +487220 +20247 +9400 +43944 +35770 +487351 +425462 +212203 +9668 +8981 +574241 +332096 +535563 +192944 +498733 +276151 +550645 +507037 
+9769 +404249 +236747 +376416 +306415 +45966 +191296 +576875 +493932 +225075 +536444 +79920 +561681 +60700 +99874 +219437 +509819 +466665 +579326 +428739 +394611 +263083 +379554 +279391 +178516 +133690 +77396 +300137 +6861 +435359 +314108 +444152 +500139 +92749 +89188 +300233 +414201 +443204 +211097 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/oid_bbox_trainable_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/oid_bbox_trainable_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..863e4f31d719cd148fd56c981e219257334f9c7e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/oid_bbox_trainable_label_map.pbtxt @@ -0,0 +1,2725 @@ +item { + name: "/m/01g317" + id: 1 + display_name: "Person" +} +item { + name: "/m/09j2d" + id: 2 + display_name: "Clothing" +} +item { + name: "/m/04yx4" + id: 3 + display_name: "Man" +} +item { + name: "/m/0dzct" + id: 4 + display_name: "Face" +} +item { + name: "/m/07j7r" + id: 5 + display_name: "Tree" +} +item { + name: "/m/05s2s" + id: 6 + display_name: "Plant" +} +item { + name: "/m/03bt1vf" + id: 7 + display_name: "Woman" +} +item { + name: "/m/07yv9" + id: 8 + display_name: "Vehicle" +} +item { + name: "/m/0cgh4" + id: 9 + display_name: "Building" +} +item { + name: "/m/01prls" + id: 10 + display_name: "Land vehicle" +} +item { + name: "/m/09j5n" + id: 11 + display_name: "Footwear" +} +item { + name: "/m/05r655" + id: 12 + display_name: "Girl" +} +item { + name: "/m/0jbk" + id: 13 + display_name: "Animal" +} +item { + name: "/m/0k4j" + id: 14 + display_name: "Car" +} +item { + name: "/m/02wbm" + id: 15 + display_name: "Food" +} +item { + name: "/m/083wq" + id: 16 + display_name: "Wheel" +} +item { + name: "/m/0c9ph5" + id: 17 + display_name: "Flower" +} +item { + name: "/m/0c_jw" + id: 18 + display_name: "Furniture" +} +item { + name: "/m/0d4v4" + id: 19 + display_name: "Window" +} +item { + name: "/m/03jm5" + id: 20 + display_name: "House" +} +item { + name: "/m/01bl7v" + id: 21 + display_name: "Boy" +} +item { + name: "/m/0463sg" + id: 22 + display_name: "Fashion accessory" +} +item { + name: "/m/04bcr3" + id: 23 + display_name: "Table" +} +item { + name: "/m/0jyfg" + id: 24 + display_name: "Glasses" +} +item { + name: "/m/01xyhv" + id: 25 + display_name: "Suit" +} +item { + name: "/m/08dz3q" + id: 26 + display_name: "Auto part" +} +item { + name: "/m/015p6" + id: 27 + display_name: "Bird" +} +item { + name: "/m/05y5lj" + id: 28 + display_name: "Sports equipment" +} +item { + name: "/m/01d40f" + id: 29 + display_name: "Dress" +} +item { + name: "/m/0bt9lr" + id: 30 + display_name: "Dog" +} +item { + name: "/m/01lrl" + id: 31 + display_name: "Carnivore" +} +item { + name: "/m/02p0tk3" + id: 32 + display_name: "Human body" +} +item { + name: "/m/0fly7" + id: 33 + display_name: "Jeans" +} +item { + name: "/m/04szw" + id: 34 + display_name: "Musical instrument" +} +item { + name: "/m/0271t" + id: 35 + display_name: "Drink" +} +item { + name: "/m/019jd" + id: 36 + display_name: "Boat" +} +item { + name: "/m/03q69" + id: 37 + display_name: "Hair" +} +item { + name: "/m/0h9mv" + id: 38 + display_name: "Tire" +} +item { + name: "/m/04hgtk" + id: 39 + display_name: "Head" +} +item { + name: "/m/01yrx" + id: 40 + display_name: "Cat" +} +item { + name: "/m/01rzcn" + id: 41 + display_name: "Watercraft" +} +item { + name: "/m/01mzpv" + id: 42 + display_name: "Chair" +} +item { + name: "/m/0199g" + id: 43 + display_name: 
"Bike" +} +item { + name: "/m/01fdzj" + id: 44 + display_name: "Tower" +} +item { + name: "/m/04rky" + id: 45 + display_name: "Mammal" +} +item { + name: "/m/079cl" + id: 46 + display_name: "Skyscraper" +} +item { + name: "/m/0dzf4" + id: 47 + display_name: "Arm" +} +item { + name: "/m/0138tl" + id: 48 + display_name: "Toy" +} +item { + name: "/m/06msq" + id: 49 + display_name: "Sculpture" +} +item { + name: "/m/03xxp" + id: 50 + display_name: "Invertebrate" +} +item { + name: "/m/0hg7b" + id: 51 + display_name: "Microphone" +} +item { + name: "/m/01n5jq" + id: 52 + display_name: "Poster" +} +item { + name: "/m/03vt0" + id: 53 + display_name: "Insect" +} +item { + name: "/m/0342h" + id: 54 + display_name: "Guitar" +} +item { + name: "/m/0k0pj" + id: 55 + display_name: "Nose" +} +item { + name: "/m/02dl1y" + id: 56 + display_name: "Hat" +} +item { + name: "/m/04brg2" + id: 57 + display_name: "Tableware" +} +item { + name: "/m/02dgv" + id: 58 + display_name: "Door" +} +item { + name: "/m/01bqk0" + id: 59 + display_name: "Bicycle wheel" +} +item { + name: "/m/017ftj" + id: 60 + display_name: "Sunglasses" +} +item { + name: "/m/052lwg6" + id: 61 + display_name: "Baked goods" +} +item { + name: "/m/014sv8" + id: 62 + display_name: "Eye" +} +item { + name: "/m/0270h" + id: 63 + display_name: "Dessert" +} +item { + name: "/m/0283dt1" + id: 64 + display_name: "Mouth" +} +item { + name: "/m/0k5j" + id: 65 + display_name: "Aircraft" +} +item { + name: "/m/0cmf2" + id: 66 + display_name: "Airplane" +} +item { + name: "/m/07jdr" + id: 67 + display_name: "Train" +} +item { + name: "/m/032b3c" + id: 68 + display_name: "Jacket" +} +item { + name: "/m/033rq4" + id: 69 + display_name: "Street light" +} +item { + name: "/m/0k65p" + id: 70 + display_name: "Hand" +} +item { + name: "/m/01ww8y" + id: 71 + display_name: "Snack" +} +item { + name: "/m/0zvk5" + id: 72 + display_name: "Helmet" +} +item { + name: "/m/07mhn" + id: 73 + display_name: "Trousers" +} +item { + name: "/m/04dr76w" + id: 74 + display_name: "Bottle" +} +item { + name: "/m/03fp41" + id: 75 + display_name: "Houseplant" +} +item { + name: "/m/03k3r" + id: 76 + display_name: "Horse" +} +item { + name: "/m/01y9k5" + id: 77 + display_name: "Desk" +} +item { + name: "/m/0cdl1" + id: 78 + display_name: "Palm tree" +} +item { + name: "/m/0f4s2w" + id: 79 + display_name: "Vegetable" +} +item { + name: "/m/02xwb" + id: 80 + display_name: "Fruit" +} +item { + name: "/m/035r7c" + id: 81 + display_name: "Leg" +} +item { + name: "/m/0bt_c3" + id: 82 + display_name: "Book" +} +item { + name: "/m/01_bhs" + id: 83 + display_name: "Fast food" +} +item { + name: "/m/01599" + id: 84 + display_name: "Beer" +} +item { + name: "/m/03120" + id: 85 + display_name: "Flag" +} +item { + name: "/m/026t6" + id: 86 + display_name: "Drum" +} +item { + name: "/m/01bjv" + id: 87 + display_name: "Bus" +} +item { + name: "/m/07r04" + id: 88 + display_name: "Truck" +} +item { + name: "/m/018xm" + id: 89 + display_name: "Ball" +} +item { + name: "/m/01rkbr" + id: 90 + display_name: "Tie" +} +item { + name: "/m/0fm3zh" + id: 91 + display_name: "Flowerpot" +} +item { + name: "/m/02_n6y" + id: 92 + display_name: "Goggles" +} +item { + name: "/m/04_sv" + id: 93 + display_name: "Motorcycle" +} +item { + name: "/m/06z37_" + id: 94 + display_name: "Picture frame" +} +item { + name: "/m/01bfm9" + id: 95 + display_name: "Shorts" +} +item { + name: "/m/0h8mhzd" + id: 96 + display_name: "Sports uniform" +} +item { + name: "/m/0d_2m" + id: 97 + display_name: "Moths and butterflies" +} +item 
{ + name: "/m/0gjbg72" + id: 98 + display_name: "Shelf" +} +item { + name: "/m/01n4qj" + id: 99 + display_name: "Shirt" +} +item { + name: "/m/0ch_cf" + id: 100 + display_name: "Fish" +} +item { + name: "/m/06m11" + id: 101 + display_name: "Rose" +} +item { + name: "/m/01jfm_" + id: 102 + display_name: "Licence plate" +} +item { + name: "/m/02crq1" + id: 103 + display_name: "Couch" +} +item { + name: "/m/083kb" + id: 104 + display_name: "Weapon" +} +item { + name: "/m/01c648" + id: 105 + display_name: "Laptop" +} +item { + name: "/m/09tvcd" + id: 106 + display_name: "Wine glass" +} +item { + name: "/m/0h2r6" + id: 107 + display_name: "Van" +} +item { + name: "/m/081qc" + id: 108 + display_name: "Wine" +} +item { + name: "/m/09ddx" + id: 109 + display_name: "Duck" +} +item { + name: "/m/03p3bw" + id: 110 + display_name: "Bicycle helmet" +} +item { + name: "/m/0cyf8" + id: 111 + display_name: "Butterfly" +} +item { + name: "/m/0b_rs" + id: 112 + display_name: "Swimming pool" +} +item { + name: "/m/039xj_" + id: 113 + display_name: "Ear" +} +item { + name: "/m/021sj1" + id: 114 + display_name: "Office" +} +item { + name: "/m/0dv5r" + id: 115 + display_name: "Camera" +} +item { + name: "/m/01lynh" + id: 116 + display_name: "Stairs" +} +item { + name: "/m/06bt6" + id: 117 + display_name: "Reptile" +} +item { + name: "/m/01226z" + id: 118 + display_name: "Football" +} +item { + name: "/m/0fszt" + id: 119 + display_name: "Cake" +} +item { + name: "/m/050k8" + id: 120 + display_name: "Mobile phone" +} +item { + name: "/m/02wbtzl" + id: 121 + display_name: "Sun hat" +} +item { + name: "/m/02p5f1q" + id: 122 + display_name: "Coffee cup" +} +item { + name: "/m/025nd" + id: 123 + display_name: "Christmas tree" +} +item { + name: "/m/02522" + id: 124 + display_name: "Computer monitor" +} +item { + name: "/m/09ct_" + id: 125 + display_name: "Helicopter" +} +item { + name: "/m/0cvnqh" + id: 126 + display_name: "Bench" +} +item { + name: "/m/0d5gx" + id: 127 + display_name: "Castle" +} +item { + name: "/m/01xygc" + id: 128 + display_name: "Coat" +} +item { + name: "/m/04m6gz" + id: 129 + display_name: "Porch" +} +item { + name: "/m/01gkx_" + id: 130 + display_name: "Swimwear" +} +item { + name: "/m/01s105" + id: 131 + display_name: "Cabinetry" +} +item { + name: "/m/01j61q" + id: 132 + display_name: "Tent" +} +item { + name: "/m/0hnnb" + id: 133 + display_name: "Umbrella" +} +item { + name: "/m/01j51" + id: 134 + display_name: "Balloon" +} +item { + name: "/m/01knjb" + id: 135 + display_name: "Billboard" +} +item { + name: "/m/03__z0" + id: 136 + display_name: "Bookcase" +} +item { + name: "/m/01m2v" + id: 137 + display_name: "Computer keyboard" +} +item { + name: "/m/0167gd" + id: 138 + display_name: "Doll" +} +item { + name: "/m/0284d" + id: 139 + display_name: "Dairy" +} +item { + name: "/m/03ssj5" + id: 140 + display_name: "Bed" +} +item { + name: "/m/02fq_6" + id: 141 + display_name: "Fedora" +} +item { + name: "/m/06nwz" + id: 142 + display_name: "Seafood" +} +item { + name: "/m/0220r2" + id: 143 + display_name: "Fountain" +} +item { + name: "/m/01mqdt" + id: 144 + display_name: "Traffic sign" +} +item { + name: "/m/0268lbt" + id: 145 + display_name: "Hiking equipment" +} +item { + name: "/m/07c52" + id: 146 + display_name: "Television" +} +item { + name: "/m/0grw1" + id: 147 + display_name: "Salad" +} +item { + name: "/m/01h3n" + id: 148 + display_name: "Bee" +} +item { + name: "/m/078n6m" + id: 149 + display_name: "Coffee table" +} +item { + name: "/m/01xq0k1" + id: 150 + display_name: "Cattle" +} 
+item { + name: "/m/0gd2v" + id: 151 + display_name: "Marine mammal" +} +item { + name: "/m/0dbvp" + id: 152 + display_name: "Goose" +} +item { + name: "/m/03rszm" + id: 153 + display_name: "Curtain" +} +item { + name: "/m/0h8n5zk" + id: 154 + display_name: "Kitchen & dining room table" +} +item { + name: "/m/019dx1" + id: 155 + display_name: "Home appliance" +} +item { + name: "/m/03hl4l9" + id: 156 + display_name: "Marine invertebrates" +} +item { + name: "/m/0b3fp9" + id: 157 + display_name: "Countertop" +} +item { + name: "/m/02rdsp" + id: 158 + display_name: "Office supplies" +} +item { + name: "/m/0hf58v5" + id: 159 + display_name: "Luggage and bags" +} +item { + name: "/m/04h7h" + id: 160 + display_name: "Lighthouse" +} +item { + name: "/m/024g6" + id: 161 + display_name: "Cocktail" +} +item { + name: "/m/0cffdh" + id: 162 + display_name: "Maple" +} +item { + name: "/m/03q5c7" + id: 163 + display_name: "Saucer" +} +item { + name: "/m/014y4n" + id: 164 + display_name: "Paddle" +} +item { + name: "/m/01yx86" + id: 165 + display_name: "Bronze sculpture" +} +item { + name: "/m/020jm" + id: 166 + display_name: "Beetle" +} +item { + name: "/m/025dyy" + id: 167 + display_name: "Box" +} +item { + name: "/m/01llwg" + id: 168 + display_name: "Necklace" +} +item { + name: "/m/08pbxl" + id: 169 + display_name: "Monkey" +} +item { + name: "/m/02d9qx" + id: 170 + display_name: "Whiteboard" +} +item { + name: "/m/02pkr5" + id: 171 + display_name: "Plumbing fixture" +} +item { + name: "/m/0h99cwc" + id: 172 + display_name: "Kitchen appliance" +} +item { + name: "/m/050gv4" + id: 173 + display_name: "Plate" +} +item { + name: "/m/02vqfm" + id: 174 + display_name: "Coffee" +} +item { + name: "/m/09kx5" + id: 175 + display_name: "Deer" +} +item { + name: "/m/019w40" + id: 176 + display_name: "Surfboard" +} +item { + name: "/m/09dzg" + id: 177 + display_name: "Turtle" +} +item { + name: "/m/07k1x" + id: 178 + display_name: "Tool" +} +item { + name: "/m/080hkjn" + id: 179 + display_name: "Handbag" +} +item { + name: "/m/07qxg_" + id: 180 + display_name: "Football helmet" +} +item { + name: "/m/0ph39" + id: 181 + display_name: "Canoe" +} +item { + name: "/m/018p4k" + id: 182 + display_name: "Cart" +} +item { + name: "/m/02h19r" + id: 183 + display_name: "Scarf" +} +item { + name: "/m/015h_t" + id: 184 + display_name: "Beard" +} +item { + name: "/m/0fqfqc" + id: 185 + display_name: "Drawer" +} +item { + name: "/m/025rp__" + id: 186 + display_name: "Cowboy hat" +} +item { + name: "/m/01x3z" + id: 187 + display_name: "Clock" +} +item { + name: "/m/0crjs" + id: 188 + display_name: "Convenience store" +} +item { + name: "/m/0l515" + id: 189 + display_name: "Sandwich" +} +item { + name: "/m/015qff" + id: 190 + display_name: "Traffic light" +} +item { + name: "/m/09kmb" + id: 191 + display_name: "Spider" +} +item { + name: "/m/09728" + id: 192 + display_name: "Bread" +} +item { + name: "/m/071qp" + id: 193 + display_name: "Squirrel" +} +item { + name: "/m/02s195" + id: 194 + display_name: "Vase" +} +item { + name: "/m/06c54" + id: 195 + display_name: "Rifle" +} +item { + name: "/m/01xqw" + id: 196 + display_name: "Cello" +} +item { + name: "/m/05zsy" + id: 197 + display_name: "Pumpkin" +} +item { + name: "/m/0bwd_0j" + id: 198 + display_name: "Elephant" +} +item { + name: "/m/04m9y" + id: 199 + display_name: "Lizard" +} +item { + name: "/m/052sf" + id: 200 + display_name: "Mushroom" +} +item { + name: "/m/03grzl" + id: 201 + display_name: "Baseball glove" +} +item { + name: "/m/01z1kdw" + id: 202 + 
display_name: "Juice" +} +item { + name: "/m/02wv6h6" + id: 203 + display_name: "Skirt" +} +item { + name: "/m/016m2d" + id: 204 + display_name: "Skull" +} +item { + name: "/m/0dtln" + id: 205 + display_name: "Lamp" +} +item { + name: "/m/057cc" + id: 206 + display_name: "Musical keyboard" +} +item { + name: "/m/06k2mb" + id: 207 + display_name: "High heels" +} +item { + name: "/m/0f6wt" + id: 208 + display_name: "Falcon" +} +item { + name: "/m/0cxn2" + id: 209 + display_name: "Ice cream" +} +item { + name: "/m/02jvh9" + id: 210 + display_name: "Mug" +} +item { + name: "/m/0gjkl" + id: 211 + display_name: "Watch" +} +item { + name: "/m/01b638" + id: 212 + display_name: "Boot" +} +item { + name: "/m/071p9" + id: 213 + display_name: "Ski" +} +item { + name: "/m/0pg52" + id: 214 + display_name: "Taxi" +} +item { + name: "/m/0ftb8" + id: 215 + display_name: "Sunflower" +} +item { + name: "/m/0hnyx" + id: 216 + display_name: "Pastry" +} +item { + name: "/m/02jz0l" + id: 217 + display_name: "Tap" +} +item { + name: "/m/04kkgm" + id: 218 + display_name: "Bowl" +} +item { + name: "/m/0174n1" + id: 219 + display_name: "Glove" +} +item { + name: "/m/0gv1x" + id: 220 + display_name: "Parrot" +} +item { + name: "/m/09csl" + id: 221 + display_name: "Eagle" +} +item { + name: "/m/02jnhm" + id: 222 + display_name: "Tin can" +} +item { + name: "/m/099ssp" + id: 223 + display_name: "Platter" +} +item { + name: "/m/03nfch" + id: 224 + display_name: "Sandal" +} +item { + name: "/m/07y_7" + id: 225 + display_name: "Violin" +} +item { + name: "/m/05z6w" + id: 226 + display_name: "Penguin" +} +item { + name: "/m/03m3pdh" + id: 227 + display_name: "Sofa bed" +} +item { + name: "/m/09ld4" + id: 228 + display_name: "Frog" +} +item { + name: "/m/09b5t" + id: 229 + display_name: "Chicken" +} +item { + name: "/m/054xkw" + id: 230 + display_name: "Lifejacket" +} +item { + name: "/m/0130jx" + id: 231 + display_name: "Sink" +} +item { + name: "/m/07fbm7" + id: 232 + display_name: "Strawberry" +} +item { + name: "/m/01dws" + id: 233 + display_name: "Bear" +} +item { + name: "/m/01tcjp" + id: 234 + display_name: "Muffin" +} +item { + name: "/m/0dftk" + id: 235 + display_name: "Swan" +} +item { + name: "/m/0c06p" + id: 236 + display_name: "Candle" +} +item { + name: "/m/034c16" + id: 237 + display_name: "Pillow" +} +item { + name: "/m/09d5_" + id: 238 + display_name: "Owl" +} +item { + name: "/m/03hlz0c" + id: 239 + display_name: "Kitchen utensil" +} +item { + name: "/m/0ft9s" + id: 240 + display_name: "Dragonfly" +} +item { + name: "/m/011k07" + id: 241 + display_name: "Tortoise" +} +item { + name: "/m/054_l" + id: 242 + display_name: "Mirror" +} +item { + name: "/m/0jqgx" + id: 243 + display_name: "Lily" +} +item { + name: "/m/0663v" + id: 244 + display_name: "Pizza" +} +item { + name: "/m/0242l" + id: 245 + display_name: "Coin" +} +item { + name: "/m/014trl" + id: 246 + display_name: "Cosmetics" +} +item { + name: "/m/05r5c" + id: 247 + display_name: "Piano" +} +item { + name: "/m/07j87" + id: 248 + display_name: "Tomato" +} +item { + name: "/m/05kyg_" + id: 249 + display_name: "Chest of drawers" +} +item { + name: "/m/0kmg4" + id: 250 + display_name: "Teddy bear" +} +item { + name: "/m/07cmd" + id: 251 + display_name: "Tank" +} +item { + name: "/m/0dv77" + id: 252 + display_name: "Squash" +} +item { + name: "/m/096mb" + id: 253 + display_name: "Lion" +} +item { + name: "/m/01gmv2" + id: 254 + display_name: "Brassiere" +} +item { + name: "/m/07bgp" + id: 255 + display_name: "Sheep" +} +item { + name: "/m/0cmx8" + id: 
256 + display_name: "Spoon" +} +item { + name: "/m/029tx" + id: 257 + display_name: "Dinosaur" +} +item { + name: "/m/073bxn" + id: 258 + display_name: "Tripod" +} +item { + name: "/m/0bh9flk" + id: 259 + display_name: "Tablet computer" +} +item { + name: "/m/06mf6" + id: 260 + display_name: "Rabbit" +} +item { + name: "/m/06_fw" + id: 261 + display_name: "Skateboard" +} +item { + name: "/m/078jl" + id: 262 + display_name: "Snake" +} +item { + name: "/m/0fbdv" + id: 263 + display_name: "Shellfish" +} +item { + name: "/m/0h23m" + id: 264 + display_name: "Sparrow" +} +item { + name: "/m/014j1m" + id: 265 + display_name: "Apple" +} +item { + name: "/m/03fwl" + id: 266 + display_name: "Goat" +} +item { + name: "/m/02y6n" + id: 267 + display_name: "French fries" +} +item { + name: "/m/06c7f7" + id: 268 + display_name: "Lipstick" +} +item { + name: "/m/026qbn5" + id: 269 + display_name: "studio couch" +} +item { + name: "/m/0cdn1" + id: 270 + display_name: "Hamburger" +} +item { + name: "/m/07clx" + id: 271 + display_name: "Tea" +} +item { + name: "/m/07cx4" + id: 272 + display_name: "Telephone" +} +item { + name: "/m/03g8mr" + id: 273 + display_name: "Baseball bat" +} +item { + name: "/m/0cnyhnx" + id: 274 + display_name: "Bull" +} +item { + name: "/m/01b7fy" + id: 275 + display_name: "Headphones" +} +item { + name: "/m/04gth" + id: 276 + display_name: "Lavender" +} +item { + name: "/m/0cyfs" + id: 277 + display_name: "Parachute" +} +item { + name: "/m/021mn" + id: 278 + display_name: "Cookie" +} +item { + name: "/m/07dm6" + id: 279 + display_name: "Tiger" +} +item { + name: "/m/0k1tl" + id: 280 + display_name: "Pen" +} +item { + name: "/m/0dv9c" + id: 281 + display_name: "Racket" +} +item { + name: "/m/0dt3t" + id: 282 + display_name: "Fork" +} +item { + name: "/m/04yqq2" + id: 283 + display_name: "Bust" +} +item { + name: "/m/01cmb2" + id: 284 + display_name: "Miniskirt" +} +item { + name: "/m/0gd36" + id: 285 + display_name: "Sea lion" +} +item { + name: "/m/033cnk" + id: 286 + display_name: "Egg" +} +item { + name: "/m/06ncr" + id: 287 + display_name: "Saxophone" +} +item { + name: "/m/03bk1" + id: 288 + display_name: "Giraffe" +} +item { + name: "/m/0bjyj5" + id: 289 + display_name: "Waste container" +} +item { + name: "/m/06__v" + id: 290 + display_name: "Snowboard" +} +item { + name: "/m/0qmmr" + id: 291 + display_name: "Wheelchair" +} +item { + name: "/m/01xgg_" + id: 292 + display_name: "Medical equipment" +} +item { + name: "/m/0czz2" + id: 293 + display_name: "Antelope" +} +item { + name: "/m/02l8p9" + id: 294 + display_name: "Harbor seal" +} +item { + name: "/m/09g1w" + id: 295 + display_name: "Toilet" +} +item { + name: "/m/0ll1f78" + id: 296 + display_name: "Shrimp" +} +item { + name: "/m/0cyhj_" + id: 297 + display_name: "Orange" +} +item { + name: "/m/0642b4" + id: 298 + display_name: "Cupboard" +} +item { + name: "/m/0h8mzrc" + id: 299 + display_name: "Wall clock" +} +item { + name: "/m/068zj" + id: 300 + display_name: "Pig" +} +item { + name: "/m/02z51p" + id: 301 + display_name: "Nightstand" +} +item { + name: "/m/0h8nr_l" + id: 302 + display_name: "Bathroom accessory" +} +item { + name: "/m/0388q" + id: 303 + display_name: "Grape" +} +item { + name: "/m/02hj4" + id: 304 + display_name: "Dolphin" +} +item { + name: "/m/01jfsr" + id: 305 + display_name: "Lantern" +} +item { + name: "/m/07gql" + id: 306 + display_name: "Trumpet" +} +item { + name: "/m/0h8my_4" + id: 307 + display_name: "Tennis racket" +} +item { + name: "/m/0n28_" + id: 308 + display_name: "Crab" +} +item { + 
name: "/m/0120dh" + id: 309 + display_name: "Sea turtle" +} +item { + name: "/m/020kz" + id: 310 + display_name: "Cannon" +} +item { + name: "/m/0mkg" + id: 311 + display_name: "Accordion" +} +item { + name: "/m/03c7gz" + id: 312 + display_name: "Door handle" +} +item { + name: "/m/09k_b" + id: 313 + display_name: "Lemon" +} +item { + name: "/m/031n1" + id: 314 + display_name: "Foot" +} +item { + name: "/m/04rmv" + id: 315 + display_name: "Mouse" +} +item { + name: "/m/084rd" + id: 316 + display_name: "Wok" +} +item { + name: "/m/02rgn06" + id: 317 + display_name: "Volleyball" +} +item { + name: "/m/05z55" + id: 318 + display_name: "Pasta" +} +item { + name: "/m/01r546" + id: 319 + display_name: "Earrings" +} +item { + name: "/m/09qck" + id: 320 + display_name: "Banana" +} +item { + name: "/m/012w5l" + id: 321 + display_name: "Ladder" +} +item { + name: "/m/01940j" + id: 322 + display_name: "Backpack" +} +item { + name: "/m/09f_2" + id: 323 + display_name: "Crocodile" +} +item { + name: "/m/02p3w7d" + id: 324 + display_name: "Roller skates" +} +item { + name: "/m/057p5t" + id: 325 + display_name: "Scoreboard" +} +item { + name: "/m/0d8zb" + id: 326 + display_name: "Jellyfish" +} +item { + name: "/m/01nq26" + id: 327 + display_name: "Sock" +} +item { + name: "/m/01x_v" + id: 328 + display_name: "Camel" +} +item { + name: "/m/05gqfk" + id: 329 + display_name: "Plastic bag" +} +item { + name: "/m/0cydv" + id: 330 + display_name: "Caterpillar" +} +item { + name: "/m/07030" + id: 331 + display_name: "Sushi" +} +item { + name: "/m/084zz" + id: 332 + display_name: "Whale" +} +item { + name: "/m/0c29q" + id: 333 + display_name: "Leopard" +} +item { + name: "/m/02zn6n" + id: 334 + display_name: "Barrel" +} +item { + name: "/m/03tw93" + id: 335 + display_name: "Fireplace" +} +item { + name: "/m/0fqt361" + id: 336 + display_name: "Stool" +} +item { + name: "/m/0f9_l" + id: 337 + display_name: "Snail" +} +item { + name: "/m/0gm28" + id: 338 + display_name: "Candy" +} +item { + name: "/m/09rvcxw" + id: 339 + display_name: "Rocket" +} +item { + name: "/m/01nkt" + id: 340 + display_name: "Cheese" +} +item { + name: "/m/04p0qw" + id: 341 + display_name: "Billiard table" +} +item { + name: "/m/03hj559" + id: 342 + display_name: "Mixing bowl" +} +item { + name: "/m/07pj7bq" + id: 343 + display_name: "Bowling equipment" +} +item { + name: "/m/04ctx" + id: 344 + display_name: "Knife" +} +item { + name: "/m/0703r8" + id: 345 + display_name: "Loveseat" +} +item { + name: "/m/03qrc" + id: 346 + display_name: "Hamster" +} +item { + name: "/m/020lf" + id: 347 + display_name: "Mouse" +} +item { + name: "/m/0by6g" + id: 348 + display_name: "Shark" +} +item { + name: "/m/01fh4r" + id: 349 + display_name: "Teapot" +} +item { + name: "/m/07c6l" + id: 350 + display_name: "Trombone" +} +item { + name: "/m/03bj1" + id: 351 + display_name: "Panda" +} +item { + name: "/m/0898b" + id: 352 + display_name: "Zebra" +} +item { + name: "/m/02x984l" + id: 353 + display_name: "Mechanical fan" +} +item { + name: "/m/0fj52s" + id: 354 + display_name: "Carrot" +} +item { + name: "/m/0cd4d" + id: 355 + display_name: "Cheetah" +} +item { + name: "/m/02068x" + id: 356 + display_name: "Gondola" +} +item { + name: "/m/01vbnl" + id: 357 + display_name: "Bidet" +} +item { + name: "/m/0449p" + id: 358 + display_name: "Jaguar" +} +item { + name: "/m/0gj37" + id: 359 + display_name: "Ladybug" +} +item { + name: "/m/0nl46" + id: 360 + display_name: "Crown" +} +item { + name: "/m/0152hh" + id: 361 + display_name: "Snowman" +} +item { + name: 
"/m/03dnzn" + id: 362 + display_name: "Bathtub" +} +item { + name: "/m/05_5p_0" + id: 363 + display_name: "Table tennis racket" +} +item { + name: "/m/02jfl0" + id: 364 + display_name: "Sombrero" +} +item { + name: "/m/01dxs" + id: 365 + display_name: "Brown bear" +} +item { + name: "/m/0cjq5" + id: 366 + display_name: "Lobster" +} +item { + name: "/m/040b_t" + id: 367 + display_name: "Refrigerator" +} +item { + name: "/m/0_cp5" + id: 368 + display_name: "Oyster" +} +item { + name: "/m/0gxl3" + id: 369 + display_name: "Handgun" +} +item { + name: "/m/029bxz" + id: 370 + display_name: "Oven" +} +item { + name: "/m/02zt3" + id: 371 + display_name: "Kite" +} +item { + name: "/m/03d443" + id: 372 + display_name: "Rhinoceros" +} +item { + name: "/m/0306r" + id: 373 + display_name: "Fox" +} +item { + name: "/m/0h8l4fh" + id: 374 + display_name: "Light bulb" +} +item { + name: "/m/0633h" + id: 375 + display_name: "Polar bear" +} +item { + name: "/m/01s55n" + id: 376 + display_name: "Suitcase" +} +item { + name: "/m/0hkxq" + id: 377 + display_name: "Broccoli" +} +item { + name: "/m/0cn6p" + id: 378 + display_name: "Otter" +} +item { + name: "/m/0dbzx" + id: 379 + display_name: "Mule" +} +item { + name: "/m/01dy8n" + id: 380 + display_name: "Woodpecker" +} +item { + name: "/m/01h8tj" + id: 381 + display_name: "Starfish" +} +item { + name: "/m/03s_tn" + id: 382 + display_name: "Kettle" +} +item { + name: "/m/01xs3r" + id: 383 + display_name: "Jet ski" +} +item { + name: "/m/031b6r" + id: 384 + display_name: "Window blind" +} +item { + name: "/m/06j2d" + id: 385 + display_name: "Raven" +} +item { + name: "/m/0hqkz" + id: 386 + display_name: "Grapefruit" +} +item { + name: "/m/01_5g" + id: 387 + display_name: "Chopsticks" +} +item { + name: "/m/02zvsm" + id: 388 + display_name: "Tart" +} +item { + name: "/m/0kpqd" + id: 389 + display_name: "Watermelon" +} +item { + name: "/m/015x4r" + id: 390 + display_name: "Cucumber" +} +item { + name: "/m/061hd_" + id: 391 + display_name: "Infant bed" +} +item { + name: "/m/04ylt" + id: 392 + display_name: "Missile" +} +item { + name: "/m/02wv84t" + id: 393 + display_name: "Gas stove" +} +item { + name: "/m/04y4h8h" + id: 394 + display_name: "Bathroom cabinet" +} +item { + name: "/m/01gllr" + id: 395 + display_name: "Beehive" +} +item { + name: "/m/0pcr" + id: 396 + display_name: "Alpaca" +} +item { + name: "/m/0jy4k" + id: 397 + display_name: "Doughnut" +} +item { + name: "/m/09f20" + id: 398 + display_name: "Hippopotamus" +} +item { + name: "/m/0mcx2" + id: 399 + display_name: "Ipod" +} +item { + name: "/m/04c0y" + id: 400 + display_name: "Kangaroo" +} +item { + name: "/m/0_k2" + id: 401 + display_name: "Ant" +} +item { + name: "/m/0jg57" + id: 402 + display_name: "Bell pepper" +} +item { + name: "/m/03fj2" + id: 403 + display_name: "Goldfish" +} +item { + name: "/m/03ldnb" + id: 404 + display_name: "Ceiling fan" +} +item { + name: "/m/06nrc" + id: 405 + display_name: "Shotgun" +} +item { + name: "/m/01btn" + id: 406 + display_name: "Barge" +} +item { + name: "/m/05vtc" + id: 407 + display_name: "Potato" +} +item { + name: "/m/08hvt4" + id: 408 + display_name: "Jug" +} +item { + name: "/m/0fx9l" + id: 409 + display_name: "Microwave oven" +} +item { + name: "/m/01h44" + id: 410 + display_name: "Bat" +} +item { + name: "/m/05n4y" + id: 411 + display_name: "Ostrich" +} +item { + name: "/m/0jly1" + id: 412 + display_name: "Turkey" +} +item { + name: "/m/06y5r" + id: 413 + display_name: "Sword" +} +item { + name: "/m/05ctyq" + id: 414 + display_name: "Tennis ball" +} 
+item { + name: "/m/0fp6w" + id: 415 + display_name: "Pineapple" +} +item { + name: "/m/0d4w1" + id: 416 + display_name: "Closet" +} +item { + name: "/m/02pv19" + id: 417 + display_name: "Stop sign" +} +item { + name: "/m/07crc" + id: 418 + display_name: "Taco" +} +item { + name: "/m/01dwwc" + id: 419 + display_name: "Pancake" +} +item { + name: "/m/01b9xk" + id: 420 + display_name: "Hot dog" +} +item { + name: "/m/013y1f" + id: 421 + display_name: "Organ" +} +item { + name: "/m/0m53l" + id: 422 + display_name: "Rays and skates" +} +item { + name: "/m/0174k2" + id: 423 + display_name: "Washing machine" +} +item { + name: "/m/01dwsz" + id: 424 + display_name: "Waffle" +} +item { + name: "/m/04vv5k" + id: 425 + display_name: "Snowplow" +} +item { + name: "/m/04cp_" + id: 426 + display_name: "Koala" +} +item { + name: "/m/0fz0h" + id: 427 + display_name: "Honeycomb" +} +item { + name: "/m/0llzx" + id: 428 + display_name: "Sewing machine" +} +item { + name: "/m/0319l" + id: 429 + display_name: "Horn" +} +item { + name: "/m/04v6l4" + id: 430 + display_name: "Frying pan" +} +item { + name: "/m/0dkzw" + id: 431 + display_name: "Seat belt" +} +item { + name: "/m/027pcv" + id: 432 + display_name: "Zucchini" +} +item { + name: "/m/0323sq" + id: 433 + display_name: "Golf cart" +} +item { + name: "/m/054fyh" + id: 434 + display_name: "Pitcher" +} +item { + name: "/m/01pns0" + id: 435 + display_name: "Fire hydrant" +} +item { + name: "/m/012n7d" + id: 436 + display_name: "Ambulance" +} +item { + name: "/m/044r5d" + id: 437 + display_name: "Golf ball" +} +item { + name: "/m/01krhy" + id: 438 + display_name: "Tiara" +} +item { + name: "/m/0dq75" + id: 439 + display_name: "Raccoon" +} +item { + name: "/m/0176mf" + id: 440 + display_name: "Belt" +} +item { + name: "/m/0h8lkj8" + id: 441 + display_name: "Corded phone" +} +item { + name: "/m/04tn4x" + id: 442 + display_name: "Swim cap" +} +item { + name: "/m/06l9r" + id: 443 + display_name: "Red panda" +} +item { + name: "/m/0cjs7" + id: 444 + display_name: "Asparagus" +} +item { + name: "/m/01lsmm" + id: 445 + display_name: "Scissors" +} +item { + name: "/m/01lcw4" + id: 446 + display_name: "Limousine" +} +item { + name: "/m/047j0r" + id: 447 + display_name: "Filing cabinet" +} +item { + name: "/m/01fb_0" + id: 448 + display_name: "Bagel" +} +item { + name: "/m/04169hn" + id: 449 + display_name: "Wood-burning stove" +} +item { + name: "/m/076bq" + id: 450 + display_name: "Segway" +} +item { + name: "/m/0hdln" + id: 451 + display_name: "Ruler" +} +item { + name: "/m/01g3x7" + id: 452 + display_name: "Bow and arrow" +} +item { + name: "/m/0l3ms" + id: 453 + display_name: "Balance beam" +} +item { + name: "/m/058qzx" + id: 454 + display_name: "Kitchen knife" +} +item { + name: "/m/0h8n6ft" + id: 455 + display_name: "Cake stand" +} +item { + name: "/m/018j2" + id: 456 + display_name: "Banjo" +} +item { + name: "/m/0l14j_" + id: 457 + display_name: "Flute" +} +item { + name: "/m/0wdt60w" + id: 458 + display_name: "Rugby ball" +} +item { + name: "/m/02gzp" + id: 459 + display_name: "Dagger" +} +item { + name: "/m/0h8n6f9" + id: 460 + display_name: "Dog bed" +} +item { + name: "/m/0fbw6" + id: 461 + display_name: "Cabbage" +} +item { + name: "/m/07kng9" + id: 462 + display_name: "Picnic basket" +} +item { + name: "/m/0dj6p" + id: 463 + display_name: "Peach" +} +item { + name: "/m/06pcq" + id: 464 + display_name: "Submarine sandwich" +} +item { + name: "/m/061_f" + id: 465 + display_name: "Pear" +} +item { + name: "/m/04g2r" + id: 466 + display_name: "Lynx" +} 
+item { + name: "/m/0jwn_" + id: 467 + display_name: "Pomegranate" +} +item { + name: "/m/02f9f_" + id: 468 + display_name: "Shower" +} +item { + name: "/m/01f8m5" + id: 469 + display_name: "Blue jay" +} +item { + name: "/m/01m4t" + id: 470 + display_name: "Printer" +} +item { + name: "/m/0cl4p" + id: 471 + display_name: "Hedgehog" +} +item { + name: "/m/07xyvk" + id: 472 + display_name: "Coffeemaker" +} +item { + name: "/m/084hf" + id: 473 + display_name: "Worm" +} +item { + name: "/m/03v5tg" + id: 474 + display_name: "Drinking straw" +} +item { + name: "/m/0qjjc" + id: 475 + display_name: "Remote control" +} +item { + name: "/m/015x5n" + id: 476 + display_name: "Radish" +} +item { + name: "/m/0ccs93" + id: 477 + display_name: "Canary" +} +item { + name: "/m/0nybt" + id: 478 + display_name: "Seahorse" +} +item { + name: "/m/02vkqh8" + id: 479 + display_name: "Wardrobe" +} +item { + name: "/m/09gtd" + id: 480 + display_name: "Toilet paper" +} +item { + name: "/m/019h78" + id: 481 + display_name: "Centipede" +} +item { + name: "/m/015wgc" + id: 482 + display_name: "Croissant" +} +item { + name: "/m/01x3jk" + id: 483 + display_name: "Snowmobile" +} +item { + name: "/m/01j3zr" + id: 484 + display_name: "Burrito" +} +item { + name: "/m/0c568" + id: 485 + display_name: "Porcupine" +} +item { + name: "/m/02pdsw" + id: 486 + display_name: "Cutting board" +} +item { + name: "/m/029b3" + id: 487 + display_name: "Dice" +} +item { + name: "/m/03q5t" + id: 488 + display_name: "Harpsichord" +} +item { + name: "/m/0p833" + id: 489 + display_name: "Perfume" +} +item { + name: "/m/01d380" + id: 490 + display_name: "Drill" +} +item { + name: "/m/024d2" + id: 491 + display_name: "Calculator" +} +item { + name: "/m/0mw_6" + id: 492 + display_name: "Willow" +} +item { + name: "/m/01f91_" + id: 493 + display_name: "Pretzel" +} +item { + name: "/m/02g30s" + id: 494 + display_name: "Guacamole" +} +item { + name: "/m/01hrv5" + id: 495 + display_name: "Popcorn" +} +item { + name: "/m/03m5k" + id: 496 + display_name: "Harp" +} +item { + name: "/m/0162_1" + id: 497 + display_name: "Towel" +} +item { + name: "/m/063rgb" + id: 498 + display_name: "Mixer" +} +item { + name: "/m/06_72j" + id: 499 + display_name: "Digital clock" +} +item { + name: "/m/046dlr" + id: 500 + display_name: "Alarm clock" +} +item { + name: "/m/047v4b" + id: 501 + display_name: "Artichoke" +} +item { + name: "/m/04zpv" + id: 502 + display_name: "Milk" +} +item { + name: "/m/043nyj" + id: 503 + display_name: "Common fig" +} +item { + name: "/m/03bbps" + id: 504 + display_name: "Power plugs and sockets" +} +item { + name: "/m/02w3r3" + id: 505 + display_name: "Paper towel" +} +item { + name: "/m/02pjr4" + id: 506 + display_name: "Blender" +} +item { + name: "/m/0755b" + id: 507 + display_name: "Scorpion" +} +item { + name: "/m/02lbcq" + id: 508 + display_name: "Stretcher" +} +item { + name: "/m/0fldg" + id: 509 + display_name: "Mango" +} +item { + name: "/m/012074" + id: 510 + display_name: "Magpie" +} +item { + name: "/m/035vxb" + id: 511 + display_name: "Isopod" +} +item { + name: "/m/02w3_ws" + id: 512 + display_name: "Personal care" +} +item { + name: "/m/0f6nr" + id: 513 + display_name: "Unicycle" +} +item { + name: "/m/0420v5" + id: 514 + display_name: "Punching bag" +} +item { + name: "/m/0frqm" + id: 515 + display_name: "Envelope" +} +item { + name: "/m/03txqz" + id: 516 + display_name: "Scale" +} +item { + name: "/m/0271qf7" + id: 517 + display_name: "Wine rack" +} +item { + name: "/m/074d1" + id: 518 + display_name: "Submarine" +} +item 
{ + name: "/m/08p92x" + id: 519 + display_name: "Cream" +} +item { + name: "/m/01j4z9" + id: 520 + display_name: "Chainsaw" +} +item { + name: "/m/0kpt_" + id: 521 + display_name: "Cantaloupe" +} +item { + name: "/m/0h8n27j" + id: 522 + display_name: "Serving tray" +} +item { + name: "/m/03y6mg" + id: 523 + display_name: "Food processor" +} +item { + name: "/m/04h8sr" + id: 524 + display_name: "Dumbbell" +} +item { + name: "/m/065h6l" + id: 525 + display_name: "Jacuzzi" +} +item { + name: "/m/02tsc9" + id: 526 + display_name: "Slow cooker" +} +item { + name: "/m/012ysf" + id: 527 + display_name: "Syringe" +} +item { + name: "/m/0ky7b" + id: 528 + display_name: "Dishwasher" +} +item { + name: "/m/02wg_p" + id: 529 + display_name: "Tree house" +} +item { + name: "/m/0584n8" + id: 530 + display_name: "Briefcase" +} +item { + name: "/m/03kt2w" + id: 531 + display_name: "Stationary bicycle" +} +item { + name: "/m/05kms" + id: 532 + display_name: "Oboe" +} +item { + name: "/m/030610" + id: 533 + display_name: "Treadmill" +} +item { + name: "/m/0lt4_" + id: 534 + display_name: "Binoculars" +} +item { + name: "/m/076lb9" + id: 535 + display_name: "Bench" +} +item { + name: "/m/02ctlc" + id: 536 + display_name: "Cricket ball" +} +item { + name: "/m/02x8cch" + id: 537 + display_name: "Salt and pepper shakers" +} +item { + name: "/m/09gys" + id: 538 + display_name: "Squid" +} +item { + name: "/m/03jbxj" + id: 539 + display_name: "Light switch" +} +item { + name: "/m/012xff" + id: 540 + display_name: "Toothbrush" +} +item { + name: "/m/0h8kx63" + id: 541 + display_name: "Spice rack" +} +item { + name: "/m/073g6" + id: 542 + display_name: "Stethoscope" +} +item { + name: "/m/02cvgx" + id: 543 + display_name: "Winter melon" +} +item { + name: "/m/027rl48" + id: 544 + display_name: "Ladle" +} +item { + name: "/m/01kb5b" + id: 545 + display_name: "Flashlight" +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/oid_object_detection_challenge_500_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/oid_object_detection_challenge_500_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..044f6d4c813729a693cac761f43a2246e07f7b6a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/oid_object_detection_challenge_500_label_map.pbtxt @@ -0,0 +1,2500 @@ +item { + name: "/m/061hd_" + id: 1 + display_name: "Infant bed" +} +item { + name: "/m/06m11" + id: 2 + display_name: "Rose" +} +item { + name: "/m/03120" + id: 3 + display_name: "Flag" +} +item { + name: "/m/01kb5b" + id: 4 + display_name: "Flashlight" +} +item { + name: "/m/0120dh" + id: 5 + display_name: "Sea turtle" +} +item { + name: "/m/0dv5r" + id: 6 + display_name: "Camera" +} +item { + name: "/m/0jbk" + id: 7 + display_name: "Animal" +} +item { + name: "/m/0174n1" + id: 8 + display_name: "Glove" +} +item { + name: "/m/09f_2" + id: 9 + display_name: "Crocodile" +} +item { + name: "/m/01xq0k1" + id: 10 + display_name: "Cattle" +} +item { + name: "/m/03jm5" + id: 11 + display_name: "House" +} +item { + name: "/m/02g30s" + id: 12 + display_name: "Guacamole" +} +item { + name: "/m/05z6w" + id: 13 + display_name: "Penguin" +} +item { + name: "/m/01jfm_" + id: 14 + display_name: "Vehicle registration plate" +} +item { + name: "/m/076lb9" + id: 15 + display_name: "Training bench" +} +item { + name: "/m/0gj37" + id: 16 + display_name: "Ladybug" +} +item { + name: "/m/0k0pj" + id: 17 + display_name: "Human nose" +} +item { + 
name: "/m/0kpqd" + id: 18 + display_name: "Watermelon" +} +item { + name: "/m/0l14j_" + id: 19 + display_name: "Flute" +} +item { + name: "/m/0cyf8" + id: 20 + display_name: "Butterfly" +} +item { + name: "/m/0174k2" + id: 21 + display_name: "Washing machine" +} +item { + name: "/m/0dq75" + id: 22 + display_name: "Raccoon" +} +item { + name: "/m/076bq" + id: 23 + display_name: "Segway" +} +item { + name: "/m/07crc" + id: 24 + display_name: "Taco" +} +item { + name: "/m/0d8zb" + id: 25 + display_name: "Jellyfish" +} +item { + name: "/m/0fszt" + id: 26 + display_name: "Cake" +} +item { + name: "/m/0k1tl" + id: 27 + display_name: "Pen" +} +item { + name: "/m/020kz" + id: 28 + display_name: "Cannon" +} +item { + name: "/m/09728" + id: 29 + display_name: "Bread" +} +item { + name: "/m/07j7r" + id: 30 + display_name: "Tree" +} +item { + name: "/m/0fbdv" + id: 31 + display_name: "Shellfish" +} +item { + name: "/m/03ssj5" + id: 32 + display_name: "Bed" +} +item { + name: "/m/03qrc" + id: 33 + display_name: "Hamster" +} +item { + name: "/m/02dl1y" + id: 34 + display_name: "Hat" +} +item { + name: "/m/01k6s3" + id: 35 + display_name: "Toaster" +} +item { + name: "/m/02jfl0" + id: 36 + display_name: "Sombrero" +} +item { + name: "/m/01krhy" + id: 37 + display_name: "Tiara" +} +item { + name: "/m/04kkgm" + id: 38 + display_name: "Bowl" +} +item { + name: "/m/0ft9s" + id: 39 + display_name: "Dragonfly" +} +item { + name: "/m/0d_2m" + id: 40 + display_name: "Moths and butterflies" +} +item { + name: "/m/0czz2" + id: 41 + display_name: "Antelope" +} +item { + name: "/m/0f4s2w" + id: 42 + display_name: "Vegetable" +} +item { + name: "/m/07dd4" + id: 43 + display_name: "Torch" +} +item { + name: "/m/0cgh4" + id: 44 + display_name: "Building" +} +item { + name: "/m/03bbps" + id: 45 + display_name: "Power plugs and sockets" +} +item { + name: "/m/02pjr4" + id: 46 + display_name: "Blender" +} +item { + name: "/m/04p0qw" + id: 47 + display_name: "Billiard table" +} +item { + name: "/m/02pdsw" + id: 48 + display_name: "Cutting board" +} +item { + name: "/m/01yx86" + id: 49 + display_name: "Bronze sculpture" +} +item { + name: "/m/09dzg" + id: 50 + display_name: "Turtle" +} +item { + name: "/m/0hkxq" + id: 51 + display_name: "Broccoli" +} +item { + name: "/m/07dm6" + id: 52 + display_name: "Tiger" +} +item { + name: "/m/054_l" + id: 53 + display_name: "Mirror" +} +item { + name: "/m/01dws" + id: 54 + display_name: "Bear" +} +item { + name: "/m/027pcv" + id: 55 + display_name: "Zucchini" +} +item { + name: "/m/01d40f" + id: 56 + display_name: "Dress" +} +item { + name: "/m/02rgn06" + id: 57 + display_name: "Volleyball" +} +item { + name: "/m/0342h" + id: 58 + display_name: "Guitar" +} +item { + name: "/m/06bt6" + id: 59 + display_name: "Reptile" +} +item { + name: "/m/0323sq" + id: 60 + display_name: "Golf cart" +} +item { + name: "/m/02zvsm" + id: 61 + display_name: "Tart" +} +item { + name: "/m/02fq_6" + id: 62 + display_name: "Fedora" +} +item { + name: "/m/01lrl" + id: 63 + display_name: "Carnivore" +} +item { + name: "/m/0k4j" + id: 64 + display_name: "Car" +} +item { + name: "/m/04h7h" + id: 65 + display_name: "Lighthouse" +} +item { + name: "/m/07xyvk" + id: 66 + display_name: "Coffeemaker" +} +item { + name: "/m/03y6mg" + id: 67 + display_name: "Food processor" +} +item { + name: "/m/07r04" + id: 68 + display_name: "Truck" +} +item { + name: "/m/03__z0" + id: 69 + display_name: "Bookcase" +} +item { + name: "/m/019w40" + id: 70 + display_name: "Surfboard" +} +item { + name: "/m/09j5n" + id: 71 + 
display_name: "Footwear" +} +item { + name: "/m/0cvnqh" + id: 72 + display_name: "Bench" +} +item { + name: "/m/01llwg" + id: 73 + display_name: "Necklace" +} +item { + name: "/m/0c9ph5" + id: 74 + display_name: "Flower" +} +item { + name: "/m/015x5n" + id: 75 + display_name: "Radish" +} +item { + name: "/m/0gd2v" + id: 76 + display_name: "Marine mammal" +} +item { + name: "/m/04v6l4" + id: 77 + display_name: "Frying pan" +} +item { + name: "/m/02jz0l" + id: 78 + display_name: "Tap" +} +item { + name: "/m/0dj6p" + id: 79 + display_name: "Peach" +} +item { + name: "/m/04ctx" + id: 80 + display_name: "Knife" +} +item { + name: "/m/080hkjn" + id: 81 + display_name: "Handbag" +} +item { + name: "/m/01c648" + id: 82 + display_name: "Laptop" +} +item { + name: "/m/01j61q" + id: 83 + display_name: "Tent" +} +item { + name: "/m/012n7d" + id: 84 + display_name: "Ambulance" +} +item { + name: "/m/025nd" + id: 85 + display_name: "Christmas tree" +} +item { + name: "/m/09csl" + id: 86 + display_name: "Eagle" +} +item { + name: "/m/01lcw4" + id: 87 + display_name: "Limousine" +} +item { + name: "/m/0h8n5zk" + id: 88 + display_name: "Kitchen & dining room table" +} +item { + name: "/m/0633h" + id: 89 + display_name: "Polar bear" +} +item { + name: "/m/01fdzj" + id: 90 + display_name: "Tower" +} +item { + name: "/m/01226z" + id: 91 + display_name: "Football" +} +item { + name: "/m/0mw_6" + id: 92 + display_name: "Willow" +} +item { + name: "/m/04hgtk" + id: 93 + display_name: "Human head" +} +item { + name: "/m/02pv19" + id: 94 + display_name: "Stop sign" +} +item { + name: "/m/09qck" + id: 95 + display_name: "Banana" +} +item { + name: "/m/063rgb" + id: 96 + display_name: "Mixer" +} +item { + name: "/m/0lt4_" + id: 97 + display_name: "Binoculars" +} +item { + name: "/m/0270h" + id: 98 + display_name: "Dessert" +} +item { + name: "/m/01h3n" + id: 99 + display_name: "Bee" +} +item { + name: "/m/01mzpv" + id: 100 + display_name: "Chair" +} +item { + name: "/m/04169hn" + id: 101 + display_name: "Wood-burning stove" +} +item { + name: "/m/0fm3zh" + id: 102 + display_name: "Flowerpot" +} +item { + name: "/m/0d20w4" + id: 103 + display_name: "Beaker" +} +item { + name: "/m/0_cp5" + id: 104 + display_name: "Oyster" +} +item { + name: "/m/01dy8n" + id: 105 + display_name: "Woodpecker" +} +item { + name: "/m/03m5k" + id: 106 + display_name: "Harp" +} +item { + name: "/m/03dnzn" + id: 107 + display_name: "Bathtub" +} +item { + name: "/m/0h8mzrc" + id: 108 + display_name: "Wall clock" +} +item { + name: "/m/0h8mhzd" + id: 109 + display_name: "Sports uniform" +} +item { + name: "/m/03d443" + id: 110 + display_name: "Rhinoceros" +} +item { + name: "/m/01gllr" + id: 111 + display_name: "Beehive" +} +item { + name: "/m/0642b4" + id: 112 + display_name: "Cupboard" +} +item { + name: "/m/09b5t" + id: 113 + display_name: "Chicken" +} +item { + name: "/m/04yx4" + id: 114 + display_name: "Man" +} +item { + name: "/m/01f8m5" + id: 115 + display_name: "Blue jay" +} +item { + name: "/m/015x4r" + id: 116 + display_name: "Cucumber" +} +item { + name: "/m/01j51" + id: 117 + display_name: "Balloon" +} +item { + name: "/m/02zt3" + id: 118 + display_name: "Kite" +} +item { + name: "/m/03tw93" + id: 119 + display_name: "Fireplace" +} +item { + name: "/m/01jfsr" + id: 120 + display_name: "Lantern" +} +item { + name: "/m/04ylt" + id: 121 + display_name: "Missile" +} +item { + name: "/m/0bt_c3" + id: 122 + display_name: "Book" +} +item { + name: "/m/0cmx8" + id: 123 + display_name: "Spoon" +} +item { + name: "/m/0hqkz" + id: 124 + 
display_name: "Grapefruit" +} +item { + name: "/m/071qp" + id: 125 + display_name: "Squirrel" +} +item { + name: "/m/0cyhj_" + id: 126 + display_name: "Orange" +} +item { + name: "/m/01xygc" + id: 127 + display_name: "Coat" +} +item { + name: "/m/0420v5" + id: 128 + display_name: "Punching bag" +} +item { + name: "/m/0898b" + id: 129 + display_name: "Zebra" +} +item { + name: "/m/01knjb" + id: 130 + display_name: "Billboard" +} +item { + name: "/m/0199g" + id: 131 + display_name: "Bicycle" +} +item { + name: "/m/03c7gz" + id: 132 + display_name: "Door handle" +} +item { + name: "/m/02x984l" + id: 133 + display_name: "Mechanical fan" +} +item { + name: "/m/04zwwv" + id: 134 + display_name: "Ring binder" +} +item { + name: "/m/04bcr3" + id: 135 + display_name: "Table" +} +item { + name: "/m/0gv1x" + id: 136 + display_name: "Parrot" +} +item { + name: "/m/01nq26" + id: 137 + display_name: "Sock" +} +item { + name: "/m/02s195" + id: 138 + display_name: "Vase" +} +item { + name: "/m/083kb" + id: 139 + display_name: "Weapon" +} +item { + name: "/m/06nrc" + id: 140 + display_name: "Shotgun" +} +item { + name: "/m/0jyfg" + id: 141 + display_name: "Glasses" +} +item { + name: "/m/0nybt" + id: 142 + display_name: "Seahorse" +} +item { + name: "/m/0176mf" + id: 143 + display_name: "Belt" +} +item { + name: "/m/01rzcn" + id: 144 + display_name: "Watercraft" +} +item { + name: "/m/0d4v4" + id: 145 + display_name: "Window" +} +item { + name: "/m/03bk1" + id: 146 + display_name: "Giraffe" +} +item { + name: "/m/096mb" + id: 147 + display_name: "Lion" +} +item { + name: "/m/0h9mv" + id: 148 + display_name: "Tire" +} +item { + name: "/m/07yv9" + id: 149 + display_name: "Vehicle" +} +item { + name: "/m/0ph39" + id: 150 + display_name: "Canoe" +} +item { + name: "/m/01rkbr" + id: 151 + display_name: "Tie" +} +item { + name: "/m/0gjbg72" + id: 152 + display_name: "Shelf" +} +item { + name: "/m/06z37_" + id: 153 + display_name: "Picture frame" +} +item { + name: "/m/01m4t" + id: 154 + display_name: "Printer" +} +item { + name: "/m/035r7c" + id: 155 + display_name: "Human leg" +} +item { + name: "/m/019jd" + id: 156 + display_name: "Boat" +} +item { + name: "/m/02tsc9" + id: 157 + display_name: "Slow cooker" +} +item { + name: "/m/015wgc" + id: 158 + display_name: "Croissant" +} +item { + name: "/m/0c06p" + id: 159 + display_name: "Candle" +} +item { + name: "/m/01dwwc" + id: 160 + display_name: "Pancake" +} +item { + name: "/m/034c16" + id: 161 + display_name: "Pillow" +} +item { + name: "/m/0242l" + id: 162 + display_name: "Coin" +} +item { + name: "/m/02lbcq" + id: 163 + display_name: "Stretcher" +} +item { + name: "/m/03nfch" + id: 164 + display_name: "Sandal" +} +item { + name: "/m/03bt1vf" + id: 165 + display_name: "Woman" +} +item { + name: "/m/01lynh" + id: 166 + display_name: "Stairs" +} +item { + name: "/m/03q5t" + id: 167 + display_name: "Harpsichord" +} +item { + name: "/m/0fqt361" + id: 168 + display_name: "Stool" +} +item { + name: "/m/01bjv" + id: 169 + display_name: "Bus" +} +item { + name: "/m/01s55n" + id: 170 + display_name: "Suitcase" +} +item { + name: "/m/0283dt1" + id: 171 + display_name: "Human mouth" +} +item { + name: "/m/01z1kdw" + id: 172 + display_name: "Juice" +} +item { + name: "/m/016m2d" + id: 173 + display_name: "Skull" +} +item { + name: "/m/02dgv" + id: 174 + display_name: "Door" +} +item { + name: "/m/07y_7" + id: 175 + display_name: "Violin" +} +item { + name: "/m/01_5g" + id: 176 + display_name: "Chopsticks" +} +item { + name: "/m/06_72j" + id: 177 + display_name: "Digital 
clock" +} +item { + name: "/m/0ftb8" + id: 178 + display_name: "Sunflower" +} +item { + name: "/m/0c29q" + id: 179 + display_name: "Leopard" +} +item { + name: "/m/0jg57" + id: 180 + display_name: "Bell pepper" +} +item { + name: "/m/02l8p9" + id: 181 + display_name: "Harbor seal" +} +item { + name: "/m/078jl" + id: 182 + display_name: "Snake" +} +item { + name: "/m/0llzx" + id: 183 + display_name: "Sewing machine" +} +item { + name: "/m/0dbvp" + id: 184 + display_name: "Goose" +} +item { + name: "/m/09ct_" + id: 185 + display_name: "Helicopter" +} +item { + name: "/m/0dkzw" + id: 186 + display_name: "Seat belt" +} +item { + name: "/m/02p5f1q" + id: 187 + display_name: "Coffee cup" +} +item { + name: "/m/0fx9l" + id: 188 + display_name: "Microwave oven" +} +item { + name: "/m/01b9xk" + id: 189 + display_name: "Hot dog" +} +item { + name: "/m/0b3fp9" + id: 190 + display_name: "Countertop" +} +item { + name: "/m/0h8n27j" + id: 191 + display_name: "Serving tray" +} +item { + name: "/m/0h8n6f9" + id: 192 + display_name: "Dog bed" +} +item { + name: "/m/01599" + id: 193 + display_name: "Beer" +} +item { + name: "/m/017ftj" + id: 194 + display_name: "Sunglasses" +} +item { + name: "/m/044r5d" + id: 195 + display_name: "Golf ball" +} +item { + name: "/m/01dwsz" + id: 196 + display_name: "Waffle" +} +item { + name: "/m/0cdl1" + id: 197 + display_name: "Palm tree" +} +item { + name: "/m/07gql" + id: 198 + display_name: "Trumpet" +} +item { + name: "/m/0hdln" + id: 199 + display_name: "Ruler" +} +item { + name: "/m/0zvk5" + id: 200 + display_name: "Helmet" +} +item { + name: "/m/012w5l" + id: 201 + display_name: "Ladder" +} +item { + name: "/m/021sj1" + id: 202 + display_name: "Office building" +} +item { + name: "/m/0bh9flk" + id: 203 + display_name: "Tablet computer" +} +item { + name: "/m/09gtd" + id: 204 + display_name: "Toilet paper" +} +item { + name: "/m/0jwn_" + id: 205 + display_name: "Pomegranate" +} +item { + name: "/m/02wv6h6" + id: 206 + display_name: "Skirt" +} +item { + name: "/m/02wv84t" + id: 207 + display_name: "Gas stove" +} +item { + name: "/m/021mn" + id: 208 + display_name: "Cookie" +} +item { + name: "/m/018p4k" + id: 209 + display_name: "Cart" +} +item { + name: "/m/06j2d" + id: 210 + display_name: "Raven" +} +item { + name: "/m/033cnk" + id: 211 + display_name: "Egg" +} +item { + name: "/m/01j3zr" + id: 212 + display_name: "Burrito" +} +item { + name: "/m/03fwl" + id: 213 + display_name: "Goat" +} +item { + name: "/m/058qzx" + id: 214 + display_name: "Kitchen knife" +} +item { + name: "/m/06_fw" + id: 215 + display_name: "Skateboard" +} +item { + name: "/m/02x8cch" + id: 216 + display_name: "Salt and pepper shakers" +} +item { + name: "/m/04g2r" + id: 217 + display_name: "Lynx" +} +item { + name: "/m/01b638" + id: 218 + display_name: "Boot" +} +item { + name: "/m/099ssp" + id: 219 + display_name: "Platter" +} +item { + name: "/m/071p9" + id: 220 + display_name: "Ski" +} +item { + name: "/m/01gkx_" + id: 221 + display_name: "Swimwear" +} +item { + name: "/m/0b_rs" + id: 222 + display_name: "Swimming pool" +} +item { + name: "/m/03v5tg" + id: 223 + display_name: "Drinking straw" +} +item { + name: "/m/01j5ks" + id: 224 + display_name: "Wrench" +} +item { + name: "/m/026t6" + id: 225 + display_name: "Drum" +} +item { + name: "/m/0_k2" + id: 226 + display_name: "Ant" +} +item { + name: "/m/039xj_" + id: 227 + display_name: "Human ear" +} +item { + name: "/m/01b7fy" + id: 228 + display_name: "Headphones" +} +item { + name: "/m/0220r2" + id: 229 + display_name: "Fountain" +} +item 
{ + name: "/m/015p6" + id: 230 + display_name: "Bird" +} +item { + name: "/m/0fly7" + id: 231 + display_name: "Jeans" +} +item { + name: "/m/07c52" + id: 232 + display_name: "Television" +} +item { + name: "/m/0n28_" + id: 233 + display_name: "Crab" +} +item { + name: "/m/0hg7b" + id: 234 + display_name: "Microphone" +} +item { + name: "/m/019dx1" + id: 235 + display_name: "Home appliance" +} +item { + name: "/m/04vv5k" + id: 236 + display_name: "Snowplow" +} +item { + name: "/m/020jm" + id: 237 + display_name: "Beetle" +} +item { + name: "/m/047v4b" + id: 238 + display_name: "Artichoke" +} +item { + name: "/m/01xs3r" + id: 239 + display_name: "Jet ski" +} +item { + name: "/m/03kt2w" + id: 240 + display_name: "Stationary bicycle" +} +item { + name: "/m/03q69" + id: 241 + display_name: "Human hair" +} +item { + name: "/m/01dxs" + id: 242 + display_name: "Brown bear" +} +item { + name: "/m/01h8tj" + id: 243 + display_name: "Starfish" +} +item { + name: "/m/0dt3t" + id: 244 + display_name: "Fork" +} +item { + name: "/m/0cjq5" + id: 245 + display_name: "Lobster" +} +item { + name: "/m/0h8lkj8" + id: 246 + display_name: "Corded phone" +} +item { + name: "/m/0271t" + id: 247 + display_name: "Drink" +} +item { + name: "/m/03q5c7" + id: 248 + display_name: "Saucer" +} +item { + name: "/m/0fj52s" + id: 249 + display_name: "Carrot" +} +item { + name: "/m/03vt0" + id: 250 + display_name: "Insect" +} +item { + name: "/m/01x3z" + id: 251 + display_name: "Clock" +} +item { + name: "/m/0d5gx" + id: 252 + display_name: "Castle" +} +item { + name: "/m/0h8my_4" + id: 253 + display_name: "Tennis racket" +} +item { + name: "/m/03ldnb" + id: 254 + display_name: "Ceiling fan" +} +item { + name: "/m/0cjs7" + id: 255 + display_name: "Asparagus" +} +item { + name: "/m/0449p" + id: 256 + display_name: "Jaguar" +} +item { + name: "/m/04szw" + id: 257 + display_name: "Musical instrument" +} +item { + name: "/m/07jdr" + id: 258 + display_name: "Train" +} +item { + name: "/m/01yrx" + id: 259 + display_name: "Cat" +} +item { + name: "/m/06c54" + id: 260 + display_name: "Rifle" +} +item { + name: "/m/04h8sr" + id: 261 + display_name: "Dumbbell" +} +item { + name: "/m/050k8" + id: 262 + display_name: "Mobile phone" +} +item { + name: "/m/0pg52" + id: 263 + display_name: "Taxi" +} +item { + name: "/m/02f9f_" + id: 264 + display_name: "Shower" +} +item { + name: "/m/054fyh" + id: 265 + display_name: "Pitcher" +} +item { + name: "/m/09k_b" + id: 266 + display_name: "Lemon" +} +item { + name: "/m/03xxp" + id: 267 + display_name: "Invertebrate" +} +item { + name: "/m/0jly1" + id: 268 + display_name: "Turkey" +} +item { + name: "/m/06k2mb" + id: 269 + display_name: "High heels" +} +item { + name: "/m/04yqq2" + id: 270 + display_name: "Bust" +} +item { + name: "/m/0bwd_0j" + id: 271 + display_name: "Elephant" +} +item { + name: "/m/02h19r" + id: 272 + display_name: "Scarf" +} +item { + name: "/m/02zn6n" + id: 273 + display_name: "Barrel" +} +item { + name: "/m/07c6l" + id: 274 + display_name: "Trombone" +} +item { + name: "/m/05zsy" + id: 275 + display_name: "Pumpkin" +} +item { + name: "/m/025dyy" + id: 276 + display_name: "Box" +} +item { + name: "/m/07j87" + id: 277 + display_name: "Tomato" +} +item { + name: "/m/09ld4" + id: 278 + display_name: "Frog" +} +item { + name: "/m/01vbnl" + id: 279 + display_name: "Bidet" +} +item { + name: "/m/0dzct" + id: 280 + display_name: "Human face" +} +item { + name: "/m/03fp41" + id: 281 + display_name: "Houseplant" +} +item { + name: "/m/0h2r6" + id: 282 + display_name: "Van" +} +item { + 
name: "/m/0by6g" + id: 283 + display_name: "Shark" +} +item { + name: "/m/0cxn2" + id: 284 + display_name: "Ice cream" +} +item { + name: "/m/04tn4x" + id: 285 + display_name: "Swim cap" +} +item { + name: "/m/0f6wt" + id: 286 + display_name: "Falcon" +} +item { + name: "/m/05n4y" + id: 287 + display_name: "Ostrich" +} +item { + name: "/m/0gxl3" + id: 288 + display_name: "Handgun" +} +item { + name: "/m/02d9qx" + id: 289 + display_name: "Whiteboard" +} +item { + name: "/m/04m9y" + id: 290 + display_name: "Lizard" +} +item { + name: "/m/05z55" + id: 291 + display_name: "Pasta" +} +item { + name: "/m/01x3jk" + id: 292 + display_name: "Snowmobile" +} +item { + name: "/m/0h8l4fh" + id: 293 + display_name: "Light bulb" +} +item { + name: "/m/031b6r" + id: 294 + display_name: "Window blind" +} +item { + name: "/m/01tcjp" + id: 295 + display_name: "Muffin" +} +item { + name: "/m/01f91_" + id: 296 + display_name: "Pretzel" +} +item { + name: "/m/02522" + id: 297 + display_name: "Computer monitor" +} +item { + name: "/m/0319l" + id: 298 + display_name: "Horn" +} +item { + name: "/m/0c_jw" + id: 299 + display_name: "Furniture" +} +item { + name: "/m/0l515" + id: 300 + display_name: "Sandwich" +} +item { + name: "/m/0306r" + id: 301 + display_name: "Fox" +} +item { + name: "/m/0crjs" + id: 302 + display_name: "Convenience store" +} +item { + name: "/m/0ch_cf" + id: 303 + display_name: "Fish" +} +item { + name: "/m/02xwb" + id: 304 + display_name: "Fruit" +} +item { + name: "/m/01r546" + id: 305 + display_name: "Earrings" +} +item { + name: "/m/03rszm" + id: 306 + display_name: "Curtain" +} +item { + name: "/m/0388q" + id: 307 + display_name: "Grape" +} +item { + name: "/m/03m3pdh" + id: 308 + display_name: "Sofa bed" +} +item { + name: "/m/03k3r" + id: 309 + display_name: "Horse" +} +item { + name: "/m/0hf58v5" + id: 310 + display_name: "Luggage and bags" +} +item { + name: "/m/01y9k5" + id: 311 + display_name: "Desk" +} +item { + name: "/m/05441v" + id: 312 + display_name: "Crutch" +} +item { + name: "/m/03p3bw" + id: 313 + display_name: "Bicycle helmet" +} +item { + name: "/m/0175cv" + id: 314 + display_name: "Tick" +} +item { + name: "/m/0cmf2" + id: 315 + display_name: "Airplane" +} +item { + name: "/m/0ccs93" + id: 316 + display_name: "Canary" +} +item { + name: "/m/02d1br" + id: 317 + display_name: "Spatula" +} +item { + name: "/m/0gjkl" + id: 318 + display_name: "Watch" +} +item { + name: "/m/0jqgx" + id: 319 + display_name: "Lily" +} +item { + name: "/m/0h99cwc" + id: 320 + display_name: "Kitchen appliance" +} +item { + name: "/m/047j0r" + id: 321 + display_name: "Filing cabinet" +} +item { + name: "/m/0k5j" + id: 322 + display_name: "Aircraft" +} +item { + name: "/m/0h8n6ft" + id: 323 + display_name: "Cake stand" +} +item { + name: "/m/0gm28" + id: 324 + display_name: "Candy" +} +item { + name: "/m/0130jx" + id: 325 + display_name: "Sink" +} +item { + name: "/m/04rmv" + id: 326 + display_name: "Mouse" +} +item { + name: "/m/081qc" + id: 327 + display_name: "Wine" +} +item { + name: "/m/0qmmr" + id: 328 + display_name: "Wheelchair" +} +item { + name: "/m/03fj2" + id: 329 + display_name: "Goldfish" +} +item { + name: "/m/040b_t" + id: 330 + display_name: "Refrigerator" +} +item { + name: "/m/02y6n" + id: 331 + display_name: "French fries" +} +item { + name: "/m/0fqfqc" + id: 332 + display_name: "Drawer" +} +item { + name: "/m/030610" + id: 333 + display_name: "Treadmill" +} +item { + name: "/m/07kng9" + id: 334 + display_name: "Picnic basket" +} +item { + name: "/m/029b3" + id: 335 + 
display_name: "Dice" +} +item { + name: "/m/0fbw6" + id: 336 + display_name: "Cabbage" +} +item { + name: "/m/07qxg_" + id: 337 + display_name: "Football helmet" +} +item { + name: "/m/068zj" + id: 338 + display_name: "Pig" +} +item { + name: "/m/01g317" + id: 339 + display_name: "Person" +} +item { + name: "/m/01bfm9" + id: 340 + display_name: "Shorts" +} +item { + name: "/m/02068x" + id: 341 + display_name: "Gondola" +} +item { + name: "/m/0fz0h" + id: 342 + display_name: "Honeycomb" +} +item { + name: "/m/0jy4k" + id: 343 + display_name: "Doughnut" +} +item { + name: "/m/05kyg_" + id: 344 + display_name: "Chest of drawers" +} +item { + name: "/m/01prls" + id: 345 + display_name: "Land vehicle" +} +item { + name: "/m/01h44" + id: 346 + display_name: "Bat" +} +item { + name: "/m/08pbxl" + id: 347 + display_name: "Monkey" +} +item { + name: "/m/02gzp" + id: 348 + display_name: "Dagger" +} +item { + name: "/m/04brg2" + id: 349 + display_name: "Tableware" +} +item { + name: "/m/031n1" + id: 350 + display_name: "Human foot" +} +item { + name: "/m/02jvh9" + id: 351 + display_name: "Mug" +} +item { + name: "/m/046dlr" + id: 352 + display_name: "Alarm clock" +} +item { + name: "/m/0h8ntjv" + id: 353 + display_name: "Pressure cooker" +} +item { + name: "/m/0k65p" + id: 354 + display_name: "Human hand" +} +item { + name: "/m/011k07" + id: 355 + display_name: "Tortoise" +} +item { + name: "/m/03grzl" + id: 356 + display_name: "Baseball glove" +} +item { + name: "/m/06y5r" + id: 357 + display_name: "Sword" +} +item { + name: "/m/061_f" + id: 358 + display_name: "Pear" +} +item { + name: "/m/01cmb2" + id: 359 + display_name: "Miniskirt" +} +item { + name: "/m/01mqdt" + id: 360 + display_name: "Traffic sign" +} +item { + name: "/m/05r655" + id: 361 + display_name: "Girl" +} +item { + name: "/m/02p3w7d" + id: 362 + display_name: "Roller skates" +} +item { + name: "/m/029tx" + id: 363 + display_name: "Dinosaur" +} +item { + name: "/m/04m6gz" + id: 364 + display_name: "Porch" +} +item { + name: "/m/015h_t" + id: 365 + display_name: "Human beard" +} +item { + name: "/m/06pcq" + id: 366 + display_name: "Submarine sandwich" +} +item { + name: "/m/01bms0" + id: 367 + display_name: "Screwdriver" +} +item { + name: "/m/07fbm7" + id: 368 + display_name: "Strawberry" +} +item { + name: "/m/09tvcd" + id: 369 + display_name: "Wine glass" +} +item { + name: "/m/06nwz" + id: 370 + display_name: "Seafood" +} +item { + name: "/m/0dv9c" + id: 371 + display_name: "Racket" +} +item { + name: "/m/083wq" + id: 372 + display_name: "Wheel" +} +item { + name: "/m/0gd36" + id: 373 + display_name: "Sea lion" +} +item { + name: "/m/0138tl" + id: 374 + display_name: "Toy" +} +item { + name: "/m/07clx" + id: 375 + display_name: "Tea" +} +item { + name: "/m/05ctyq" + id: 376 + display_name: "Tennis ball" +} +item { + name: "/m/0bjyj5" + id: 377 + display_name: "Waste container" +} +item { + name: "/m/0dbzx" + id: 378 + display_name: "Mule" +} +item { + name: "/m/02ctlc" + id: 379 + display_name: "Cricket ball" +} +item { + name: "/m/0fp6w" + id: 380 + display_name: "Pineapple" +} +item { + name: "/m/0djtd" + id: 381 + display_name: "Coconut" +} +item { + name: "/m/0167gd" + id: 382 + display_name: "Doll" +} +item { + name: "/m/078n6m" + id: 383 + display_name: "Coffee table" +} +item { + name: "/m/0152hh" + id: 384 + display_name: "Snowman" +} +item { + name: "/m/04gth" + id: 385 + display_name: "Lavender" +} +item { + name: "/m/0ll1f78" + id: 386 + display_name: "Shrimp" +} +item { + name: "/m/0cffdh" + id: 387 + display_name: 
"Maple" +} +item { + name: "/m/025rp__" + id: 388 + display_name: "Cowboy hat" +} +item { + name: "/m/02_n6y" + id: 389 + display_name: "Goggles" +} +item { + name: "/m/0wdt60w" + id: 390 + display_name: "Rugby ball" +} +item { + name: "/m/0cydv" + id: 391 + display_name: "Caterpillar" +} +item { + name: "/m/01n5jq" + id: 392 + display_name: "Poster" +} +item { + name: "/m/09rvcxw" + id: 393 + display_name: "Rocket" +} +item { + name: "/m/013y1f" + id: 394 + display_name: "Organ" +} +item { + name: "/m/06ncr" + id: 395 + display_name: "Saxophone" +} +item { + name: "/m/015qff" + id: 396 + display_name: "Traffic light" +} +item { + name: "/m/024g6" + id: 397 + display_name: "Cocktail" +} +item { + name: "/m/05gqfk" + id: 398 + display_name: "Plastic bag" +} +item { + name: "/m/0dv77" + id: 399 + display_name: "Squash" +} +item { + name: "/m/052sf" + id: 400 + display_name: "Mushroom" +} +item { + name: "/m/0cdn1" + id: 401 + display_name: "Hamburger" +} +item { + name: "/m/03jbxj" + id: 402 + display_name: "Light switch" +} +item { + name: "/m/0cyfs" + id: 403 + display_name: "Parachute" +} +item { + name: "/m/0kmg4" + id: 404 + display_name: "Teddy bear" +} +item { + name: "/m/02cvgx" + id: 405 + display_name: "Winter melon" +} +item { + name: "/m/09kx5" + id: 406 + display_name: "Deer" +} +item { + name: "/m/057cc" + id: 407 + display_name: "Musical keyboard" +} +item { + name: "/m/02pkr5" + id: 408 + display_name: "Plumbing fixture" +} +item { + name: "/m/057p5t" + id: 409 + display_name: "Scoreboard" +} +item { + name: "/m/03g8mr" + id: 410 + display_name: "Baseball bat" +} +item { + name: "/m/0frqm" + id: 411 + display_name: "Envelope" +} +item { + name: "/m/03m3vtv" + id: 412 + display_name: "Adhesive tape" +} +item { + name: "/m/0584n8" + id: 413 + display_name: "Briefcase" +} +item { + name: "/m/014y4n" + id: 414 + display_name: "Paddle" +} +item { + name: "/m/01g3x7" + id: 415 + display_name: "Bow and arrow" +} +item { + name: "/m/07cx4" + id: 416 + display_name: "Telephone" +} +item { + name: "/m/07bgp" + id: 417 + display_name: "Sheep" +} +item { + name: "/m/032b3c" + id: 418 + display_name: "Jacket" +} +item { + name: "/m/01bl7v" + id: 419 + display_name: "Boy" +} +item { + name: "/m/0663v" + id: 420 + display_name: "Pizza" +} +item { + name: "/m/0cn6p" + id: 421 + display_name: "Otter" +} +item { + name: "/m/02rdsp" + id: 422 + display_name: "Office supplies" +} +item { + name: "/m/02crq1" + id: 423 + display_name: "Couch" +} +item { + name: "/m/01xqw" + id: 424 + display_name: "Cello" +} +item { + name: "/m/0cnyhnx" + id: 425 + display_name: "Bull" +} +item { + name: "/m/01x_v" + id: 426 + display_name: "Camel" +} +item { + name: "/m/018xm" + id: 427 + display_name: "Ball" +} +item { + name: "/m/09ddx" + id: 428 + display_name: "Duck" +} +item { + name: "/m/084zz" + id: 429 + display_name: "Whale" +} +item { + name: "/m/01n4qj" + id: 430 + display_name: "Shirt" +} +item { + name: "/m/07cmd" + id: 431 + display_name: "Tank" +} +item { + name: "/m/04_sv" + id: 432 + display_name: "Motorcycle" +} +item { + name: "/m/0mkg" + id: 433 + display_name: "Accordion" +} +item { + name: "/m/09d5_" + id: 434 + display_name: "Owl" +} +item { + name: "/m/0c568" + id: 435 + display_name: "Porcupine" +} +item { + name: "/m/02wbtzl" + id: 436 + display_name: "Sun hat" +} +item { + name: "/m/05bm6" + id: 437 + display_name: "Nail" +} +item { + name: "/m/01lsmm" + id: 438 + display_name: "Scissors" +} +item { + name: "/m/0dftk" + id: 439 + display_name: "Swan" +} +item { + name: "/m/0dtln" + id: 
440 + display_name: "Lamp" +} +item { + name: "/m/0nl46" + id: 441 + display_name: "Crown" +} +item { + name: "/m/05r5c" + id: 442 + display_name: "Piano" +} +item { + name: "/m/06msq" + id: 443 + display_name: "Sculpture" +} +item { + name: "/m/0cd4d" + id: 444 + display_name: "Cheetah" +} +item { + name: "/m/05kms" + id: 445 + display_name: "Oboe" +} +item { + name: "/m/02jnhm" + id: 446 + display_name: "Tin can" +} +item { + name: "/m/0fldg" + id: 447 + display_name: "Mango" +} +item { + name: "/m/073bxn" + id: 448 + display_name: "Tripod" +} +item { + name: "/m/029bxz" + id: 449 + display_name: "Oven" +} +item { + name: "/m/020lf" + id: 450 + display_name: "Computer mouse" +} +item { + name: "/m/01btn" + id: 451 + display_name: "Barge" +} +item { + name: "/m/02vqfm" + id: 452 + display_name: "Coffee" +} +item { + name: "/m/06__v" + id: 453 + display_name: "Snowboard" +} +item { + name: "/m/043nyj" + id: 454 + display_name: "Common fig" +} +item { + name: "/m/0grw1" + id: 455 + display_name: "Salad" +} +item { + name: "/m/03hl4l9" + id: 456 + display_name: "Marine invertebrates" +} +item { + name: "/m/0hnnb" + id: 457 + display_name: "Umbrella" +} +item { + name: "/m/04c0y" + id: 458 + display_name: "Kangaroo" +} +item { + name: "/m/0dzf4" + id: 459 + display_name: "Human arm" +} +item { + name: "/m/07v9_z" + id: 460 + display_name: "Measuring cup" +} +item { + name: "/m/0f9_l" + id: 461 + display_name: "Snail" +} +item { + name: "/m/0703r8" + id: 462 + display_name: "Loveseat" +} +item { + name: "/m/01xyhv" + id: 463 + display_name: "Suit" +} +item { + name: "/m/01fh4r" + id: 464 + display_name: "Teapot" +} +item { + name: "/m/04dr76w" + id: 465 + display_name: "Bottle" +} +item { + name: "/m/0pcr" + id: 466 + display_name: "Alpaca" +} +item { + name: "/m/03s_tn" + id: 467 + display_name: "Kettle" +} +item { + name: "/m/07mhn" + id: 468 + display_name: "Trousers" +} +item { + name: "/m/01hrv5" + id: 469 + display_name: "Popcorn" +} +item { + name: "/m/019h78" + id: 470 + display_name: "Centipede" +} +item { + name: "/m/09kmb" + id: 471 + display_name: "Spider" +} +item { + name: "/m/0h23m" + id: 472 + display_name: "Sparrow" +} +item { + name: "/m/050gv4" + id: 473 + display_name: "Plate" +} +item { + name: "/m/01fb_0" + id: 474 + display_name: "Bagel" +} +item { + name: "/m/02w3_ws" + id: 475 + display_name: "Personal care" +} +item { + name: "/m/014j1m" + id: 476 + display_name: "Apple" +} +item { + name: "/m/01gmv2" + id: 477 + display_name: "Brassiere" +} +item { + name: "/m/04y4h8h" + id: 478 + display_name: "Bathroom cabinet" +} +item { + name: "/m/026qbn5" + id: 479 + display_name: "Studio couch" +} +item { + name: "/m/01m2v" + id: 480 + display_name: "Computer keyboard" +} +item { + name: "/m/05_5p_0" + id: 481 + display_name: "Table tennis racket" +} +item { + name: "/m/07030" + id: 482 + display_name: "Sushi" +} +item { + name: "/m/01s105" + id: 483 + display_name: "Cabinetry" +} +item { + name: "/m/033rq4" + id: 484 + display_name: "Street light" +} +item { + name: "/m/0162_1" + id: 485 + display_name: "Towel" +} +item { + name: "/m/02z51p" + id: 486 + display_name: "Nightstand" +} +item { + name: "/m/06mf6" + id: 487 + display_name: "Rabbit" +} +item { + name: "/m/02hj4" + id: 488 + display_name: "Dolphin" +} +item { + name: "/m/0bt9lr" + id: 489 + display_name: "Dog" +} +item { + name: "/m/08hvt4" + id: 490 + display_name: "Jug" +} +item { + name: "/m/084rd" + id: 491 + display_name: "Wok" +} +item { + name: "/m/01pns0" + id: 492 + display_name: "Fire hydrant" +} +item { + 
name: "/m/014sv8" + id: 493 + display_name: "Human eye" +} +item { + name: "/m/079cl" + id: 494 + display_name: "Skyscraper" +} +item { + name: "/m/01940j" + id: 495 + display_name: "Backpack" +} +item { + name: "/m/05vtc" + id: 496 + display_name: "Potato" +} +item { + name: "/m/02w3r3" + id: 497 + display_name: "Paper towel" +} +item { + name: "/m/054xkw" + id: 498 + display_name: "Lifejacket" +} +item { + name: "/m/01bqk0" + id: 499 + display_name: "Bicycle wheel" +} +item { + name: "/m/09g1w" + id: 500 + display_name: "Toilet" +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/oid_v4_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/oid_v4_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..643b9e8ed5d9239a3248b895fb32f3b51caa92f3 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/oid_v4_label_map.pbtxt @@ -0,0 +1,3005 @@ +item { + name: "/m/011k07" + id: 1 + display_name: "Tortoise" +} +item { + name: "/m/011q46kg" + id: 2 + display_name: "Container" +} +item { + name: "/m/012074" + id: 3 + display_name: "Magpie" +} +item { + name: "/m/0120dh" + id: 4 + display_name: "Sea turtle" +} +item { + name: "/m/01226z" + id: 5 + display_name: "Football" +} +item { + name: "/m/012n7d" + id: 6 + display_name: "Ambulance" +} +item { + name: "/m/012w5l" + id: 7 + display_name: "Ladder" +} +item { + name: "/m/012xff" + id: 8 + display_name: "Toothbrush" +} +item { + name: "/m/012ysf" + id: 9 + display_name: "Syringe" +} +item { + name: "/m/0130jx" + id: 10 + display_name: "Sink" +} +item { + name: "/m/0138tl" + id: 11 + display_name: "Toy" +} +item { + name: "/m/013y1f" + id: 12 + display_name: "Organ" +} +item { + name: "/m/01432t" + id: 13 + display_name: "Cassette deck" +} +item { + name: "/m/014j1m" + id: 14 + display_name: "Apple" +} +item { + name: "/m/014sv8" + id: 15 + display_name: "Human eye" +} +item { + name: "/m/014trl" + id: 16 + display_name: "Cosmetics" +} +item { + name: "/m/014y4n" + id: 17 + display_name: "Paddle" +} +item { + name: "/m/0152hh" + id: 18 + display_name: "Snowman" +} +item { + name: "/m/01599" + id: 19 + display_name: "Beer" +} +item { + name: "/m/01_5g" + id: 20 + display_name: "Chopsticks" +} +item { + name: "/m/015h_t" + id: 21 + display_name: "Human beard" +} +item { + name: "/m/015p6" + id: 22 + display_name: "Bird" +} +item { + name: "/m/015qbp" + id: 23 + display_name: "Parking meter" +} +item { + name: "/m/015qff" + id: 24 + display_name: "Traffic light" +} +item { + name: "/m/015wgc" + id: 25 + display_name: "Croissant" +} +item { + name: "/m/015x4r" + id: 26 + display_name: "Cucumber" +} +item { + name: "/m/015x5n" + id: 27 + display_name: "Radish" +} +item { + name: "/m/0162_1" + id: 28 + display_name: "Towel" +} +item { + name: "/m/0167gd" + id: 29 + display_name: "Doll" +} +item { + name: "/m/016m2d" + id: 30 + display_name: "Skull" +} +item { + name: "/m/0174k2" + id: 31 + display_name: "Washing machine" +} +item { + name: "/m/0174n1" + id: 32 + display_name: "Glove" +} +item { + name: "/m/0175cv" + id: 33 + display_name: "Tick" +} +item { + name: "/m/0176mf" + id: 34 + display_name: "Belt" +} +item { + name: "/m/017ftj" + id: 35 + display_name: "Sunglasses" +} +item { + name: "/m/018j2" + id: 36 + display_name: "Banjo" +} +item { + name: "/m/018p4k" + id: 37 + display_name: "Cart" +} +item { + name: "/m/018xm" + id: 38 + display_name: "Ball" +} +item { + name: "/m/01940j" + id: 39 + display_name: 
"Backpack" +} +item { + name: "/m/0199g" + id: 40 + display_name: "Bicycle" +} +item { + name: "/m/019dx1" + id: 41 + display_name: "Home appliance" +} +item { + name: "/m/019h78" + id: 42 + display_name: "Centipede" +} +item { + name: "/m/019jd" + id: 43 + display_name: "Boat" +} +item { + name: "/m/019w40" + id: 44 + display_name: "Surfboard" +} +item { + name: "/m/01b638" + id: 45 + display_name: "Boot" +} +item { + name: "/m/01b7fy" + id: 46 + display_name: "Headphones" +} +item { + name: "/m/01b9xk" + id: 47 + display_name: "Hot dog" +} +item { + name: "/m/01bfm9" + id: 48 + display_name: "Shorts" +} +item { + name: "/m/01_bhs" + id: 49 + display_name: "Fast food" +} +item { + name: "/m/01bjv" + id: 50 + display_name: "Bus" +} +item { + name: "/m/01bl7v" + id: 51 + display_name: "Boy" +} +item { + name: "/m/01bms0" + id: 52 + display_name: "Screwdriver" +} +item { + name: "/m/01bqk0" + id: 53 + display_name: "Bicycle wheel" +} +item { + name: "/m/01btn" + id: 54 + display_name: "Barge" +} +item { + name: "/m/01c648" + id: 55 + display_name: "Laptop" +} +item { + name: "/m/01cmb2" + id: 56 + display_name: "Miniskirt" +} +item { + name: "/m/01d380" + id: 57 + display_name: "Drill" +} +item { + name: "/m/01d40f" + id: 58 + display_name: "Dress" +} +item { + name: "/m/01dws" + id: 59 + display_name: "Bear" +} +item { + name: "/m/01dwsz" + id: 60 + display_name: "Waffle" +} +item { + name: "/m/01dwwc" + id: 61 + display_name: "Pancake" +} +item { + name: "/m/01dxs" + id: 62 + display_name: "Brown bear" +} +item { + name: "/m/01dy8n" + id: 63 + display_name: "Woodpecker" +} +item { + name: "/m/01f8m5" + id: 64 + display_name: "Blue jay" +} +item { + name: "/m/01f91_" + id: 65 + display_name: "Pretzel" +} +item { + name: "/m/01fb_0" + id: 66 + display_name: "Bagel" +} +item { + name: "/m/01fdzj" + id: 67 + display_name: "Tower" +} +item { + name: "/m/01fh4r" + id: 68 + display_name: "Teapot" +} +item { + name: "/m/01g317" + id: 69 + display_name: "Person" +} +item { + name: "/m/01g3x7" + id: 70 + display_name: "Bow and arrow" +} +item { + name: "/m/01gkx_" + id: 71 + display_name: "Swimwear" +} +item { + name: "/m/01gllr" + id: 72 + display_name: "Beehive" +} +item { + name: "/m/01gmv2" + id: 73 + display_name: "Brassiere" +} +item { + name: "/m/01h3n" + id: 74 + display_name: "Bee" +} +item { + name: "/m/01h44" + id: 75 + display_name: "Bat" +} +item { + name: "/m/01h8tj" + id: 76 + display_name: "Starfish" +} +item { + name: "/m/01hrv5" + id: 77 + display_name: "Popcorn" +} +item { + name: "/m/01j3zr" + id: 78 + display_name: "Burrito" +} +item { + name: "/m/01j4z9" + id: 79 + display_name: "Chainsaw" +} +item { + name: "/m/01j51" + id: 80 + display_name: "Balloon" +} +item { + name: "/m/01j5ks" + id: 81 + display_name: "Wrench" +} +item { + name: "/m/01j61q" + id: 82 + display_name: "Tent" +} +item { + name: "/m/01jfm_" + id: 83 + display_name: "Vehicle registration plate" +} +item { + name: "/m/01jfsr" + id: 84 + display_name: "Lantern" +} +item { + name: "/m/01k6s3" + id: 85 + display_name: "Toaster" +} +item { + name: "/m/01kb5b" + id: 86 + display_name: "Flashlight" +} +item { + name: "/m/01knjb" + id: 87 + display_name: "Billboard" +} +item { + name: "/m/01krhy" + id: 88 + display_name: "Tiara" +} +item { + name: "/m/01lcw4" + id: 89 + display_name: "Limousine" +} +item { + name: "/m/01llwg" + id: 90 + display_name: "Necklace" +} +item { + name: "/m/01lrl" + id: 91 + display_name: "Carnivore" +} +item { + name: "/m/01lsmm" + id: 92 + display_name: "Scissors" +} +item { + name: 
"/m/01lynh" + id: 93 + display_name: "Stairs" +} +item { + name: "/m/01m2v" + id: 94 + display_name: "Computer keyboard" +} +item { + name: "/m/01m4t" + id: 95 + display_name: "Printer" +} +item { + name: "/m/01mqdt" + id: 96 + display_name: "Traffic sign" +} +item { + name: "/m/01mzpv" + id: 97 + display_name: "Chair" +} +item { + name: "/m/01n4qj" + id: 98 + display_name: "Shirt" +} +item { + name: "/m/01n5jq" + id: 99 + display_name: "Poster" +} +item { + name: "/m/01nkt" + id: 100 + display_name: "Cheese" +} +item { + name: "/m/01nq26" + id: 101 + display_name: "Sock" +} +item { + name: "/m/01pns0" + id: 102 + display_name: "Fire hydrant" +} +item { + name: "/m/01prls" + id: 103 + display_name: "Land vehicle" +} +item { + name: "/m/01r546" + id: 104 + display_name: "Earrings" +} +item { + name: "/m/01rkbr" + id: 105 + display_name: "Tie" +} +item { + name: "/m/01rzcn" + id: 106 + display_name: "Watercraft" +} +item { + name: "/m/01s105" + id: 107 + display_name: "Cabinetry" +} +item { + name: "/m/01s55n" + id: 108 + display_name: "Suitcase" +} +item { + name: "/m/01tcjp" + id: 109 + display_name: "Muffin" +} +item { + name: "/m/01vbnl" + id: 110 + display_name: "Bidet" +} +item { + name: "/m/01ww8y" + id: 111 + display_name: "Snack" +} +item { + name: "/m/01x3jk" + id: 112 + display_name: "Snowmobile" +} +item { + name: "/m/01x3z" + id: 113 + display_name: "Clock" +} +item { + name: "/m/01xgg_" + id: 114 + display_name: "Medical equipment" +} +item { + name: "/m/01xq0k1" + id: 115 + display_name: "Cattle" +} +item { + name: "/m/01xqw" + id: 116 + display_name: "Cello" +} +item { + name: "/m/01xs3r" + id: 117 + display_name: "Jet ski" +} +item { + name: "/m/01x_v" + id: 118 + display_name: "Camel" +} +item { + name: "/m/01xygc" + id: 119 + display_name: "Coat" +} +item { + name: "/m/01xyhv" + id: 120 + display_name: "Suit" +} +item { + name: "/m/01y9k5" + id: 121 + display_name: "Desk" +} +item { + name: "/m/01yrx" + id: 122 + display_name: "Cat" +} +item { + name: "/m/01yx86" + id: 123 + display_name: "Bronze sculpture" +} +item { + name: "/m/01z1kdw" + id: 124 + display_name: "Juice" +} +item { + name: "/m/02068x" + id: 125 + display_name: "Gondola" +} +item { + name: "/m/020jm" + id: 126 + display_name: "Beetle" +} +item { + name: "/m/020kz" + id: 127 + display_name: "Cannon" +} +item { + name: "/m/020lf" + id: 128 + display_name: "Computer mouse" +} +item { + name: "/m/021mn" + id: 129 + display_name: "Cookie" +} +item { + name: "/m/021sj1" + id: 130 + display_name: "Office building" +} +item { + name: "/m/0220r2" + id: 131 + display_name: "Fountain" +} +item { + name: "/m/0242l" + id: 132 + display_name: "Coin" +} +item { + name: "/m/024d2" + id: 133 + display_name: "Calculator" +} +item { + name: "/m/024g6" + id: 134 + display_name: "Cocktail" +} +item { + name: "/m/02522" + id: 135 + display_name: "Computer monitor" +} +item { + name: "/m/025dyy" + id: 136 + display_name: "Box" +} +item { + name: "/m/025fsf" + id: 137 + display_name: "Stapler" +} +item { + name: "/m/025nd" + id: 138 + display_name: "Christmas tree" +} +item { + name: "/m/025rp__" + id: 139 + display_name: "Cowboy hat" +} +item { + name: "/m/0268lbt" + id: 140 + display_name: "Hiking equipment" +} +item { + name: "/m/026qbn5" + id: 141 + display_name: "Studio couch" +} +item { + name: "/m/026t6" + id: 142 + display_name: "Drum" +} +item { + name: "/m/0270h" + id: 143 + display_name: "Dessert" +} +item { + name: "/m/0271qf7" + id: 144 + display_name: "Wine rack" +} +item { + name: "/m/0271t" + id: 145 + 
display_name: "Drink" +} +item { + name: "/m/027pcv" + id: 146 + display_name: "Zucchini" +} +item { + name: "/m/027rl48" + id: 147 + display_name: "Ladle" +} +item { + name: "/m/0283dt1" + id: 148 + display_name: "Human mouth" +} +item { + name: "/m/0284d" + id: 149 + display_name: "Dairy" +} +item { + name: "/m/029b3" + id: 150 + display_name: "Dice" +} +item { + name: "/m/029bxz" + id: 151 + display_name: "Oven" +} +item { + name: "/m/029tx" + id: 152 + display_name: "Dinosaur" +} +item { + name: "/m/02bm9n" + id: 153 + display_name: "Ratchet" +} +item { + name: "/m/02crq1" + id: 154 + display_name: "Couch" +} +item { + name: "/m/02ctlc" + id: 155 + display_name: "Cricket ball" +} +item { + name: "/m/02cvgx" + id: 156 + display_name: "Winter melon" +} +item { + name: "/m/02d1br" + id: 157 + display_name: "Spatula" +} +item { + name: "/m/02d9qx" + id: 158 + display_name: "Whiteboard" +} +item { + name: "/m/02ddwp" + id: 159 + display_name: "Pencil sharpener" +} +item { + name: "/m/02dgv" + id: 160 + display_name: "Door" +} +item { + name: "/m/02dl1y" + id: 161 + display_name: "Hat" +} +item { + name: "/m/02f9f_" + id: 162 + display_name: "Shower" +} +item { + name: "/m/02fh7f" + id: 163 + display_name: "Eraser" +} +item { + name: "/m/02fq_6" + id: 164 + display_name: "Fedora" +} +item { + name: "/m/02g30s" + id: 165 + display_name: "Guacamole" +} +item { + name: "/m/02gzp" + id: 166 + display_name: "Dagger" +} +item { + name: "/m/02h19r" + id: 167 + display_name: "Scarf" +} +item { + name: "/m/02hj4" + id: 168 + display_name: "Dolphin" +} +item { + name: "/m/02jfl0" + id: 169 + display_name: "Sombrero" +} +item { + name: "/m/02jnhm" + id: 170 + display_name: "Tin can" +} +item { + name: "/m/02jvh9" + id: 171 + display_name: "Mug" +} +item { + name: "/m/02jz0l" + id: 172 + display_name: "Tap" +} +item { + name: "/m/02l8p9" + id: 173 + display_name: "Harbor seal" +} +item { + name: "/m/02lbcq" + id: 174 + display_name: "Stretcher" +} +item { + name: "/m/02mqfb" + id: 175 + display_name: "Can opener" +} +item { + name: "/m/02_n6y" + id: 176 + display_name: "Goggles" +} +item { + name: "/m/02p0tk3" + id: 177 + display_name: "Human body" +} +item { + name: "/m/02p3w7d" + id: 178 + display_name: "Roller skates" +} +item { + name: "/m/02p5f1q" + id: 179 + display_name: "Coffee cup" +} +item { + name: "/m/02pdsw" + id: 180 + display_name: "Cutting board" +} +item { + name: "/m/02pjr4" + id: 181 + display_name: "Blender" +} +item { + name: "/m/02pkr5" + id: 182 + display_name: "Plumbing fixture" +} +item { + name: "/m/02pv19" + id: 183 + display_name: "Stop sign" +} +item { + name: "/m/02rdsp" + id: 184 + display_name: "Office supplies" +} +item { + name: "/m/02rgn06" + id: 185 + display_name: "Volleyball" +} +item { + name: "/m/02s195" + id: 186 + display_name: "Vase" +} +item { + name: "/m/02tsc9" + id: 187 + display_name: "Slow cooker" +} +item { + name: "/m/02vkqh8" + id: 188 + display_name: "Wardrobe" +} +item { + name: "/m/02vqfm" + id: 189 + display_name: "Coffee" +} +item { + name: "/m/02vwcm" + id: 190 + display_name: "Whisk" +} +item { + name: "/m/02w3r3" + id: 191 + display_name: "Paper towel" +} +item { + name: "/m/02w3_ws" + id: 192 + display_name: "Personal care" +} +item { + name: "/m/02wbm" + id: 193 + display_name: "Food" +} +item { + name: "/m/02wbtzl" + id: 194 + display_name: "Sun hat" +} +item { + name: "/m/02wg_p" + id: 195 + display_name: "Tree house" +} +item { + name: "/m/02wmf" + id: 196 + display_name: "Flying disc" +} +item { + name: "/m/02wv6h6" + id: 197 + 
display_name: "Skirt" +} +item { + name: "/m/02wv84t" + id: 198 + display_name: "Gas stove" +} +item { + name: "/m/02x8cch" + id: 199 + display_name: "Salt and pepper shakers" +} +item { + name: "/m/02x984l" + id: 200 + display_name: "Mechanical fan" +} +item { + name: "/m/02xb7qb" + id: 201 + display_name: "Face powder" +} +item { + name: "/m/02xqq" + id: 202 + display_name: "Fax" +} +item { + name: "/m/02xwb" + id: 203 + display_name: "Fruit" +} +item { + name: "/m/02y6n" + id: 204 + display_name: "French fries" +} +item { + name: "/m/02z51p" + id: 205 + display_name: "Nightstand" +} +item { + name: "/m/02zn6n" + id: 206 + display_name: "Barrel" +} +item { + name: "/m/02zt3" + id: 207 + display_name: "Kite" +} +item { + name: "/m/02zvsm" + id: 208 + display_name: "Tart" +} +item { + name: "/m/030610" + id: 209 + display_name: "Treadmill" +} +item { + name: "/m/0306r" + id: 210 + display_name: "Fox" +} +item { + name: "/m/03120" + id: 211 + display_name: "Flag" +} +item { + name: "/m/0319l" + id: 212 + display_name: "Horn" +} +item { + name: "/m/031b6r" + id: 213 + display_name: "Window blind" +} +item { + name: "/m/031n1" + id: 214 + display_name: "Human foot" +} +item { + name: "/m/0323sq" + id: 215 + display_name: "Golf cart" +} +item { + name: "/m/032b3c" + id: 216 + display_name: "Jacket" +} +item { + name: "/m/033cnk" + id: 217 + display_name: "Egg" +} +item { + name: "/m/033rq4" + id: 218 + display_name: "Street light" +} +item { + name: "/m/0342h" + id: 219 + display_name: "Guitar" +} +item { + name: "/m/034c16" + id: 220 + display_name: "Pillow" +} +item { + name: "/m/035r7c" + id: 221 + display_name: "Human leg" +} +item { + name: "/m/035vxb" + id: 222 + display_name: "Isopod" +} +item { + name: "/m/0388q" + id: 223 + display_name: "Grape" +} +item { + name: "/m/039xj_" + id: 224 + display_name: "Human ear" +} +item { + name: "/m/03bbps" + id: 225 + display_name: "Power plugs and sockets" +} +item { + name: "/m/03bj1" + id: 226 + display_name: "Panda" +} +item { + name: "/m/03bk1" + id: 227 + display_name: "Giraffe" +} +item { + name: "/m/03bt1vf" + id: 228 + display_name: "Woman" +} +item { + name: "/m/03c7gz" + id: 229 + display_name: "Door handle" +} +item { + name: "/m/03d443" + id: 230 + display_name: "Rhinoceros" +} +item { + name: "/m/03dnzn" + id: 231 + display_name: "Bathtub" +} +item { + name: "/m/03fj2" + id: 232 + display_name: "Goldfish" +} +item { + name: "/m/03fp41" + id: 233 + display_name: "Houseplant" +} +item { + name: "/m/03fwl" + id: 234 + display_name: "Goat" +} +item { + name: "/m/03g8mr" + id: 235 + display_name: "Baseball bat" +} +item { + name: "/m/03grzl" + id: 236 + display_name: "Baseball glove" +} +item { + name: "/m/03hj559" + id: 237 + display_name: "Mixing bowl" +} +item { + name: "/m/03hl4l9" + id: 238 + display_name: "Marine invertebrates" +} +item { + name: "/m/03hlz0c" + id: 239 + display_name: "Kitchen utensil" +} +item { + name: "/m/03jbxj" + id: 240 + display_name: "Light switch" +} +item { + name: "/m/03jm5" + id: 241 + display_name: "House" +} +item { + name: "/m/03k3r" + id: 242 + display_name: "Horse" +} +item { + name: "/m/03kt2w" + id: 243 + display_name: "Stationary bicycle" +} +item { + name: "/m/03l9g" + id: 244 + display_name: "Hammer" +} +item { + name: "/m/03ldnb" + id: 245 + display_name: "Ceiling fan" +} +item { + name: "/m/03m3pdh" + id: 246 + display_name: "Sofa bed" +} +item { + name: "/m/03m3vtv" + id: 247 + display_name: "Adhesive tape" +} +item { + name: "/m/03m5k" + id: 248 + display_name: "Harp" +} +item { + name: 
"/m/03nfch" + id: 249 + display_name: "Sandal" +} +item { + name: "/m/03p3bw" + id: 250 + display_name: "Bicycle helmet" +} +item { + name: "/m/03q5c7" + id: 251 + display_name: "Saucer" +} +item { + name: "/m/03q5t" + id: 252 + display_name: "Harpsichord" +} +item { + name: "/m/03q69" + id: 253 + display_name: "Human hair" +} +item { + name: "/m/03qhv5" + id: 254 + display_name: "Heater" +} +item { + name: "/m/03qjg" + id: 255 + display_name: "Harmonica" +} +item { + name: "/m/03qrc" + id: 256 + display_name: "Hamster" +} +item { + name: "/m/03rszm" + id: 257 + display_name: "Curtain" +} +item { + name: "/m/03ssj5" + id: 258 + display_name: "Bed" +} +item { + name: "/m/03s_tn" + id: 259 + display_name: "Kettle" +} +item { + name: "/m/03tw93" + id: 260 + display_name: "Fireplace" +} +item { + name: "/m/03txqz" + id: 261 + display_name: "Scale" +} +item { + name: "/m/03v5tg" + id: 262 + display_name: "Drinking straw" +} +item { + name: "/m/03vt0" + id: 263 + display_name: "Insect" +} +item { + name: "/m/03wvsk" + id: 264 + display_name: "Hair dryer" +} +item { + name: "/m/03_wxk" + id: 265 + display_name: "Kitchenware" +} +item { + name: "/m/03wym" + id: 266 + display_name: "Indoor rower" +} +item { + name: "/m/03xxp" + id: 267 + display_name: "Invertebrate" +} +item { + name: "/m/03y6mg" + id: 268 + display_name: "Food processor" +} +item { + name: "/m/03__z0" + id: 269 + display_name: "Bookcase" +} +item { + name: "/m/040b_t" + id: 270 + display_name: "Refrigerator" +} +item { + name: "/m/04169hn" + id: 271 + display_name: "Wood-burning stove" +} +item { + name: "/m/0420v5" + id: 272 + display_name: "Punching bag" +} +item { + name: "/m/043nyj" + id: 273 + display_name: "Common fig" +} +item { + name: "/m/0440zs" + id: 274 + display_name: "Cocktail shaker" +} +item { + name: "/m/0449p" + id: 275 + display_name: "Jaguar" +} +item { + name: "/m/044r5d" + id: 276 + display_name: "Golf ball" +} +item { + name: "/m/0463sg" + id: 277 + display_name: "Fashion accessory" +} +item { + name: "/m/046dlr" + id: 278 + display_name: "Alarm clock" +} +item { + name: "/m/047j0r" + id: 279 + display_name: "Filing cabinet" +} +item { + name: "/m/047v4b" + id: 280 + display_name: "Artichoke" +} +item { + name: "/m/04bcr3" + id: 281 + display_name: "Table" +} +item { + name: "/m/04brg2" + id: 282 + display_name: "Tableware" +} +item { + name: "/m/04c0y" + id: 283 + display_name: "Kangaroo" +} +item { + name: "/m/04cp_" + id: 284 + display_name: "Koala" +} +item { + name: "/m/04ctx" + id: 285 + display_name: "Knife" +} +item { + name: "/m/04dr76w" + id: 286 + display_name: "Bottle" +} +item { + name: "/m/04f5ws" + id: 287 + display_name: "Bottle opener" +} +item { + name: "/m/04g2r" + id: 288 + display_name: "Lynx" +} +item { + name: "/m/04gth" + id: 289 + display_name: "Lavender" +} +item { + name: "/m/04h7h" + id: 290 + display_name: "Lighthouse" +} +item { + name: "/m/04h8sr" + id: 291 + display_name: "Dumbbell" +} +item { + name: "/m/04hgtk" + id: 292 + display_name: "Human head" +} +item { + name: "/m/04kkgm" + id: 293 + display_name: "Bowl" +} +item { + name: "/m/04lvq_" + id: 294 + display_name: "Humidifier" +} +item { + name: "/m/04m6gz" + id: 295 + display_name: "Porch" +} +item { + name: "/m/04m9y" + id: 296 + display_name: "Lizard" +} +item { + name: "/m/04p0qw" + id: 297 + display_name: "Billiard table" +} +item { + name: "/m/04rky" + id: 298 + display_name: "Mammal" +} +item { + name: "/m/04rmv" + id: 299 + display_name: "Mouse" +} +item { + name: "/m/04_sv" + id: 300 + display_name: "Motorcycle" 
+} +item { + name: "/m/04szw" + id: 301 + display_name: "Musical instrument" +} +item { + name: "/m/04tn4x" + id: 302 + display_name: "Swim cap" +} +item { + name: "/m/04v6l4" + id: 303 + display_name: "Frying pan" +} +item { + name: "/m/04vv5k" + id: 304 + display_name: "Snowplow" +} +item { + name: "/m/04y4h8h" + id: 305 + display_name: "Bathroom cabinet" +} +item { + name: "/m/04ylt" + id: 306 + display_name: "Missile" +} +item { + name: "/m/04yqq2" + id: 307 + display_name: "Bust" +} +item { + name: "/m/04yx4" + id: 308 + display_name: "Man" +} +item { + name: "/m/04z4wx" + id: 309 + display_name: "Waffle iron" +} +item { + name: "/m/04zpv" + id: 310 + display_name: "Milk" +} +item { + name: "/m/04zwwv" + id: 311 + display_name: "Ring binder" +} +item { + name: "/m/050gv4" + id: 312 + display_name: "Plate" +} +item { + name: "/m/050k8" + id: 313 + display_name: "Mobile phone" +} +item { + name: "/m/052lwg6" + id: 314 + display_name: "Baked goods" +} +item { + name: "/m/052sf" + id: 315 + display_name: "Mushroom" +} +item { + name: "/m/05441v" + id: 316 + display_name: "Crutch" +} +item { + name: "/m/054fyh" + id: 317 + display_name: "Pitcher" +} +item { + name: "/m/054_l" + id: 318 + display_name: "Mirror" +} +item { + name: "/m/054xkw" + id: 319 + display_name: "Lifejacket" +} +item { + name: "/m/05_5p_0" + id: 320 + display_name: "Table tennis racket" +} +item { + name: "/m/05676x" + id: 321 + display_name: "Pencil case" +} +item { + name: "/m/057cc" + id: 322 + display_name: "Musical keyboard" +} +item { + name: "/m/057p5t" + id: 323 + display_name: "Scoreboard" +} +item { + name: "/m/0584n8" + id: 324 + display_name: "Briefcase" +} +item { + name: "/m/058qzx" + id: 325 + display_name: "Kitchen knife" +} +item { + name: "/m/05bm6" + id: 326 + display_name: "Nail" +} +item { + name: "/m/05ctyq" + id: 327 + display_name: "Tennis ball" +} +item { + name: "/m/05gqfk" + id: 328 + display_name: "Plastic bag" +} +item { + name: "/m/05kms" + id: 329 + display_name: "Oboe" +} +item { + name: "/m/05kyg_" + id: 330 + display_name: "Chest of drawers" +} +item { + name: "/m/05n4y" + id: 331 + display_name: "Ostrich" +} +item { + name: "/m/05r5c" + id: 332 + display_name: "Piano" +} +item { + name: "/m/05r655" + id: 333 + display_name: "Girl" +} +item { + name: "/m/05s2s" + id: 334 + display_name: "Plant" +} +item { + name: "/m/05vtc" + id: 335 + display_name: "Potato" +} +item { + name: "/m/05w9t9" + id: 336 + display_name: "Hair spray" +} +item { + name: "/m/05y5lj" + id: 337 + display_name: "Sports equipment" +} +item { + name: "/m/05z55" + id: 338 + display_name: "Pasta" +} +item { + name: "/m/05z6w" + id: 339 + display_name: "Penguin" +} +item { + name: "/m/05zsy" + id: 340 + display_name: "Pumpkin" +} +item { + name: "/m/061_f" + id: 341 + display_name: "Pear" +} +item { + name: "/m/061hd_" + id: 342 + display_name: "Infant bed" +} +item { + name: "/m/0633h" + id: 343 + display_name: "Polar bear" +} +item { + name: "/m/063rgb" + id: 344 + display_name: "Mixer" +} +item { + name: "/m/0642b4" + id: 345 + display_name: "Cupboard" +} +item { + name: "/m/065h6l" + id: 346 + display_name: "Jacuzzi" +} +item { + name: "/m/0663v" + id: 347 + display_name: "Pizza" +} +item { + name: "/m/06_72j" + id: 348 + display_name: "Digital clock" +} +item { + name: "/m/068zj" + id: 349 + display_name: "Pig" +} +item { + name: "/m/06bt6" + id: 350 + display_name: "Reptile" +} +item { + name: "/m/06c54" + id: 351 + display_name: "Rifle" +} +item { + name: "/m/06c7f7" + id: 352 + display_name: "Lipstick" +} +item 
{ + name: "/m/06_fw" + id: 353 + display_name: "Skateboard" +} +item { + name: "/m/06j2d" + id: 354 + display_name: "Raven" +} +item { + name: "/m/06k2mb" + id: 355 + display_name: "High heels" +} +item { + name: "/m/06l9r" + id: 356 + display_name: "Red panda" +} +item { + name: "/m/06m11" + id: 357 + display_name: "Rose" +} +item { + name: "/m/06mf6" + id: 358 + display_name: "Rabbit" +} +item { + name: "/m/06msq" + id: 359 + display_name: "Sculpture" +} +item { + name: "/m/06ncr" + id: 360 + display_name: "Saxophone" +} +item { + name: "/m/06nrc" + id: 361 + display_name: "Shotgun" +} +item { + name: "/m/06nwz" + id: 362 + display_name: "Seafood" +} +item { + name: "/m/06pcq" + id: 363 + display_name: "Submarine sandwich" +} +item { + name: "/m/06__v" + id: 364 + display_name: "Snowboard" +} +item { + name: "/m/06y5r" + id: 365 + display_name: "Sword" +} +item { + name: "/m/06z37_" + id: 366 + display_name: "Picture frame" +} +item { + name: "/m/07030" + id: 367 + display_name: "Sushi" +} +item { + name: "/m/0703r8" + id: 368 + display_name: "Loveseat" +} +item { + name: "/m/071p9" + id: 369 + display_name: "Ski" +} +item { + name: "/m/071qp" + id: 370 + display_name: "Squirrel" +} +item { + name: "/m/073bxn" + id: 371 + display_name: "Tripod" +} +item { + name: "/m/073g6" + id: 372 + display_name: "Stethoscope" +} +item { + name: "/m/074d1" + id: 373 + display_name: "Submarine" +} +item { + name: "/m/0755b" + id: 374 + display_name: "Scorpion" +} +item { + name: "/m/076bq" + id: 375 + display_name: "Segway" +} +item { + name: "/m/076lb9" + id: 376 + display_name: "Training bench" +} +item { + name: "/m/078jl" + id: 377 + display_name: "Snake" +} +item { + name: "/m/078n6m" + id: 378 + display_name: "Coffee table" +} +item { + name: "/m/079cl" + id: 379 + display_name: "Skyscraper" +} +item { + name: "/m/07bgp" + id: 380 + display_name: "Sheep" +} +item { + name: "/m/07c52" + id: 381 + display_name: "Television" +} +item { + name: "/m/07c6l" + id: 382 + display_name: "Trombone" +} +item { + name: "/m/07clx" + id: 383 + display_name: "Tea" +} +item { + name: "/m/07cmd" + id: 384 + display_name: "Tank" +} +item { + name: "/m/07crc" + id: 385 + display_name: "Taco" +} +item { + name: "/m/07cx4" + id: 386 + display_name: "Telephone" +} +item { + name: "/m/07dd4" + id: 387 + display_name: "Torch" +} +item { + name: "/m/07dm6" + id: 388 + display_name: "Tiger" +} +item { + name: "/m/07fbm7" + id: 389 + display_name: "Strawberry" +} +item { + name: "/m/07gql" + id: 390 + display_name: "Trumpet" +} +item { + name: "/m/07j7r" + id: 391 + display_name: "Tree" +} +item { + name: "/m/07j87" + id: 392 + display_name: "Tomato" +} +item { + name: "/m/07jdr" + id: 393 + display_name: "Train" +} +item { + name: "/m/07k1x" + id: 394 + display_name: "Tool" +} +item { + name: "/m/07kng9" + id: 395 + display_name: "Picnic basket" +} +item { + name: "/m/07mcwg" + id: 396 + display_name: "Cooking spray" +} +item { + name: "/m/07mhn" + id: 397 + display_name: "Trousers" +} +item { + name: "/m/07pj7bq" + id: 398 + display_name: "Bowling equipment" +} +item { + name: "/m/07qxg_" + id: 399 + display_name: "Football helmet" +} +item { + name: "/m/07r04" + id: 400 + display_name: "Truck" +} +item { + name: "/m/07v9_z" + id: 401 + display_name: "Measuring cup" +} +item { + name: "/m/07xyvk" + id: 402 + display_name: "Coffeemaker" +} +item { + name: "/m/07y_7" + id: 403 + display_name: "Violin" +} +item { + name: "/m/07yv9" + id: 404 + display_name: "Vehicle" +} +item { + name: "/m/080hkjn" + id: 405 + display_name: 
"Handbag" +} +item { + name: "/m/080n7g" + id: 406 + display_name: "Paper cutter" +} +item { + name: "/m/081qc" + id: 407 + display_name: "Wine" +} +item { + name: "/m/083kb" + id: 408 + display_name: "Weapon" +} +item { + name: "/m/083wq" + id: 409 + display_name: "Wheel" +} +item { + name: "/m/084hf" + id: 410 + display_name: "Worm" +} +item { + name: "/m/084rd" + id: 411 + display_name: "Wok" +} +item { + name: "/m/084zz" + id: 412 + display_name: "Whale" +} +item { + name: "/m/0898b" + id: 413 + display_name: "Zebra" +} +item { + name: "/m/08dz3q" + id: 414 + display_name: "Auto part" +} +item { + name: "/m/08hvt4" + id: 415 + display_name: "Jug" +} +item { + name: "/m/08ks85" + id: 416 + display_name: "Pizza cutter" +} +item { + name: "/m/08p92x" + id: 417 + display_name: "Cream" +} +item { + name: "/m/08pbxl" + id: 418 + display_name: "Monkey" +} +item { + name: "/m/096mb" + id: 419 + display_name: "Lion" +} +item { + name: "/m/09728" + id: 420 + display_name: "Bread" +} +item { + name: "/m/099ssp" + id: 421 + display_name: "Platter" +} +item { + name: "/m/09b5t" + id: 422 + display_name: "Chicken" +} +item { + name: "/m/09csl" + id: 423 + display_name: "Eagle" +} +item { + name: "/m/09ct_" + id: 424 + display_name: "Helicopter" +} +item { + name: "/m/09d5_" + id: 425 + display_name: "Owl" +} +item { + name: "/m/09ddx" + id: 426 + display_name: "Duck" +} +item { + name: "/m/09dzg" + id: 427 + display_name: "Turtle" +} +item { + name: "/m/09f20" + id: 428 + display_name: "Hippopotamus" +} +item { + name: "/m/09f_2" + id: 429 + display_name: "Crocodile" +} +item { + name: "/m/09g1w" + id: 430 + display_name: "Toilet" +} +item { + name: "/m/09gtd" + id: 431 + display_name: "Toilet paper" +} +item { + name: "/m/09gys" + id: 432 + display_name: "Squid" +} +item { + name: "/m/09j2d" + id: 433 + display_name: "Clothing" +} +item { + name: "/m/09j5n" + id: 434 + display_name: "Footwear" +} +item { + name: "/m/09k_b" + id: 435 + display_name: "Lemon" +} +item { + name: "/m/09kmb" + id: 436 + display_name: "Spider" +} +item { + name: "/m/09kx5" + id: 437 + display_name: "Deer" +} +item { + name: "/m/09ld4" + id: 438 + display_name: "Frog" +} +item { + name: "/m/09qck" + id: 439 + display_name: "Banana" +} +item { + name: "/m/09rvcxw" + id: 440 + display_name: "Rocket" +} +item { + name: "/m/09tvcd" + id: 441 + display_name: "Wine glass" +} +item { + name: "/m/0b3fp9" + id: 442 + display_name: "Countertop" +} +item { + name: "/m/0bh9flk" + id: 443 + display_name: "Tablet computer" +} +item { + name: "/m/0bjyj5" + id: 444 + display_name: "Waste container" +} +item { + name: "/m/0b_rs" + id: 445 + display_name: "Swimming pool" +} +item { + name: "/m/0bt9lr" + id: 446 + display_name: "Dog" +} +item { + name: "/m/0bt_c3" + id: 447 + display_name: "Book" +} +item { + name: "/m/0bwd_0j" + id: 448 + display_name: "Elephant" +} +item { + name: "/m/0by6g" + id: 449 + display_name: "Shark" +} +item { + name: "/m/0c06p" + id: 450 + display_name: "Candle" +} +item { + name: "/m/0c29q" + id: 451 + display_name: "Leopard" +} +item { + name: "/m/0c2jj" + id: 452 + display_name: "Axe" +} +item { + name: "/m/0c3m8g" + id: 453 + display_name: "Hand dryer" +} +item { + name: "/m/0c3mkw" + id: 454 + display_name: "Soap dispenser" +} +item { + name: "/m/0c568" + id: 455 + display_name: "Porcupine" +} +item { + name: "/m/0c9ph5" + id: 456 + display_name: "Flower" +} +item { + name: "/m/0ccs93" + id: 457 + display_name: "Canary" +} +item { + name: "/m/0cd4d" + id: 458 + display_name: "Cheetah" +} +item { + name: 
"/m/0cdl1" + id: 459 + display_name: "Palm tree" +} +item { + name: "/m/0cdn1" + id: 460 + display_name: "Hamburger" +} +item { + name: "/m/0cffdh" + id: 461 + display_name: "Maple" +} +item { + name: "/m/0cgh4" + id: 462 + display_name: "Building" +} +item { + name: "/m/0ch_cf" + id: 463 + display_name: "Fish" +} +item { + name: "/m/0cjq5" + id: 464 + display_name: "Lobster" +} +item { + name: "/m/0cjs7" + id: 465 + display_name: "Asparagus" +} +item { + name: "/m/0c_jw" + id: 466 + display_name: "Furniture" +} +item { + name: "/m/0cl4p" + id: 467 + display_name: "Hedgehog" +} +item { + name: "/m/0cmf2" + id: 468 + display_name: "Airplane" +} +item { + name: "/m/0cmx8" + id: 469 + display_name: "Spoon" +} +item { + name: "/m/0cn6p" + id: 470 + display_name: "Otter" +} +item { + name: "/m/0cnyhnx" + id: 471 + display_name: "Bull" +} +item { + name: "/m/0_cp5" + id: 472 + display_name: "Oyster" +} +item { + name: "/m/0cqn2" + id: 473 + display_name: "Horizontal bar" +} +item { + name: "/m/0crjs" + id: 474 + display_name: "Convenience store" +} +item { + name: "/m/0ct4f" + id: 475 + display_name: "Bomb" +} +item { + name: "/m/0cvnqh" + id: 476 + display_name: "Bench" +} +item { + name: "/m/0cxn2" + id: 477 + display_name: "Ice cream" +} +item { + name: "/m/0cydv" + id: 478 + display_name: "Caterpillar" +} +item { + name: "/m/0cyf8" + id: 479 + display_name: "Butterfly" +} +item { + name: "/m/0cyfs" + id: 480 + display_name: "Parachute" +} +item { + name: "/m/0cyhj_" + id: 481 + display_name: "Orange" +} +item { + name: "/m/0czz2" + id: 482 + display_name: "Antelope" +} +item { + name: "/m/0d20w4" + id: 483 + display_name: "Beaker" +} +item { + name: "/m/0d_2m" + id: 484 + display_name: "Moths and butterflies" +} +item { + name: "/m/0d4v4" + id: 485 + display_name: "Window" +} +item { + name: "/m/0d4w1" + id: 486 + display_name: "Closet" +} +item { + name: "/m/0d5gx" + id: 487 + display_name: "Castle" +} +item { + name: "/m/0d8zb" + id: 488 + display_name: "Jellyfish" +} +item { + name: "/m/0dbvp" + id: 489 + display_name: "Goose" +} +item { + name: "/m/0dbzx" + id: 490 + display_name: "Mule" +} +item { + name: "/m/0dftk" + id: 491 + display_name: "Swan" +} +item { + name: "/m/0dj6p" + id: 492 + display_name: "Peach" +} +item { + name: "/m/0djtd" + id: 493 + display_name: "Coconut" +} +item { + name: "/m/0dkzw" + id: 494 + display_name: "Seat belt" +} +item { + name: "/m/0dq75" + id: 495 + display_name: "Raccoon" +} +item { + name: "/m/0_dqb" + id: 496 + display_name: "Chisel" +} +item { + name: "/m/0dt3t" + id: 497 + display_name: "Fork" +} +item { + name: "/m/0dtln" + id: 498 + display_name: "Lamp" +} +item { + name: "/m/0dv5r" + id: 499 + display_name: "Camera" +} +item { + name: "/m/0dv77" + id: 500 + display_name: "Squash" +} +item { + name: "/m/0dv9c" + id: 501 + display_name: "Racket" +} +item { + name: "/m/0dzct" + id: 502 + display_name: "Human face" +} +item { + name: "/m/0dzf4" + id: 503 + display_name: "Human arm" +} +item { + name: "/m/0f4s2w" + id: 504 + display_name: "Vegetable" +} +item { + name: "/m/0f571" + id: 505 + display_name: "Diaper" +} +item { + name: "/m/0f6nr" + id: 506 + display_name: "Unicycle" +} +item { + name: "/m/0f6wt" + id: 507 + display_name: "Falcon" +} +item { + name: "/m/0f8s22" + id: 508 + display_name: "Chime" +} +item { + name: "/m/0f9_l" + id: 509 + display_name: "Snail" +} +item { + name: "/m/0fbdv" + id: 510 + display_name: "Shellfish" +} +item { + name: "/m/0fbw6" + id: 511 + display_name: "Cabbage" +} +item { + name: "/m/0fj52s" + id: 512 + 
display_name: "Carrot" +} +item { + name: "/m/0fldg" + id: 513 + display_name: "Mango" +} +item { + name: "/m/0fly7" + id: 514 + display_name: "Jeans" +} +item { + name: "/m/0fm3zh" + id: 515 + display_name: "Flowerpot" +} +item { + name: "/m/0fp6w" + id: 516 + display_name: "Pineapple" +} +item { + name: "/m/0fqfqc" + id: 517 + display_name: "Drawer" +} +item { + name: "/m/0fqt361" + id: 518 + display_name: "Stool" +} +item { + name: "/m/0frqm" + id: 519 + display_name: "Envelope" +} +item { + name: "/m/0fszt" + id: 520 + display_name: "Cake" +} +item { + name: "/m/0ft9s" + id: 521 + display_name: "Dragonfly" +} +item { + name: "/m/0ftb8" + id: 522 + display_name: "Sunflower" +} +item { + name: "/m/0fx9l" + id: 523 + display_name: "Microwave oven" +} +item { + name: "/m/0fz0h" + id: 524 + display_name: "Honeycomb" +} +item { + name: "/m/0gd2v" + id: 525 + display_name: "Marine mammal" +} +item { + name: "/m/0gd36" + id: 526 + display_name: "Sea lion" +} +item { + name: "/m/0gj37" + id: 527 + display_name: "Ladybug" +} +item { + name: "/m/0gjbg72" + id: 528 + display_name: "Shelf" +} +item { + name: "/m/0gjkl" + id: 529 + display_name: "Watch" +} +item { + name: "/m/0gm28" + id: 530 + display_name: "Candy" +} +item { + name: "/m/0grw1" + id: 531 + display_name: "Salad" +} +item { + name: "/m/0gv1x" + id: 532 + display_name: "Parrot" +} +item { + name: "/m/0gxl3" + id: 533 + display_name: "Handgun" +} +item { + name: "/m/0h23m" + id: 534 + display_name: "Sparrow" +} +item { + name: "/m/0h2r6" + id: 535 + display_name: "Van" +} +item { + name: "/m/0h8jyh6" + id: 536 + display_name: "Grinder" +} +item { + name: "/m/0h8kx63" + id: 537 + display_name: "Spice rack" +} +item { + name: "/m/0h8l4fh" + id: 538 + display_name: "Light bulb" +} +item { + name: "/m/0h8lkj8" + id: 539 + display_name: "Corded phone" +} +item { + name: "/m/0h8mhzd" + id: 540 + display_name: "Sports uniform" +} +item { + name: "/m/0h8my_4" + id: 541 + display_name: "Tennis racket" +} +item { + name: "/m/0h8mzrc" + id: 542 + display_name: "Wall clock" +} +item { + name: "/m/0h8n27j" + id: 543 + display_name: "Serving tray" +} +item { + name: "/m/0h8n5zk" + id: 544 + display_name: "Kitchen & dining room table" +} +item { + name: "/m/0h8n6f9" + id: 545 + display_name: "Dog bed" +} +item { + name: "/m/0h8n6ft" + id: 546 + display_name: "Cake stand" +} +item { + name: "/m/0h8nm9j" + id: 547 + display_name: "Cat furniture" +} +item { + name: "/m/0h8nr_l" + id: 548 + display_name: "Bathroom accessory" +} +item { + name: "/m/0h8nsvg" + id: 549 + display_name: "Facial tissue holder" +} +item { + name: "/m/0h8ntjv" + id: 550 + display_name: "Pressure cooker" +} +item { + name: "/m/0h99cwc" + id: 551 + display_name: "Kitchen appliance" +} +item { + name: "/m/0h9mv" + id: 552 + display_name: "Tire" +} +item { + name: "/m/0hdln" + id: 553 + display_name: "Ruler" +} +item { + name: "/m/0hf58v5" + id: 554 + display_name: "Luggage and bags" +} +item { + name: "/m/0hg7b" + id: 555 + display_name: "Microphone" +} +item { + name: "/m/0hkxq" + id: 556 + display_name: "Broccoli" +} +item { + name: "/m/0hnnb" + id: 557 + display_name: "Umbrella" +} +item { + name: "/m/0hnyx" + id: 558 + display_name: "Pastry" +} +item { + name: "/m/0hqkz" + id: 559 + display_name: "Grapefruit" +} +item { + name: "/m/0j496" + id: 560 + display_name: "Band-aid" +} +item { + name: "/m/0jbk" + id: 561 + display_name: "Animal" +} +item { + name: "/m/0jg57" + id: 562 + display_name: "Bell pepper" +} +item { + name: "/m/0jly1" + id: 563 + display_name: "Turkey" +} 
+item { + name: "/m/0jqgx" + id: 564 + display_name: "Lily" +} +item { + name: "/m/0jwn_" + id: 565 + display_name: "Pomegranate" +} +item { + name: "/m/0jy4k" + id: 566 + display_name: "Doughnut" +} +item { + name: "/m/0jyfg" + id: 567 + display_name: "Glasses" +} +item { + name: "/m/0k0pj" + id: 568 + display_name: "Human nose" +} +item { + name: "/m/0k1tl" + id: 569 + display_name: "Pen" +} +item { + name: "/m/0_k2" + id: 570 + display_name: "Ant" +} +item { + name: "/m/0k4j" + id: 571 + display_name: "Car" +} +item { + name: "/m/0k5j" + id: 572 + display_name: "Aircraft" +} +item { + name: "/m/0k65p" + id: 573 + display_name: "Human hand" +} +item { + name: "/m/0km7z" + id: 574 + display_name: "Skunk" +} +item { + name: "/m/0kmg4" + id: 575 + display_name: "Teddy bear" +} +item { + name: "/m/0kpqd" + id: 576 + display_name: "Watermelon" +} +item { + name: "/m/0kpt_" + id: 577 + display_name: "Cantaloupe" +} +item { + name: "/m/0ky7b" + id: 578 + display_name: "Dishwasher" +} +item { + name: "/m/0l14j_" + id: 579 + display_name: "Flute" +} +item { + name: "/m/0l3ms" + id: 580 + display_name: "Balance beam" +} +item { + name: "/m/0l515" + id: 581 + display_name: "Sandwich" +} +item { + name: "/m/0ll1f78" + id: 582 + display_name: "Shrimp" +} +item { + name: "/m/0llzx" + id: 583 + display_name: "Sewing machine" +} +item { + name: "/m/0lt4_" + id: 584 + display_name: "Binoculars" +} +item { + name: "/m/0m53l" + id: 585 + display_name: "Rays and skates" +} +item { + name: "/m/0mcx2" + id: 586 + display_name: "Ipod" +} +item { + name: "/m/0mkg" + id: 587 + display_name: "Accordion" +} +item { + name: "/m/0mw_6" + id: 588 + display_name: "Willow" +} +item { + name: "/m/0n28_" + id: 589 + display_name: "Crab" +} +item { + name: "/m/0nl46" + id: 590 + display_name: "Crown" +} +item { + name: "/m/0nybt" + id: 591 + display_name: "Seahorse" +} +item { + name: "/m/0p833" + id: 592 + display_name: "Perfume" +} +item { + name: "/m/0pcr" + id: 593 + display_name: "Alpaca" +} +item { + name: "/m/0pg52" + id: 594 + display_name: "Taxi" +} +item { + name: "/m/0ph39" + id: 595 + display_name: "Canoe" +} +item { + name: "/m/0qjjc" + id: 596 + display_name: "Remote control" +} +item { + name: "/m/0qmmr" + id: 597 + display_name: "Wheelchair" +} +item { + name: "/m/0wdt60w" + id: 598 + display_name: "Rugby ball" +} +item { + name: "/m/0xfy" + id: 599 + display_name: "Armadillo" +} +item { + name: "/m/0xzly" + id: 600 + display_name: "Maracas" +} +item { + name: "/m/0zvk5" + id: 601 + display_name: "Helmet" +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/pascal_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/pascal_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..c9e9e2affcd73ae5cb272a51b44306a74cf22eea --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/pascal_label_map.pbtxt @@ -0,0 +1,99 @@ +item { + id: 1 + name: 'aeroplane' +} + +item { + id: 2 + name: 'bicycle' +} + +item { + id: 3 + name: 'bird' +} + +item { + id: 4 + name: 'boat' +} + +item { + id: 5 + name: 'bottle' +} + +item { + id: 6 + name: 'bus' +} + +item { + id: 7 + name: 'car' +} + +item { + id: 8 + name: 'cat' +} + +item { + id: 9 + name: 'chair' +} + +item { + id: 10 + name: 'cow' +} + +item { + id: 11 + name: 'diningtable' +} + +item { + id: 12 + name: 'dog' +} + +item { + id: 13 + name: 'horse' +} + +item { + id: 14 + name: 'motorbike' +} + +item { + id: 15 + name: 'person' +} + 
+item { + id: 16 + name: 'pottedplant' +} + +item { + id: 17 + name: 'sheep' +} + +item { + id: 18 + name: 'sofa' +} + +item { + id: 19 + name: 'train' +} + +item { + id: 20 + name: 'tvmonitor' +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/pet_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/pet_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..54d7d3518941ceb0d2dc3465bdf702d4eaac3f07 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/pet_label_map.pbtxt @@ -0,0 +1,184 @@ +item { + id: 1 + name: 'Abyssinian' +} + +item { + id: 2 + name: 'american_bulldog' +} + +item { + id: 3 + name: 'american_pit_bull_terrier' +} + +item { + id: 4 + name: 'basset_hound' +} + +item { + id: 5 + name: 'beagle' +} + +item { + id: 6 + name: 'Bengal' +} + +item { + id: 7 + name: 'Birman' +} + +item { + id: 8 + name: 'Bombay' +} + +item { + id: 9 + name: 'boxer' +} + +item { + id: 10 + name: 'British_Shorthair' +} + +item { + id: 11 + name: 'chihuahua' +} + +item { + id: 12 + name: 'Egyptian_Mau' +} + +item { + id: 13 + name: 'english_cocker_spaniel' +} + +item { + id: 14 + name: 'english_setter' +} + +item { + id: 15 + name: 'german_shorthaired' +} + +item { + id: 16 + name: 'great_pyrenees' +} + +item { + id: 17 + name: 'havanese' +} + +item { + id: 18 + name: 'japanese_chin' +} + +item { + id: 19 + name: 'keeshond' +} + +item { + id: 20 + name: 'leonberger' +} + +item { + id: 21 + name: 'Maine_Coon' +} + +item { + id: 22 + name: 'miniature_pinscher' +} + +item { + id: 23 + name: 'newfoundland' +} + +item { + id: 24 + name: 'Persian' +} + +item { + id: 25 + name: 'pomeranian' +} + +item { + id: 26 + name: 'pug' +} + +item { + id: 27 + name: 'Ragdoll' +} + +item { + id: 28 + name: 'Russian_Blue' +} + +item { + id: 29 + name: 'saint_bernard' +} + +item { + id: 30 + name: 'samoyed' +} + +item { + id: 31 + name: 'scottish_terrier' +} + +item { + id: 32 + name: 'shiba_inu' +} + +item { + id: 33 + name: 'Siamese' +} + +item { + id: 34 + name: 'Sphynx' +} + +item { + id: 35 + name: 'staffordshire_bull_terrier' +} + +item { + id: 36 + name: 'wheaten_terrier' +} + +item { + id: 37 + name: 'yorkshire_terrier' +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/snapshot_serengeti_label_map.pbtxt b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/snapshot_serengeti_label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..57555d179f968e479557fbec940d7dce4252d764 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/snapshot_serengeti_label_map.pbtxt @@ -0,0 +1,240 @@ +item { + id: 1 + name: 'human' +} + +item { + id: 2 + name: 'gazelleGrants' +} + +item { + id: 3 + name: 'reedbuck' +} + +item { + id: 4 + name: 'dikDik' +} + +item { + id: 5 + name: 'zebra' +} + +item { + id: 6 + name: 'porcupine' +} + +item { + id: 7 + name: 'gazelleThomsons' +} + +item { + id: 8 + name: 'hyenaSpotted' +} + +item { + id: 9 + name: 'warthog' +} + +item { + id: 10 + name: 'impala' +} + +item { + id: 11 + name: 'elephant' +} + +item { + id: 12 + name: 'giraffe' +} + +item { + id: 13 + name: 'mongoose' +} + +item { + id: 14 + name: 'buffalo' +} + +item { + id: 15 + name: 'hartebeest' +} + +item { + id: 16 + name: 'guineaFowl' +} + +item { + id: 17 + name: 'wildebeest' +} + +item { + id: 18 + name: 'leopard' +} + +item { + id: 19 + name: 'ostrich' +} + +item { + id: 
20 + name: 'lionFemale' +} + +item { + id: 21 + name: 'koriBustard' +} + +item { + id: 22 + name: 'otherBird' +} + +item { + id: 23 + name: 'batEaredFox' +} + +item { + id: 24 + name: 'bushbuck' +} + +item { + id: 25 + name: 'jackal' +} + +item { + id: 26 + name: 'cheetah' +} + +item { + id: 27 + name: 'eland' +} + +item { + id: 28 + name: 'aardwolf' +} + +item { + id: 29 + name: 'hippopotamus' +} + +item { + id: 30 + name: 'hyenaStriped' +} + +item { + id: 31 + name: 'aardvark' +} + +item { + id: 32 + name: 'hare' +} + +item { + id: 33 + name: 'baboon' +} + +item { + id: 34 + name: 'vervetMonkey' +} + +item { + id: 35 + name: 'waterbuck' +} + +item { + id: 36 + name: 'secretaryBird' +} + +item { + id: 37 + name: 'serval' +} + +item { + id: 38 + name: 'lionMale' +} + +item { + id: 39 + name: 'topi' +} + +item { + id: 40 + name: 'honeyBadger' +} + +item { + id: 41 + name: 'rodents' +} + +item { + id: 42 + name: 'wildcat' +} + +item { + id: 43 + name: 'civet' +} + +item { + id: 44 + name: 'genet' +} + +item { + id: 45 + name: 'caracal' +} + +item { + id: 46 + name: 'rhinoceros' +} + +item { + id: 47 + name: 'reptiles' +} + +item { + id: 48 + name: 'zorilla' +} + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/test_labels.csv b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/test_labels.csv new file mode 100644 index 0000000000000000000000000000000000000000..04b242c1d7b9d1b707257e104f7bf5aca93d4882 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/test_labels.csv @@ -0,0 +1,121 @@ +filename,width,height,class,xmin,ymin,xmax,ymax +frame1035.jpg,800,800,myrobot,272,232,352,442 +frame1035.jpg,800,800,corobot,1,166,106,379 +frame1035.jpg,800,800,corobot,171,54,257,296 +frame1035.jpg,800,800,corobot,453,95,547,320 +frame1035.jpg,800,800,corobot,200,375,282,540 +frame1035.jpg,800,800,corobot,407,487,504,645 +frame4045.jpg,800,800,corobot,134,1,224,232 +frame4045.jpg,800,800,corobot,79,386,177,558 +frame4045.jpg,800,800,corobot,357,146,437,358 +frame4045.jpg,800,800,corobot,384,414,469,581 +frame4045.jpg,800,800,corobot,493,299,592,480 +frame4045.jpg,800,800,myrobot,267,232,348,445 +frame2035.jpg,800,800,corobot,113,50,211,286 +frame2035.jpg,800,800,corobot,80,352,178,524 +frame2035.jpg,800,800,corobot,194,514,280,661 +frame2035.jpg,800,800,corobot,382,315,473,500 +frame2035.jpg,800,800,corobot,489,113,598,322 +frame2035.jpg,800,800,myrobot,268,263,341,446 +frame2040.jpg,800,800,corobot,110,44,210,285 +frame2040.jpg,800,800,corobot,79,347,180,531 +frame2040.jpg,800,800,corobot,382,309,480,500 +frame2040.jpg,800,800,corobot,202,498,295,651 +frame2040.jpg,800,800,corobot,495,109,604,330 +frame2040.jpg,800,800,myrobot,267,256,341,446 +frame1040.jpg,800,800,corobot,7,182,121,392 +frame1040.jpg,800,800,corobot,162,59,257,303 +frame1040.jpg,800,800,corobot,201,372,285,542 +frame1040.jpg,800,800,corobot,445,96,548,323 +frame1040.jpg,800,800,corobot,399,495,494,651 +frame1040.jpg,800,800,myrobot,268,235,353,447 +frame2045.jpg,800,800,corobot,146,85,240,310 +frame2045.jpg,800,800,corobot,110,389,207,559 +frame2045.jpg,800,800,corobot,238,465,332,621 +frame2045.jpg,800,800,corobot,351,350,442,532 +frame2045.jpg,800,800,corobot,514,137,630,354 +frame2045.jpg,800,800,myrobot,264,256,344,446 +frame3045.jpg,800,800,corobot,1,359,100,530 +frame3045.jpg,800,800,corobot,386,409,470,570 +frame3045.jpg,800,800,corobot,413,102,498,325 +frame3045.jpg,800,800,corobot,181,135,265,351 
+frame3045.jpg,800,800,corobot,292,21,367,259 +frame3045.jpg,800,800,myrobot,274,264,346,444 +frame3050.jpg,800,800,corobot,2,366,102,534 +frame3050.jpg,800,800,corobot,388,414,470,570 +frame3050.jpg,800,800,corobot,184,132,268,347 +frame3050.jpg,800,800,corobot,410,103,498,324 +frame3050.jpg,800,800,corobot,304,18,380,256 +frame3050.jpg,800,800,myrobot,276,259,346,445 +frame1050.jpg,800,800,corobot,130,87,226,320 +frame1050.jpg,800,800,corobot,32,208,143,412 +frame1050.jpg,800,800,corobot,429,121,520,346 +frame1050.jpg,800,800,corobot,388,513,479,662 +frame1050.jpg,800,800,corobot,159,329,251,515 +frame1050.jpg,800,800,myrobot,273,229,354,450 +frame1000.jpg,800,800,corobot,2,166,108,381 +frame1000.jpg,800,800,corobot,202,64,282,312 +frame1000.jpg,800,800,corobot,187,416,270,566 +frame1000.jpg,800,800,corobot,438,77,541,298 +frame1000.jpg,800,800,corobot,430,491,523,653 +frame1000.jpg,800,800,myrobot,270,235,350,446 +frame4025.jpg,800,800,myrobot,266,235,351,452 +frame4025.jpg,800,800,corobot,139,4,236,247 +frame4025.jpg,800,800,corobot,75,400,180,556 +frame4025.jpg,800,800,corobot,348,158,429,360 +frame4025.jpg,800,800,corobot,381,408,471,588 +frame4025.jpg,800,800,corobot,497,296,610,480 +frame3035.jpg,800,800,corobot,13,396,126,556 +frame3035.jpg,800,800,corobot,414,443,503,598 +frame3035.jpg,800,800,corobot,142,166,234,381 +frame3035.jpg,800,800,corobot,440,149,536,358 +frame3035.jpg,800,800,corobot,338,2,418,224 +frame3035.jpg,800,800,myrobot,275,238,344,446 +frame4050.jpg,800,800,corobot,102,14,193,251 +frame4050.jpg,800,800,corobot,54,404,153,568 +frame4050.jpg,800,800,corobot,344,122,418,337 +frame4050.jpg,800,800,corobot,364,432,445,596 +frame4050.jpg,800,800,corobot,511,330,618,501 +frame4050.jpg,800,800,myrobot,270,235,342,442 +frame2051.jpg,800,800,corobot,144,72,235,309 +frame2051.jpg,800,800,corobot,107,380,207,552 +frame2051.jpg,800,800,corobot,353,350,448,527 +frame2051.jpg,800,800,corobot,222,476,317,635 +frame2051.jpg,800,800,corobot,510,117,623,343 +frame2051.jpg,800,800,myrobot,265,259,347,448 +frame3040.jpg,800,800,corobot,12,391,121,552 +frame3040.jpg,800,800,corobot,412,446,503,598 +frame3040.jpg,800,800,corobot,440,147,531,357 +frame3040.jpg,800,800,corobot,137,173,227,382 +frame3040.jpg,800,800,corobot,326,1,405,232 +frame3040.jpg,800,800,myrobot,274,246,344,445 +frame1045.jpg,800,800,myrobot,267,232,354,448 +frame1045.jpg,800,800,corobot,45,218,148,421 +frame1045.jpg,800,800,corobot,126,86,219,322 +frame1045.jpg,800,800,corobot,163,331,251,509 +frame1045.jpg,800,800,corobot,414,129,516,351 +frame1045.jpg,800,800,corobot,376,516,471,668 +frame2025.jpg,800,800,corobot,162,53,254,298 +frame2025.jpg,800,800,corobot,497,160,609,373 +frame2025.jpg,800,800,corobot,89,416,194,571 +frame2025.jpg,800,800,corobot,344,317,438,512 +frame2025.jpg,800,800,corobot,247,478,342,635 +frame2025.jpg,800,800,myrobot,265,253,348,448 +frame2050.jpg,800,800,corobot,142,72,237,309 +frame2050.jpg,800,800,corobot,109,382,208,558 +frame2050.jpg,800,800,corobot,226,475,321,632 +frame2050.jpg,800,800,corobot,348,354,442,535 +frame2050.jpg,800,800,corobot,510,121,629,347 +frame2050.jpg,800,800,myrobot,265,257,346,443 +frame4040.jpg,800,800,corobot,169,1,256,219 +frame4040.jpg,800,800,corobot,109,357,206,535 +frame4040.jpg,800,800,corobot,368,164,446,375 +frame4040.jpg,800,800,corobot,476,289,573,428 +frame4040.jpg,800,800,corobot,412,436,498,555 +frame4040.jpg,800,800,myrobot,270,240,342,446 +frame4035.jpg,800,800,corobot,134,1,225,235 +frame4035.jpg,800,800,corobot,89,373,188,547 
+frame4035.jpg,800,800,corobot,352,152,428,363 +frame4035.jpg,800,800,corobot,502,309,601,481 +frame4035.jpg,800,800,corobot,386,412,479,581 +frame4035.jpg,800,800,myrobot,266,237,347,446 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/train_labels.csv b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/train_labels.csv new file mode 100644 index 0000000000000000000000000000000000000000..663fb161ee48065e931877067a6978bfd360225c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data/train_labels.csv @@ -0,0 +1,577 @@ +filename,width,height,class,xmin,ymin,xmax,ymax +frame2030.jpg,800,800,myrobot,270,251,341,446 +frame2030.jpg,800,800,corobot,150,74,241,315 +frame2030.jpg,800,800,corobot,509,141,618,362 +frame2030.jpg,800,800,corobot,109,401,204,560 +frame2030.jpg,800,800,corobot,341,337,427,522 +frame2030.jpg,800,800,corobot,235,483,323,637 +frame2028.jpg,800,800,myrobot,270,254,339,446 +frame2028.jpg,800,800,corobot,343,329,428,518 +frame2028.jpg,800,800,corobot,498,147,611,355 +frame2028.jpg,800,800,corobot,94,396,199,559 +frame2028.jpg,800,800,corobot,239,482,327,637 +frame2028.jpg,800,800,corobot,148,65,240,309 +frame2038.jpg,800,800,corobot,117,52,216,292 +frame2038.jpg,800,800,corobot,81,359,186,533 +frame2038.jpg,800,800,corobot,202,508,289,655 +frame2038.jpg,800,800,corobot,385,312,479,501 +frame2038.jpg,800,800,corobot,498,118,604,337 +frame2038.jpg,800,800,myrobot,269,257,345,445 +frame2036.jpg,800,800,corobot,155,95,251,320 +frame2036.jpg,800,800,corobot,115,403,213,568 +frame2036.jpg,800,800,corobot,243,475,328,628 +frame2036.jpg,800,800,corobot,339,349,429,530 +frame2036.jpg,800,800,corobot,519,164,637,371 +frame2036.jpg,800,800,myrobot,270,258,343,443 +frame1031.jpg,800,800,myrobot,271,228,352,450 +frame1031.jpg,800,800,corobot,24,209,131,414 +frame1031.jpg,800,800,corobot,125,76,221,316 +frame1031.jpg,800,800,corobot,425,136,521,350 +frame1031.jpg,800,800,corobot,180,341,266,515 +frame1031.jpg,800,800,corobot,376,506,472,660 +frame2032.jpg,800,800,myrobot,268,261,339,446 +frame2032.jpg,800,800,corobot,112,45,205,284 +frame2032.jpg,800,800,corobot,485,96,592,325 +frame2032.jpg,800,800,corobot,80,355,181,525 +frame2032.jpg,800,800,corobot,391,308,483,499 +frame2032.jpg,800,800,corobot,195,514,288,664 +frame3034.jpg,800,800,corobot,4,378,116,550 +frame3034.jpg,800,800,corobot,393,420,480,580 +frame3034.jpg,800,800,corobot,163,152,253,373 +frame3034.jpg,800,800,corobot,425,119,514,337 +frame3034.jpg,800,800,corobot,316,3,394,241 +frame3034.jpg,800,800,myrobot,272,246,345,446 +frame1036.jpg,800,800,corobot,35,217,143,419 +frame1036.jpg,800,800,corobot,112,87,211,323 +frame1036.jpg,800,800,corobot,412,144,505,354 +frame1036.jpg,800,800,corobot,165,322,251,497 +frame1036.jpg,800,800,corobot,356,525,444,673 +frame1036.jpg,800,800,myrobot,271,232,350,446 +frame3024.jpg,800,800,corobot,160,152,248,385 +frame3024.jpg,800,800,corobot,1,383,124,542 +frame3024.jpg,800,800,corobot,312,1,388,242 +frame3024.jpg,800,800,corobot,424,141,520,353 +frame3024.jpg,800,800,corobot,399,424,489,596 +frame3024.jpg,800,800,myrobot,275,250,344,448 +frame3037.jpg,800,800,corobot,3,375,116,543 +frame3037.jpg,800,800,corobot,405,431,494,591 +frame3037.jpg,800,800,corobot,150,162,243,378 +frame3037.jpg,800,800,corobot,432,137,521,346 +frame3037.jpg,800,800,corobot,326,1,403,231 +frame3037.jpg,800,800,myrobot,275,242,344,445 +frame1051.jpg,800,800,myrobot,272,231,354,444 +frame1051.jpg,800,800,corobot,31,200,139,405 
+frame1051.jpg,800,800,corobot,131,82,231,315 +frame1051.jpg,800,800,corobot,429,123,523,354 +frame1051.jpg,800,800,corobot,167,335,256,516 +frame1051.jpg,800,800,corobot,386,514,483,664 +frame3039.jpg,800,800,corobot,1,339,94,514 +frame3039.jpg,800,800,corobot,379,402,461,564 +frame3039.jpg,800,800,corobot,408,100,497,317 +frame3039.jpg,800,800,corobot,179,144,267,355 +frame3039.jpg,800,800,corobot,289,18,367,257 +frame3039.jpg,800,800,myrobot,273,262,344,444 +frame3047.jpg,800,800,corobot,2,378,108,542 +frame3047.jpg,800,800,corobot,393,425,479,580 +frame3047.jpg,800,800,corobot,421,119,507,337 +frame3047.jpg,800,800,corobot,171,146,257,353 +frame3047.jpg,800,800,corobot,303,15,380,256 +frame3047.jpg,800,800,myrobot,276,259,344,446 +frame3043.jpg,800,800,corobot,22,404,130,566 +frame3043.jpg,800,800,corobot,424,455,514,610 +frame3043.jpg,800,800,corobot,447,159,542,366 +frame3043.jpg,800,800,corobot,138,168,230,380 +frame3043.jpg,800,800,corobot,339,1,419,231 +frame3043.jpg,800,800,myrobot,275,241,346,445 +frame4038.jpg,800,800,corobot,130,2,221,241 +frame4038.jpg,800,800,corobot,73,389,174,555 +frame4038.jpg,800,800,corobot,342,135,418,340 +frame4038.jpg,800,800,corobot,376,424,463,587 +frame4038.jpg,800,800,corobot,509,318,617,495 +frame4038.jpg,800,800,myrobot,267,240,342,445 +frame2042.jpg,800,800,corobot,144,82,238,311 +frame2042.jpg,800,800,corobot,518,141,635,359 +frame2042.jpg,800,800,corobot,107,389,207,556 +frame2042.jpg,800,800,corobot,346,356,435,532 +frame2042.jpg,800,800,corobot,242,469,328,626 +frame2042.jpg,800,800,myrobot,267,255,344,444 +frame3051.jpg,800,800,corobot,2,372,105,539 +frame3051.jpg,800,800,corobot,392,417,473,576 +frame3051.jpg,800,800,corobot,415,113,502,329 +frame3051.jpg,800,800,corobot,170,145,260,355 +frame3051.jpg,800,800,corobot,313,10,388,252 +frame3051.jpg,800,800,myrobot,274,258,344,445 +frame1028.jpg,800,800,corobot,25,220,134,420 +frame1028.jpg,800,800,corobot,130,73,221,317 +frame1028.jpg,800,800,corobot,180,330,266,503 +frame1028.jpg,800,800,corobot,434,145,531,350 +frame1028.jpg,800,800,corobot,378,494,469,655 +frame1028.jpg,800,800,myrobot,271,230,353,447 +frame4047.jpg,800,800,corobot,98,4,192,253 +frame4047.jpg,800,800,corobot,51,402,151,565 +frame4047.jpg,800,800,corobot,335,115,413,299 +frame4047.jpg,800,800,myrobot,269,253,344,441 +frame4047.jpg,800,800,corobot,512,327,615,492 +frame4047.jpg,800,800,corobot,356,432,439,586 +frame4049.jpg,800,800,corobot,141,1,226,228 +frame4049.jpg,800,800,corobot,373,162,453,368 +frame4049.jpg,800,800,corobot,94,373,187,542 +frame4049.jpg,800,800,corobot,481,285,576,458 +frame4049.jpg,800,800,corobot,392,401,480,568 +frame4049.jpg,800,800,myrobot,268,238,345,441 +frame3046.jpg,800,800,corobot,27,414,135,572 +frame3046.jpg,800,800,corobot,426,460,514,612 +frame3046.jpg,800,800,corobot,448,153,538,367 +frame3046.jpg,800,800,corobot,132,171,221,382 +frame3046.jpg,800,800,corobot,339,2,419,234 +frame3046.jpg,800,800,myrobot,276,250,344,443 +frame3031.jpg,800,800,corobot,1,363,112,532 +frame3031.jpg,800,800,corobot,384,415,473,576 +frame3031.jpg,800,800,corobot,174,148,265,368 +frame3031.jpg,800,800,corobot,417,109,509,327 +frame3031.jpg,800,800,corobot,304,2,382,247 +frame3031.jpg,800,800,myrobot,276,254,345,446 +frame3000.jpg,800,800,corobot,344,1,428,234 +frame3000.jpg,800,800,corobot,112,152,215,384 +frame3000.jpg,800,800,corobot,1,433,125,582 +frame3000.jpg,800,800,corobot,428,193,526,394 +frame3000.jpg,800,800,corobot,440,422,542,600 +frame3000.jpg,800,800,myrobot,273,246,355,444 
+frame4030.jpg,800,800,corobot,57,422,167,572 +frame4030.jpg,800,800,corobot,354,425,438,590 +frame4030.jpg,800,800,corobot,521,324,629,502 +frame4030.jpg,800,800,corobot,94,5,191,252 +frame4030.jpg,800,800,corobot,343,121,419,326 +frame4030.jpg,800,800,myrobot,270,236,341,445 +frame3042.jpg,800,800,corobot,1,351,96,527 +frame3042.jpg,800,800,corobot,385,410,470,570 +frame3042.jpg,800,800,corobot,417,109,501,323 +frame3042.jpg,800,800,corobot,184,133,269,350 +frame3042.jpg,800,800,corobot,293,21,368,260 +frame3042.jpg,800,800,myrobot,276,263,344,446 +frame3028.jpg,800,800,corobot,3,368,121,533 +frame3028.jpg,800,800,corobot,380,413,471,588 +frame3028.jpg,800,800,corobot,189,147,275,369 +frame3028.jpg,800,800,corobot,422,102,513,330 +frame3028.jpg,800,800,corobot,293,5,371,249 +frame3028.jpg,800,800,myrobot,280,254,345,443 +frame2046.jpg,800,800,corobot,102,34,203,274 +frame2046.jpg,800,800,corobot,480,83,587,310 +frame2046.jpg,800,800,corobot,71,332,177,518 +frame2046.jpg,800,800,corobot,394,306,484,497 +frame2046.jpg,800,800,corobot,189,502,286,656 +frame2046.jpg,800,800,myrobot,267,255,342,446 +frame4048.jpg,800,800,corobot,135,1,223,230 +frame4048.jpg,800,800,corobot,91,375,185,543 +frame4048.jpg,800,800,corobot,363,150,445,367 +frame4048.jpg,800,800,corobot,483,283,584,464 +frame4048.jpg,800,800,corobot,395,403,474,569 +frame4048.jpg,800,800,myrobot,269,238,344,441 +frame4026.jpg,800,800,corobot,162,1,256,246 +frame4026.jpg,800,800,corobot,71,361,181,523 +frame4026.jpg,800,800,corobot,349,187,428,382 +frame4026.jpg,800,800,corobot,484,267,590,453 +frame4026.jpg,800,800,corobot,411,445,503,583 +frame4026.jpg,800,800,myrobot,265,241,349,446 +frame3026.jpg,800,800,corobot,3,431,123,586 +frame3026.jpg,800,800,corobot,436,423,526,597 +frame3026.jpg,800,800,corobot,429,179,526,387 +frame3026.jpg,800,800,corobot,118,154,208,384 +frame3026.jpg,800,800,corobot,334,1,416,228 +frame3026.jpg,800,800,myrobot,268,230,345,443 +frame1046.jpg,800,800,corobot,1,169,119,381 +frame1046.jpg,800,800,corobot,176,52,269,296 +frame1046.jpg,800,800,corobot,462,86,562,317 +frame1046.jpg,800,800,corobot,203,379,289,550 +frame1046.jpg,800,800,corobot,424,478,524,634 +frame1046.jpg,800,800,myrobot,271,234,353,445 +frame2037.jpg,800,800,corobot,121,56,222,293 +frame2037.jpg,800,800,corobot,85,364,190,537 +frame2037.jpg,800,800,corobot,373,318,467,510 +frame2037.jpg,800,800,corobot,200,505,298,651 +frame2037.jpg,800,800,corobot,503,123,615,344 +frame2037.jpg,800,800,myrobot,267,257,345,449 +frame2041.jpg,800,800,corobot,118,52,211,288 +frame2041.jpg,800,800,corobot,82,358,184,532 +frame2041.jpg,800,800,corobot,211,492,302,647 +frame2041.jpg,800,800,corobot,380,323,467,507 +frame2041.jpg,800,800,corobot,498,109,609,336 +frame2041.jpg,800,800,myrobot,267,255,343,447 +frame4037.jpg,800,800,corobot,172,1,260,216 +frame4037.jpg,800,800,corobot,370,174,449,376 +frame4037.jpg,800,800,corobot,114,356,207,528 +frame4037.jpg,800,800,corobot,484,277,581,422 +frame4037.jpg,800,800,corobot,417,424,504,562 +frame4037.jpg,800,800,myrobot,267,237,348,436 +frame1032.jpg,800,800,corobot,167,56,259,302 +frame1032.jpg,800,800,corobot,1,171,107,385 +frame1032.jpg,800,800,corobot,451,97,548,320 +frame1032.jpg,800,800,corobot,205,387,287,549 +frame1032.jpg,800,800,corobot,417,481,514,642 +frame1032.jpg,800,800,myrobot,271,231,351,448 +frame3049.jpg,800,800,corobot,28,413,134,573 +frame3049.jpg,800,800,corobot,424,461,511,608 +frame3049.jpg,800,800,corobot,444,153,534,363 +frame3049.jpg,800,800,corobot,139,164,230,378 
+frame3049.jpg,800,800,corobot,344,2,422,230 +frame3049.jpg,800,800,myrobot,275,246,345,449 +frame1042.jpg,800,800,corobot,41,215,147,421 +frame1042.jpg,800,800,corobot,121,85,228,319 +frame1042.jpg,800,800,corobot,161,327,253,505 +frame1042.jpg,800,800,corobot,419,131,512,351 +frame1042.jpg,800,800,corobot,370,518,461,669 +frame1042.jpg,800,800,myrobot,269,233,352,446 +frame1029.jpg,800,800,myrobot,271,232,351,446 +frame1029.jpg,800,800,corobot,1,161,106,379 +frame1029.jpg,800,800,corobot,181,53,270,298 +frame1029.jpg,800,800,corobot,452,90,554,304 +frame1029.jpg,800,800,corobot,200,391,282,550 +frame1029.jpg,800,800,corobot,433,481,528,642 +frame2031.jpg,800,800,corobot,142,78,235,314 +frame2031.jpg,800,800,corobot,507,144,620,356 +frame2031.jpg,800,800,corobot,104,396,200,559 +frame2031.jpg,800,800,corobot,348,328,439,519 +frame2031.jpg,800,800,corobot,230,487,316,643 +frame2031.jpg,800,800,myrobot,266,259,340,446 +frame1038.jpg,800,800,corobot,5,173,116,388 +frame1038.jpg,800,800,corobot,158,60,251,301 +frame1038.jpg,800,800,corobot,444,100,540,328 +frame1038.jpg,800,800,corobot,203,374,287,541 +frame1038.jpg,800,800,corobot,396,497,494,651 +frame1038.jpg,800,800,myrobot,270,233,354,445 +frame4052.jpg,800,800,corobot,140,1,228,225 +frame4052.jpg,800,800,corobot,368,153,450,366 +frame4052.jpg,800,800,corobot,100,368,191,537 +frame4052.jpg,800,800,corobot,401,403,486,565 +frame4052.jpg,800,800,corobot,491,298,587,471 +frame4052.jpg,800,800,myrobot,270,237,349,446 +frame3027.jpg,800,800,corobot,2,384,121,548 +frame3027.jpg,800,800,corobot,389,417,478,590 +frame3027.jpg,800,800,corobot,172,148,262,377 +frame3027.jpg,800,800,corobot,419,129,516,341 +frame3027.jpg,800,800,corobot,297,4,380,249 +frame3027.jpg,800,800,myrobot,275,253,344,450 +frame2048.jpg,800,800,corobot,139,70,234,305 +frame2048.jpg,800,800,corobot,507,124,620,343 +frame2048.jpg,800,800,corobot,105,382,205,551 +frame2048.jpg,800,800,corobot,349,354,438,538 +frame2048.jpg,800,800,corobot,233,470,321,628 +frame2048.jpg,800,800,myrobot,268,253,344,444 +frame1025.jpg,800,800,corobot,118,64,214,312 +frame1025.jpg,800,800,corobot,30,232,139,429 +frame1025.jpg,800,800,corobot,187,341,274,508 +frame1025.jpg,800,800,corobot,440,159,543,362 +frame1025.jpg,800,800,corobot,368,491,452,652 +frame1025.jpg,800,800,myrobot,273,231,353,446 +frame3038.jpg,800,800,corobot,7,383,118,551 +frame3038.jpg,800,800,corobot,411,439,496,591 +frame3038.jpg,800,800,corobot,139,171,230,383 +frame3038.jpg,800,800,corobot,435,145,527,351 +frame3038.jpg,800,800,corobot,331,2,411,230 +frame3038.jpg,800,800,myrobot,274,243,346,445 +frame1026.jpg,800,800,corobot,2,174,110,383 +frame1026.jpg,800,800,corobot,192,63,276,310 +frame1026.jpg,800,800,corobot,444,87,547,308 +frame1026.jpg,800,800,corobot,191,411,275,564 +frame1026.jpg,800,800,corobot,436,490,529,652 +frame1026.jpg,800,800,myrobot,274,236,350,444 +frame3041.jpg,800,800,corobot,8,385,118,553 +frame3041.jpg,800,800,corobot,411,436,494,594 +frame3041.jpg,800,800,corobot,151,161,237,376 +frame3041.jpg,800,800,corobot,435,146,525,351 +frame3041.jpg,800,800,corobot,320,4,398,241 +frame3041.jpg,800,800,myrobot,276,250,344,442 +frame2044.jpg,800,800,corobot,124,52,217,296 +frame2044.jpg,800,800,corobot,91,365,190,536 +frame2044.jpg,800,800,corobot,212,490,307,641 +frame2044.jpg,800,800,corobot,368,334,460,522 +frame2044.jpg,800,800,corobot,501,119,616,340 +frame2044.jpg,800,800,myrobot,267,255,344,446 +frame1044.jpg,800,800,corobot,151,60,246,305 +frame1044.jpg,800,800,corobot,27,189,129,403 
+frame1044.jpg,800,800,corobot,444,103,539,332 +frame1044.jpg,800,800,corobot,182,352,268,528 +frame1044.jpg,800,800,corobot,393,501,489,655 +frame1044.jpg,800,800,myrobot,269,232,354,447 +frame3036.jpg,800,800,corobot,1,337,94,510 +frame3036.jpg,800,800,corobot,375,396,459,562 +frame3036.jpg,800,800,corobot,408,91,496,318 +frame3036.jpg,800,800,corobot,186,132,276,352 +frame3036.jpg,800,800,corobot,294,14,370,255 +frame3036.jpg,800,800,myrobot,276,263,344,445 +frame1030.jpg,800,800,corobot,22,210,131,413 +frame1030.jpg,800,800,corobot,121,76,219,323 +frame1030.jpg,800,800,corobot,424,132,517,346 +frame1030.jpg,800,800,corobot,175,331,263,508 +frame1030.jpg,800,800,corobot,377,507,470,659 +frame1030.jpg,800,800,myrobot,270,228,352,445 +frame3033.jpg,800,800,corobot,1,350,102,521 +frame3033.jpg,800,800,corobot,371,395,457,562 +frame3033.jpg,800,800,corobot,407,88,496,312 +frame3033.jpg,800,800,corobot,188,145,274,359 +frame3033.jpg,800,800,corobot,288,15,364,255 +frame3033.jpg,800,800,myrobot,277,260,344,446 +frame2033.jpg,800,800,corobot,159,88,249,322 +frame2033.jpg,800,800,corobot,116,409,210,570 +frame2033.jpg,800,800,corobot,246,473,333,630 +frame2033.jpg,800,800,corobot,341,344,426,527 +frame2033.jpg,800,800,corobot,514,155,628,361 +frame2033.jpg,800,800,myrobot,268,259,337,442 +frame2034.jpg,800,800,corobot,135,67,226,304 +frame2034.jpg,800,800,corobot,94,380,194,549 +frame2034.jpg,800,800,corobot,361,329,453,514 +frame2034.jpg,800,800,corobot,217,496,306,650 +frame2034.jpg,800,800,corobot,502,122,609,341 +frame2034.jpg,800,800,myrobot,270,255,340,444 +frame4041.jpg,800,800,corobot,120,1,212,246 +frame4041.jpg,800,800,corobot,336,132,411,300 +frame4041.jpg,800,800,corobot,67,398,163,564 +frame4041.jpg,800,800,corobot,365,432,454,592 +frame4041.jpg,800,800,corobot,505,323,606,491 +frame4041.jpg,800,800,myrobot,261,285,344,439 +frame1037.jpg,800,800,myrobot,269,235,354,446 +frame1037.jpg,800,800,corobot,2,183,116,393 +frame1037.jpg,800,800,corobot,152,68,246,302 +frame1037.jpg,800,800,corobot,441,105,539,334 +frame1037.jpg,800,800,corobot,193,364,280,536 +frame1037.jpg,800,800,corobot,390,501,485,653 +frame4033.jpg,800,800,corobot,106,1,203,249 +frame4033.jpg,800,800,corobot,67,402,169,565 +frame4033.jpg,800,800,corobot,335,124,412,319 +frame4033.jpg,800,800,corobot,362,424,454,594 +frame4033.jpg,800,800,corobot,513,323,625,502 +frame4033.jpg,800,800,myrobot,267,288,342,450 +frame3048.jpg,800,800,corobot,2,373,107,540 +frame3048.jpg,800,800,corobot,394,419,476,578 +frame3048.jpg,800,800,corobot,417,114,504,333 +frame3048.jpg,800,800,corobot,181,135,267,350 +frame3048.jpg,800,800,corobot,299,19,378,257 +frame3048.jpg,800,800,myrobot,275,263,344,443 +frame4034.jpg,800,800,corobot,162,1,255,226 +frame4034.jpg,800,800,corobot,112,350,207,527 +frame4034.jpg,800,800,corobot,361,173,449,382 +frame4034.jpg,800,800,corobot,488,273,584,428 +frame4034.jpg,800,800,corobot,417,423,500,560 +frame4034.jpg,800,800,myrobot,270,241,346,445 +frame3029.jpg,800,800,corobot,22,423,134,583 +frame3029.jpg,800,800,corobot,426,437,521,605 +frame3029.jpg,800,800,corobot,134,161,225,392 +frame3029.jpg,800,800,corobot,339,1,423,227 +frame3029.jpg,800,800,corobot,437,167,537,371 +frame3029.jpg,800,800,myrobot,274,235,344,444 +frame1024.jpg,800,800,corobot,7,197,123,408 +frame1024.jpg,800,800,corobot,162,63,246,307 +frame1024.jpg,800,800,corobot,187,373,273,535 +frame1024.jpg,800,800,corobot,441,123,542,332 +frame1024.jpg,800,800,corobot,394,491,481,652 +frame1024.jpg,800,800,myrobot,268,226,350,446 
+frame4036.jpg,800,800,corobot,117,1,212,248 +frame4036.jpg,800,800,corobot,67,402,166,559 +frame4036.jpg,800,800,corobot,342,131,414,336 +frame4036.jpg,800,800,corobot,365,431,455,591 +frame4036.jpg,800,800,corobot,517,329,623,502 +frame4036.jpg,800,800,myrobot,264,240,346,446 +frame3025.jpg,800,800,corobot,3,360,123,529 +frame3025.jpg,800,800,corobot,365,422,452,596 +frame3025.jpg,800,800,corobot,423,107,525,326 +frame3025.jpg,800,800,corobot,279,5,363,257 +frame3025.jpg,800,800,corobot,189,150,271,379 +frame3025.jpg,800,800,myrobot,275,259,345,447 +frame4031.jpg,800,800,corobot,153,1,243,234 +frame4031.jpg,800,800,corobot,100,361,198,531 +frame4031.jpg,800,800,corobot,362,175,442,385 +frame4031.jpg,800,800,corobot,492,285,593,460 +frame4031.jpg,800,800,corobot,408,400,495,562 +frame4031.jpg,800,800,myrobot,266,233,345,447 +frame4029.jpg,800,800,corobot,86,370,186,537 +frame4029.jpg,800,800,corobot,152,4,237,241 +frame4029.jpg,800,800,corobot,357,181,438,379 +frame4029.jpg,800,800,corobot,496,278,589,454 +frame4029.jpg,800,800,corobot,392,404,488,580 +frame4029.jpg,800,800,myrobot,271,234,345,448 +frame1043.jpg,800,800,myrobot,269,232,354,446 +frame1043.jpg,800,800,corobot,4,172,116,384 +frame1043.jpg,800,800,corobot,179,52,267,292 +frame1043.jpg,800,800,corobot,459,86,558,317 +frame1043.jpg,800,800,corobot,196,370,282,541 +frame1043.jpg,800,800,corobot,413,483,511,640 +frame1034.jpg,800,800,corobot,9,191,123,403 +frame1034.jpg,800,800,corobot,141,75,232,313 +frame1034.jpg,800,800,corobot,433,119,523,341 +frame1034.jpg,800,800,corobot,186,350,270,522 +frame1034.jpg,800,800,corobot,394,500,485,654 +frame1034.jpg,800,800,myrobot,270,235,351,444 +frame4042.jpg,800,800,corobot,126,2,213,243 +frame4042.jpg,800,800,corobot,346,137,422,341 +frame4042.jpg,800,800,corobot,71,392,171,557 +frame4042.jpg,800,800,corobot,372,422,458,586 +frame4042.jpg,800,800,corobot,500,306,595,483 +frame4042.jpg,800,800,myrobot,262,240,344,441 +frame3044.jpg,800,800,corobot,5,380,113,545 +frame3044.jpg,800,800,corobot,406,434,493,591 +frame3044.jpg,800,800,corobot,430,133,520,346 +frame3044.jpg,800,800,corobot,160,154,245,368 +frame3044.jpg,800,800,corobot,316,5,394,241 +frame3044.jpg,800,800,myrobot,275,251,345,441 +frame4028.jpg,800,800,corobot,80,379,185,538 +frame4028.jpg,800,800,corobot,145,2,233,242 +frame4028.jpg,800,800,corobot,398,437,484,578 +frame4028.jpg,800,800,corobot,492,278,595,464 +frame4028.jpg,800,800,corobot,355,170,436,370 +frame4028.jpg,800,800,myrobot,269,233,346,446 +frame4044.jpg,800,800,corobot,107,2,201,250 +frame4044.jpg,800,800,corobot,343,125,414,323 +frame4044.jpg,800,800,myrobot,267,241,344,442 +frame4044.jpg,800,800,corobot,353,436,442,596 +frame4044.jpg,800,800,corobot,515,323,617,491 +frame4044.jpg,800,800,corobot,53,409,157,570 +frame4032.jpg,800,800,corobot,145,1,238,234 +frame4032.jpg,800,800,corobot,91,372,192,541 +frame4032.jpg,800,800,corobot,355,163,434,371 +frame4032.jpg,800,800,corobot,494,296,595,474 +frame4032.jpg,800,800,corobot,397,402,490,577 +frame4032.jpg,800,800,myrobot,267,237,347,449 +frame2024.jpg,800,800,myrobot,265,250,341,449 +frame2024.jpg,800,800,corobot,127,52,222,301 +frame2024.jpg,800,800,corobot,493,140,610,346 +frame2024.jpg,800,800,corobot,88,379,193,538 +frame2024.jpg,800,800,corobot,225,494,314,646 +frame2024.jpg,800,800,corobot,367,318,455,516 +frame4043.jpg,800,800,corobot,160,1,244,218 +frame4043.jpg,800,800,corobot,102,364,197,541 +frame4043.jpg,800,800,corobot,372,165,455,373 +frame4043.jpg,800,800,corobot,477,276,576,423 
+frame4043.jpg,800,800,corobot,402,422,489,568 +frame4043.jpg,800,800,myrobot,269,239,345,441 +frame1047.jpg,800,800,myrobot,271,228,355,445 +frame1047.jpg,800,800,corobot,34,205,138,409 +frame1047.jpg,800,800,corobot,140,74,239,312 +frame1047.jpg,800,800,corobot,433,114,529,341 +frame1047.jpg,800,800,corobot,170,343,263,521 +frame1047.jpg,800,800,corobot,394,502,491,654 +frame4051.jpg,800,800,corobot,144,1,232,227 +frame4051.jpg,800,800,corobot,101,364,196,541 +frame4051.jpg,800,800,corobot,377,160,462,374 +frame4051.jpg,800,800,corobot,485,288,580,451 +frame4051.jpg,800,800,corobot,406,399,485,556 +frame4051.jpg,800,800,myrobot,268,234,344,444 +frame2049.jpg,800,800,corobot,105,32,203,274 +frame2049.jpg,800,800,corobot,480,83,588,313 +frame2049.jpg,800,800,corobot,74,339,179,517 +frame2049.jpg,800,800,corobot,385,316,477,505 +frame2049.jpg,800,800,corobot,185,506,280,657 +frame2049.jpg,800,800,myrobot,269,250,345,446 +frame3030.jpg,800,800,corobot,1,360,110,532 +frame3030.jpg,800,800,corobot,378,405,467,575 +frame3030.jpg,800,800,corobot,179,147,276,370 +frame3030.jpg,800,800,corobot,414,103,505,323 +frame3030.jpg,800,800,corobot,298,7,376,250 +frame3030.jpg,800,800,myrobot,279,257,345,441 +frame2026.jpg,800,800,myrobot,264,251,345,450 +frame2026.jpg,800,800,corobot,82,41,191,298 +frame2026.jpg,800,800,corobot,86,335,189,514 +frame2026.jpg,800,800,corobot,196,508,290,662 +frame2026.jpg,800,800,corobot,404,310,503,511 +frame2026.jpg,800,800,corobot,495,87,607,315 +frame4046.jpg,800,800,corobot,150,1,240,223 +frame4046.jpg,800,800,corobot,102,363,197,533 +frame4046.jpg,800,800,corobot,372,162,451,371 +frame4046.jpg,800,800,corobot,478,277,573,444 +frame4046.jpg,800,800,myrobot,268,237,344,441 +frame4046.jpg,800,800,corobot,408,404,478,561 +frame2027.jpg,800,800,corobot,135,54,235,308 +frame2027.jpg,800,800,corobot,96,388,196,555 +frame2027.jpg,800,800,corobot,497,140,612,349 +frame2027.jpg,800,800,corobot,236,485,322,641 +frame2027.jpg,800,800,corobot,357,326,446,518 +frame2027.jpg,800,800,myrobot,267,253,343,448 +frame1027.jpg,800,800,corobot,139,70,231,315 +frame1027.jpg,800,800,corobot,23,214,131,418 +frame1027.jpg,800,800,corobot,184,351,269,520 +frame1027.jpg,800,800,corobot,436,134,531,349 +frame1027.jpg,800,800,corobot,391,495,478,654 +frame1027.jpg,800,800,myrobot,269,232,353,449 +frame4027.jpg,800,800,corobot,67,437,176,588 +frame4027.jpg,800,800,corobot,347,407,427,585 +frame4027.jpg,800,800,corobot,517,319,626,496 +frame4027.jpg,800,800,corobot,87,4,182,243 +frame4027.jpg,800,800,corobot,347,112,421,322 +frame4027.jpg,800,800,myrobot,269,235,344,446 +frame1048.jpg,800,800,corobot,38,214,141,416 +frame1048.jpg,800,800,corobot,130,82,230,315 +frame1048.jpg,800,800,corobot,162,329,251,510 +frame1048.jpg,800,800,corobot,430,121,520,341 +frame1048.jpg,800,800,corobot,387,509,480,659 +frame1048.jpg,800,800,myrobot,272,233,354,444 +frame1033.jpg,800,800,myrobot,270,233,352,449 +frame1033.jpg,800,800,corobot,33,221,139,420 +frame1033.jpg,800,800,corobot,108,88,207,328 +frame1033.jpg,800,800,corobot,413,146,503,359 +frame1033.jpg,800,800,corobot,170,326,255,502 +frame1033.jpg,800,800,corobot,367,516,453,668 +frame1041.jpg,800,800,myrobot,267,233,355,446 +frame1041.jpg,800,800,corobot,12,185,125,396 +frame1041.jpg,800,800,corobot,161,61,254,300 +frame1041.jpg,800,800,corobot,449,100,545,328 +frame1041.jpg,800,800,corobot,194,365,281,536 +frame1041.jpg,800,800,corobot,401,491,498,650 +frame2047.jpg,800,800,corobot,133,65,228,305 +frame2047.jpg,800,800,corobot,101,377,203,549 
+frame2047.jpg,800,800,corobot,505,118,617,339 +frame2047.jpg,800,800,corobot,233,473,321,631 +frame2047.jpg,800,800,corobot,357,342,448,527 +frame2047.jpg,800,800,myrobot,265,255,343,450 +frame2000.jpg,800,800,corobot,79,51,179,300 +frame2000.jpg,800,800,corobot,497,87,607,308 +frame2000.jpg,800,800,corobot,85,329,193,500 +frame2000.jpg,800,800,corobot,413,318,503,514 +frame2000.jpg,800,800,corobot,185,513,275,668 +frame2000.jpg,800,800,myrobot,266,254,342,449 +frame1049.jpg,800,800,myrobot,271,233,356,448 +frame1049.jpg,800,800,corobot,1,162,117,379 +frame1049.jpg,800,800,corobot,174,53,266,295 +frame1049.jpg,800,800,corobot,464,88,562,316 +frame1049.jpg,800,800,corobot,198,373,284,545 +frame1049.jpg,800,800,corobot,425,476,523,634 +frame1039.jpg,800,800,myrobot,269,232,353,448 +frame1039.jpg,800,800,corobot,44,218,146,422 +frame1039.jpg,800,800,corobot,116,86,212,323 +frame1039.jpg,800,800,corobot,171,328,257,510 +frame1039.jpg,800,800,corobot,413,136,508,355 +frame1039.jpg,800,800,corobot,357,524,449,676 +frame2043.jpg,800,800,corobot,102,39,200,271 +frame2043.jpg,800,800,corobot,485,95,591,316 +frame2043.jpg,800,800,corobot,71,336,176,517 +frame2043.jpg,800,800,corobot,385,317,478,500 +frame2043.jpg,800,800,corobot,192,509,290,659 +frame2043.jpg,800,800,myrobot,264,255,344,446 +frame2029.jpg,800,800,corobot,97,39,197,294 +frame2029.jpg,800,800,corobot,485,86,592,311 +frame2029.jpg,800,800,corobot,78,342,182,516 +frame2029.jpg,800,800,corobot,392,309,485,508 +frame2029.jpg,800,800,corobot,194,514,281,664 +frame2029.jpg,800,800,myrobot,269,251,342,448 +frame3032.jpg,800,800,corobot,17,405,129,574 +frame3032.jpg,800,800,corobot,414,438,504,599 +frame3032.jpg,800,800,corobot,133,172,225,386 +frame3032.jpg,800,800,corobot,436,147,530,359 +frame3032.jpg,800,800,corobot,335,3,417,225 +frame3032.jpg,800,800,myrobot,274,231,345,443 +frame4000.jpg,800,800,corobot,94,1,191,246 +frame4000.jpg,800,800,corobot,351,111,425,322 +frame4000.jpg,800,800,corobot,75,446,186,597 +frame4000.jpg,800,800,corobot,339,410,416,587 +frame4000.jpg,800,800,corobot,517,334,635,516 +frame4000.jpg,800,800,myrobot,264,228,344,446 +frame4039.jpg,800,800,corobot,129,1,219,242 +frame4039.jpg,800,800,corobot,76,391,171,555 +frame4039.jpg,800,800,corobot,345,124,419,322 +frame4039.jpg,800,800,myrobot,267,242,343,445 +frame4039.jpg,800,800,corobot,374,424,458,584 +frame4039.jpg,800,800,corobot,511,319,617,496 +frame2039.jpg,800,800,corobot,152,91,243,321 +frame2039.jpg,800,800,corobot,115,400,211,563 +frame2039.jpg,800,800,corobot,247,467,331,630 +frame2039.jpg,800,800,corobot,349,348,437,531 +frame2039.jpg,800,800,corobot,524,157,641,368 +frame2039.jpg,800,800,myrobot,267,255,342,446 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff136cd501b909ee13d2539b2d3bf2c9c8e7f4dd Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__init__.pyc differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a2ff210d88df30ca9123cf2dc769a8221743c59 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__pycache__/tf_example_decoder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__pycache__/tf_example_decoder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e75d46780c913b741e5e61dd8781c5eb332ca0c5 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__pycache__/tf_example_decoder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__pycache__/tf_sequence_example_decoder.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__pycache__/tf_sequence_example_decoder.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c60425f4c9395292782cf77dba79205a6cb5386 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/__pycache__/tf_sequence_example_decoder.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_example_decoder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_example_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..b7a55e41abb3c78a3e631084ef8fbce892764286 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_example_decoder.py @@ -0,0 +1,881 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tensorflow Example proto decoder for object detection. + +A decoder to decode string tensors containing serialized tensorflow.Example +protos for object detection. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import enum +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf +from tf_slim import tfexample_decoder as slim_example_decoder +from object_detection.core import data_decoder +from object_detection.core import standard_fields as fields +from object_detection.protos import input_reader_pb2 +from object_detection.utils import label_map_util +from object_detection.utils import shape_utils + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import lookup as contrib_lookup + +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +_LABEL_OFFSET = 1 + + +class Visibility(enum.Enum): + """Visibility definitions. + + This follows the MS Coco convention (http://cocodataset.org/#format-data). + """ + # Keypoint is not labeled. + UNLABELED = 0 + # Keypoint is labeled but falls outside the object segment (e.g. occluded). + NOT_VISIBLE = 1 + # Keypoint is labeled and visible. + VISIBLE = 2 + + +class _ClassTensorHandler(slim_example_decoder.Tensor): + """An ItemHandler to fetch class ids from class text.""" + + def __init__(self, + tensor_key, + label_map_proto_file, + shape_keys=None, + shape=None, + default_value=''): + """Initializes the LookupTensor handler. + + Simply calls a vocabulary (most often, a label mapping) lookup. + + Args: + tensor_key: the name of the `TFExample` feature to read the tensor from. + label_map_proto_file: File path to a text format LabelMapProto message + mapping class text to id. + shape_keys: Optional name or list of names of the TF-Example feature in + which the tensor shape is stored. If a list, then each corresponds to + one dimension of the shape. + shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is + reshaped accordingly. + default_value: The value used when the `tensor_key` is not found in a + particular `TFExample`. + + Raises: + ValueError: if both `shape_keys` and `shape` are specified. + """ + name_to_id = label_map_util.get_label_map_dict( + label_map_proto_file, use_display_name=False) + # We use a default_value of -1, but we expect all labels to be contained + # in the label map. + try: + # Dynamically try to load the tf v2 lookup, falling back to contrib + lookup = tf.compat.v2.lookup + hash_table_class = tf.compat.v2.lookup.StaticHashTable + except AttributeError: + lookup = contrib_lookup + hash_table_class = contrib_lookup.HashTable + name_to_id_table = hash_table_class( + initializer=lookup.KeyValueTensorInitializer( + keys=tf.constant(list(name_to_id.keys())), + values=tf.constant(list(name_to_id.values()), dtype=tf.int64)), + default_value=-1) + display_name_to_id = label_map_util.get_label_map_dict( + label_map_proto_file, use_display_name=True) + # We use a default_value of -1, but we expect all labels to be contained + # in the label map. 
+ display_name_to_id_table = hash_table_class( + initializer=lookup.KeyValueTensorInitializer( + keys=tf.constant(list(display_name_to_id.keys())), + values=tf.constant( + list(display_name_to_id.values()), dtype=tf.int64)), + default_value=-1) + + self._name_to_id_table = name_to_id_table + self._display_name_to_id_table = display_name_to_id_table + super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape, + default_value) + + def tensors_to_item(self, keys_to_tensors): + unmapped_tensor = super(_ClassTensorHandler, + self).tensors_to_item(keys_to_tensors) + return tf.maximum(self._name_to_id_table.lookup(unmapped_tensor), + self._display_name_to_id_table.lookup(unmapped_tensor)) + + +class TfExampleDecoder(data_decoder.DataDecoder): + """Tensorflow Example proto decoder.""" + + def __init__(self, + load_instance_masks=False, + instance_mask_type=input_reader_pb2.NUMERICAL_MASKS, + label_map_proto_file=None, + use_display_name=False, + dct_method='', + num_keypoints=0, + num_additional_channels=0, + load_multiclass_scores=False, + load_context_features=False, + expand_hierarchy_labels=False, + load_dense_pose=False, + load_track_id=False): + """Constructor sets keys_to_features and items_to_handlers. + + Args: + load_instance_masks: whether or not to load and handle instance masks. + instance_mask_type: type of instance masks. Options are provided in + input_reader.proto. This is only used if `load_instance_masks` is True. + label_map_proto_file: a file path to a + object_detection.protos.StringIntLabelMap proto. If provided, then the + mapped IDs of 'image/object/class/text' will take precedence over the + existing 'image/object/class/label' ID. Also, if provided, it is + assumed that 'image/object/class/text' will be in the data. + use_display_name: whether or not to use the `display_name` for label + mapping (instead of `name`). Only used if label_map_proto_file is + provided. + dct_method: An optional string. Defaults to None. It only takes + effect when image format is jpeg, used to specify a hint about the + algorithm used for jpeg decompression. Currently valid values + are ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be ignored, for + example, the jpeg library does not have that specific option. + num_keypoints: the number of keypoints per object. + num_additional_channels: how many additional channels to use. + load_multiclass_scores: Whether to load multiclass scores associated with + boxes. + load_context_features: Whether to load information from context_features, + to provide additional context to a detection model for training and/or + inference. + expand_hierarchy_labels: Expands the object and image labels taking into + account the provided hierarchy in the label_map_proto_file. For positive + classes, the labels are extended to ancestor. For negative classes, + the labels are expanded to descendants. + load_dense_pose: Whether to load DensePose annotations. + load_track_id: Whether to load tracking annotations. + + Raises: + ValueError: If `instance_mask_type` option is not one of + input_reader_pb2.DEFAULT, input_reader_pb2.NUMERICAL, or + input_reader_pb2.PNG_MASKS. + ValueError: If `expand_labels_hierarchy` is True, but the + `label_map_proto_file` is not provided. + """ + # TODO(rathodv): delete unused `use_display_name` argument once we change + # other decoders to handle label maps similarly. 
+ del use_display_name + self.keys_to_features = { + 'image/encoded': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/format': + tf.FixedLenFeature((), tf.string, default_value='jpeg'), + 'image/filename': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/key/sha256': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/source_id': + tf.FixedLenFeature((), tf.string, default_value=''), + 'image/height': + tf.FixedLenFeature((), tf.int64, default_value=1), + 'image/width': + tf.FixedLenFeature((), tf.int64, default_value=1), + # Image-level labels. + 'image/class/text': + tf.VarLenFeature(tf.string), + 'image/class/label': + tf.VarLenFeature(tf.int64), + 'image/neg_category_ids': + tf.VarLenFeature(tf.int64), + 'image/not_exhaustive_category_ids': + tf.VarLenFeature(tf.int64), + 'image/class/confidence': + tf.VarLenFeature(tf.float32), + # Object boxes and classes. + 'image/object/bbox/xmin': + tf.VarLenFeature(tf.float32), + 'image/object/bbox/xmax': + tf.VarLenFeature(tf.float32), + 'image/object/bbox/ymin': + tf.VarLenFeature(tf.float32), + 'image/object/bbox/ymax': + tf.VarLenFeature(tf.float32), + 'image/object/class/label': + tf.VarLenFeature(tf.int64), + 'image/object/class/text': + tf.VarLenFeature(tf.string), + 'image/object/area': + tf.VarLenFeature(tf.float32), + 'image/object/is_crowd': + tf.VarLenFeature(tf.int64), + 'image/object/difficult': + tf.VarLenFeature(tf.int64), + 'image/object/group_of': + tf.VarLenFeature(tf.int64), + 'image/object/weight': + tf.VarLenFeature(tf.float32), + + } + # We are checking `dct_method` instead of passing it directly in order to + # ensure TF version 1.6 compatibility. + if dct_method: + image = slim_example_decoder.Image( + image_key='image/encoded', + format_key='image/format', + channels=3, + dct_method=dct_method) + additional_channel_image = slim_example_decoder.Image( + image_key='image/additional_channels/encoded', + format_key='image/format', + channels=1, + repeated=True, + dct_method=dct_method) + else: + image = slim_example_decoder.Image( + image_key='image/encoded', format_key='image/format', channels=3) + additional_channel_image = slim_example_decoder.Image( + image_key='image/additional_channels/encoded', + format_key='image/format', + channels=1, + repeated=True) + self.items_to_handlers = { + fields.InputDataFields.image: + image, + fields.InputDataFields.source_id: ( + slim_example_decoder.Tensor('image/source_id')), + fields.InputDataFields.key: ( + slim_example_decoder.Tensor('image/key/sha256')), + fields.InputDataFields.filename: ( + slim_example_decoder.Tensor('image/filename')), + # Image-level labels. + fields.InputDataFields.groundtruth_image_confidences: ( + slim_example_decoder.Tensor('image/class/confidence')), + fields.InputDataFields.groundtruth_verified_neg_classes: ( + slim_example_decoder.Tensor('image/neg_category_ids')), + fields.InputDataFields.groundtruth_not_exhaustive_classes: ( + slim_example_decoder.Tensor('image/not_exhaustive_category_ids')), + # Object boxes and classes. 
+ fields.InputDataFields.groundtruth_boxes: ( + slim_example_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'], + 'image/object/bbox/')), + fields.InputDataFields.groundtruth_area: + slim_example_decoder.Tensor('image/object/area'), + fields.InputDataFields.groundtruth_is_crowd: ( + slim_example_decoder.Tensor('image/object/is_crowd')), + fields.InputDataFields.groundtruth_difficult: ( + slim_example_decoder.Tensor('image/object/difficult')), + fields.InputDataFields.groundtruth_group_of: ( + slim_example_decoder.Tensor('image/object/group_of')), + fields.InputDataFields.groundtruth_weights: ( + slim_example_decoder.Tensor('image/object/weight')), + + } + if load_multiclass_scores: + self.keys_to_features[ + 'image/object/class/multiclass_scores'] = tf.VarLenFeature(tf.float32) + self.items_to_handlers[fields.InputDataFields.multiclass_scores] = ( + slim_example_decoder.Tensor('image/object/class/multiclass_scores')) + + if load_context_features: + self.keys_to_features[ + 'image/context_features'] = tf.VarLenFeature(tf.float32) + self.items_to_handlers[fields.InputDataFields.context_features] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/context_features', 'image/context_feature_length'], + self._reshape_context_features)) + + self.keys_to_features[ + 'image/context_feature_length'] = tf.FixedLenFeature((), tf.int64) + self.items_to_handlers[fields.InputDataFields.context_feature_length] = ( + slim_example_decoder.Tensor('image/context_feature_length')) + + if num_additional_channels > 0: + self.keys_to_features[ + 'image/additional_channels/encoded'] = tf.FixedLenFeature( + (num_additional_channels,), tf.string) + self.items_to_handlers[ + fields.InputDataFields. + image_additional_channels] = additional_channel_image + self._num_keypoints = num_keypoints + if num_keypoints > 0: + self.keys_to_features['image/object/keypoint/x'] = ( + tf.VarLenFeature(tf.float32)) + self.keys_to_features['image/object/keypoint/y'] = ( + tf.VarLenFeature(tf.float32)) + self.keys_to_features['image/object/keypoint/visibility'] = ( + tf.VarLenFeature(tf.int64)) + self.items_to_handlers[fields.InputDataFields.groundtruth_keypoints] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/object/keypoint/y', 'image/object/keypoint/x'], + self._reshape_keypoints)) + kpt_vis_field = fields.InputDataFields.groundtruth_keypoint_visibilities + self.items_to_handlers[kpt_vis_field] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/object/keypoint/x', 'image/object/keypoint/visibility'], + self._reshape_keypoint_visibilities)) + if load_instance_masks: + if instance_mask_type in (input_reader_pb2.DEFAULT, + input_reader_pb2.NUMERICAL_MASKS): + self.keys_to_features['image/object/mask'] = ( + tf.VarLenFeature(tf.float32)) + self.items_to_handlers[ + fields.InputDataFields.groundtruth_instance_masks] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/object/mask', 'image/height', 'image/width'], + self._reshape_instance_masks)) + elif instance_mask_type == input_reader_pb2.PNG_MASKS: + self.keys_to_features['image/object/mask'] = tf.VarLenFeature(tf.string) + self.items_to_handlers[ + fields.InputDataFields.groundtruth_instance_masks] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/object/mask', 'image/height', 'image/width'], + self._decode_png_instance_masks)) + else: + raise ValueError('Did not recognize the `instance_mask_type` option.') + if load_dense_pose: + self.keys_to_features['image/object/densepose/num'] = ( + tf.VarLenFeature(tf.int64)) + 
self.keys_to_features['image/object/densepose/part_index'] = ( + tf.VarLenFeature(tf.int64)) + self.keys_to_features['image/object/densepose/x'] = ( + tf.VarLenFeature(tf.float32)) + self.keys_to_features['image/object/densepose/y'] = ( + tf.VarLenFeature(tf.float32)) + self.keys_to_features['image/object/densepose/u'] = ( + tf.VarLenFeature(tf.float32)) + self.keys_to_features['image/object/densepose/v'] = ( + tf.VarLenFeature(tf.float32)) + self.items_to_handlers[ + fields.InputDataFields.groundtruth_dp_num_points] = ( + slim_example_decoder.Tensor('image/object/densepose/num')) + self.items_to_handlers[fields.InputDataFields.groundtruth_dp_part_ids] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/object/densepose/part_index', + 'image/object/densepose/num'], self._dense_pose_part_indices)) + self.items_to_handlers[ + fields.InputDataFields.groundtruth_dp_surface_coords] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/object/densepose/x', 'image/object/densepose/y', + 'image/object/densepose/u', 'image/object/densepose/v', + 'image/object/densepose/num'], + self._dense_pose_surface_coordinates)) + if load_track_id: + self.keys_to_features['image/object/track/label'] = ( + tf.VarLenFeature(tf.int64)) + self.items_to_handlers[ + fields.InputDataFields.groundtruth_track_ids] = ( + slim_example_decoder.Tensor('image/object/track/label')) + + if label_map_proto_file: + # If the label_map_proto is provided, try to use it in conjunction with + # the class text, and fall back to a materialized ID. + label_handler = slim_example_decoder.BackupHandler( + _ClassTensorHandler( + 'image/object/class/text', label_map_proto_file, + default_value=''), + slim_example_decoder.Tensor('image/object/class/label')) + image_label_handler = slim_example_decoder.BackupHandler( + _ClassTensorHandler( + fields.TfExampleFields.image_class_text, + label_map_proto_file, + default_value=''), + slim_example_decoder.Tensor(fields.TfExampleFields.image_class_label)) + else: + label_handler = slim_example_decoder.Tensor('image/object/class/label') + image_label_handler = slim_example_decoder.Tensor( + fields.TfExampleFields.image_class_label) + self.items_to_handlers[ + fields.InputDataFields.groundtruth_classes] = label_handler + self.items_to_handlers[ + fields.InputDataFields.groundtruth_image_classes] = image_label_handler + + self._expand_hierarchy_labels = expand_hierarchy_labels + self._ancestors_lut = None + self._descendants_lut = None + if expand_hierarchy_labels: + if label_map_proto_file: + ancestors_lut, descendants_lut = ( + label_map_util.get_label_map_hierarchy_lut(label_map_proto_file, + True)) + self._ancestors_lut = tf.constant(ancestors_lut, dtype=tf.int64) + self._descendants_lut = tf.constant(descendants_lut, dtype=tf.int64) + else: + raise ValueError('In order to expand labels, the label_map_proto_file ' + 'has to be provided.') + + def decode(self, tf_example_string_tensor): + """Decodes serialized tensorflow example and returns a tensor dictionary. + + Args: + tf_example_string_tensor: a string tensor holding a serialized tensorflow + example proto. + + Returns: + A dictionary of the following tensors. + fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, 3] + containing image. + fields.InputDataFields.original_image_spatial_shape - 1D int32 tensor of + shape [2] containing shape of the image. + fields.InputDataFields.source_id - string tensor containing original + image id. + fields.InputDataFields.key - string tensor with unique sha256 hash key. 
+ fields.InputDataFields.filename - string tensor with original dataset + filename. + fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape + [None, 4] containing box corners. + fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape + [None] containing classes for the boxes. + fields.InputDataFields.groundtruth_weights - 1D float32 tensor of + shape [None] indicating the weights of groundtruth boxes. + fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape + [None] containing object mask area in pixels squared. + fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape + [None] indicating if the boxes enclose a crowd. + + Optional: + fields.InputDataFields.groundtruth_image_confidences - 1D float tensor of + shape [None] indicating if a class is present in the image (1.0) or + a class is not present in the image (0.0). + fields.InputDataFields.image_additional_channels - 3D uint8 tensor of + shape [None, None, num_additional_channels]. 1st dim is height; 2nd dim + is width; 3rd dim is the number of additional channels. + fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape + [None] indicating if the boxes represent `difficult` instances. + fields.InputDataFields.groundtruth_group_of - 1D bool tensor of shape + [None] indicating if the boxes represent `group_of` instances. + fields.InputDataFields.groundtruth_keypoints - 3D float32 tensor of + shape [None, num_keypoints, 2] containing keypoints, where the + coordinates of the keypoints are ordered (y, x). + fields.InputDataFields.groundtruth_keypoint_visibilities - 2D bool + tensor of shape [None, num_keypoints] containing keypoint visibilities. + fields.InputDataFields.groundtruth_instance_masks - 3D float32 tensor of + shape [None, None, None] containing instance masks. + fields.InputDataFields.groundtruth_image_classes - 1D int64 of shape + [None] containing classes for the image. + fields.InputDataFields.multiclass_scores - 1D float32 tensor of shape + [None * num_classes] containing flattened multiclass scores for + groundtruth boxes.
+ fields.InputDataFields.context_features - 1D float32 tensor of shape + [context_feature_length * num_context_features] + fields.InputDataFields.context_feature_length - int32 tensor specifying + the length of each feature in context_features + """ + serialized_example = tf.reshape(tf_example_string_tensor, shape=[]) + decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features, + self.items_to_handlers) + keys = decoder.list_items() + tensors = decoder.decode(serialized_example, items=keys) + tensor_dict = dict(zip(keys, tensors)) + is_crowd = fields.InputDataFields.groundtruth_is_crowd + tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool) + tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3]) + tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.shape( + tensor_dict[fields.InputDataFields.image])[:2] + + if fields.InputDataFields.image_additional_channels in tensor_dict: + channels = tensor_dict[fields.InputDataFields.image_additional_channels] + channels = tf.squeeze(channels, axis=3) + channels = tf.transpose(channels, perm=[1, 2, 0]) + tensor_dict[fields.InputDataFields.image_additional_channels] = channels + + def default_groundtruth_weights(): + return tf.ones( + [tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]], + dtype=tf.float32) + + tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond( + tf.greater( + tf.shape( + tensor_dict[fields.InputDataFields.groundtruth_weights])[0], + 0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights], + default_groundtruth_weights) + + if fields.InputDataFields.groundtruth_keypoints in tensor_dict: + # Set all keypoints that are not labeled to NaN. + gt_kpt_fld = fields.InputDataFields.groundtruth_keypoints + gt_kpt_vis_fld = fields.InputDataFields.groundtruth_keypoint_visibilities + visibilities_tiled = tf.tile( + tf.expand_dims(tensor_dict[gt_kpt_vis_fld], -1), + [1, 1, 2]) + tensor_dict[gt_kpt_fld] = tf.where( + visibilities_tiled, + tensor_dict[gt_kpt_fld], + np.nan * tf.ones_like(tensor_dict[gt_kpt_fld])) + + if self._expand_hierarchy_labels: + input_fields = fields.InputDataFields + image_classes, image_confidences = self._expand_image_label_hierarchy( + tensor_dict[input_fields.groundtruth_image_classes], + tensor_dict[input_fields.groundtruth_image_confidences]) + tensor_dict[input_fields.groundtruth_image_classes] = image_classes + tensor_dict[input_fields.groundtruth_image_confidences] = ( + image_confidences) + + box_fields = [ + fields.InputDataFields.groundtruth_group_of, + fields.InputDataFields.groundtruth_is_crowd, + fields.InputDataFields.groundtruth_difficult, + fields.InputDataFields.groundtruth_area, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_weights, + ] + + def expand_field(field_name): + return self._expansion_box_field_labels( + tensor_dict[input_fields.groundtruth_classes], + tensor_dict[field_name]) + + # pylint: disable=cell-var-from-loop + for field in box_fields: + if field in tensor_dict: + tensor_dict[field] = tf.cond( + tf.size(tensor_dict[field]) > 0, lambda: expand_field(field), + lambda: tensor_dict[field]) + # pylint: enable=cell-var-from-loop + + tensor_dict[input_fields.groundtruth_classes] = ( + self._expansion_box_field_labels( + tensor_dict[input_fields.groundtruth_classes], + tensor_dict[input_fields.groundtruth_classes], True)) + + if fields.InputDataFields.groundtruth_group_of in tensor_dict: + group_of = fields.InputDataFields.groundtruth_group_of + 
tensor_dict[group_of] = tf.cast(tensor_dict[group_of], dtype=tf.bool) + + if fields.InputDataFields.groundtruth_dp_num_points in tensor_dict: + tensor_dict[fields.InputDataFields.groundtruth_dp_num_points] = tf.cast( + tensor_dict[fields.InputDataFields.groundtruth_dp_num_points], + dtype=tf.int32) + tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids] = tf.cast( + tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids], + dtype=tf.int32) + + if fields.InputDataFields.groundtruth_track_ids in tensor_dict: + tensor_dict[fields.InputDataFields.groundtruth_track_ids] = tf.cast( + tensor_dict[fields.InputDataFields.groundtruth_track_ids], + dtype=tf.int32) + + return tensor_dict + + def _reshape_keypoints(self, keys_to_tensors): + """Reshape keypoints. + + The keypoints are reshaped to [num_instances, num_keypoints, 2]. + + Args: + keys_to_tensors: a dictionary from keys to tensors. Expected keys are: + 'image/object/keypoint/x' + 'image/object/keypoint/y' + + Returns: + A 3-D float tensor of shape [num_instances, num_keypoints, 2] with values + in [0, 1]. + """ + y = keys_to_tensors['image/object/keypoint/y'] + if isinstance(y, tf.SparseTensor): + y = tf.sparse_tensor_to_dense(y) + y = tf.expand_dims(y, 1) + x = keys_to_tensors['image/object/keypoint/x'] + if isinstance(x, tf.SparseTensor): + x = tf.sparse_tensor_to_dense(x) + x = tf.expand_dims(x, 1) + keypoints = tf.concat([y, x], 1) + keypoints = tf.reshape(keypoints, [-1, self._num_keypoints, 2]) + return keypoints + + def _reshape_keypoint_visibilities(self, keys_to_tensors): + """Reshape keypoint visibilities. + + The keypoint visibilities are reshaped to [num_instances, + num_keypoints]. + + The raw keypoint visibilities are expected to conform to the + MSCoco definition. See Visibility enum. + + The returned boolean is True for the labeled case (either + Visibility.NOT_VISIBLE or Visibility.VISIBLE). These are the same categories + that COCO uses to evaluate keypoint detection performance: + http://cocodataset.org/#keypoints-eval + + If image/object/keypoint/visibility is not provided, visibilities will be + set to True for finite keypoint coordinate values, and 0 if the coordinates + are NaN. + + Args: + keys_to_tensors: a dictionary from keys to tensors. Expected keys are: + 'image/object/keypoint/x' + 'image/object/keypoint/visibility' + + Returns: + A 2-D bool tensor of shape [num_instances, num_keypoints] with values + in {0, 1}. 1 if the keypoint is labeled, 0 otherwise. + """ + x = keys_to_tensors['image/object/keypoint/x'] + vis = keys_to_tensors['image/object/keypoint/visibility'] + if isinstance(vis, tf.SparseTensor): + vis = tf.sparse_tensor_to_dense(vis) + if isinstance(x, tf.SparseTensor): + x = tf.sparse_tensor_to_dense(x) + + default_vis = tf.where( + tf.math.is_nan(x), + Visibility.UNLABELED.value * tf.ones_like(x, dtype=tf.int64), + Visibility.VISIBLE.value * tf.ones_like(x, dtype=tf.int64)) + # Use visibility if provided, otherwise use the default visibility. + vis = tf.cond(tf.equal(tf.size(x), tf.size(vis)), + true_fn=lambda: vis, + false_fn=lambda: default_vis) + vis = tf.math.logical_or( + tf.math.equal(vis, Visibility.NOT_VISIBLE.value), + tf.math.equal(vis, Visibility.VISIBLE.value)) + vis = tf.reshape(vis, [-1, self._num_keypoints]) + return vis + + def _reshape_instance_masks(self, keys_to_tensors): + """Reshape instance segmentation masks. + + The instance segmentation masks are reshaped to [num_instances, height, + width]. + + Args: + keys_to_tensors: a dictionary from keys to tensors. 
+ + Returns: + A 3-D float tensor of shape [num_instances, height, width] with values + in {0, 1}. + """ + height = keys_to_tensors['image/height'] + width = keys_to_tensors['image/width'] + to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32) + masks = keys_to_tensors['image/object/mask'] + if isinstance(masks, tf.SparseTensor): + masks = tf.sparse_tensor_to_dense(masks) + masks = tf.reshape( + tf.cast(tf.greater(masks, 0.0), dtype=tf.float32), to_shape) + return tf.cast(masks, tf.float32) + + def _reshape_context_features(self, keys_to_tensors): + """Reshape context features. + + The instance context_features are reshaped to + [num_context_features, context_feature_length] + + Args: + keys_to_tensors: a dictionary from keys to tensors. + + Returns: + A 2-D float tensor of shape [num_context_features, context_feature_length] + """ + context_feature_length = keys_to_tensors['image/context_feature_length'] + to_shape = tf.cast(tf.stack([-1, context_feature_length]), tf.int32) + context_features = keys_to_tensors['image/context_features'] + if isinstance(context_features, tf.SparseTensor): + context_features = tf.sparse_tensor_to_dense(context_features) + context_features = tf.reshape(context_features, to_shape) + return context_features + + def _decode_png_instance_masks(self, keys_to_tensors): + """Decode PNG instance segmentation masks and stack into dense tensor. + + The instance segmentation masks are reshaped to [num_instances, height, + width]. + + Args: + keys_to_tensors: a dictionary from keys to tensors. + + Returns: + A 3-D float tensor of shape [num_instances, height, width] with values + in {0, 1}. + """ + + def decode_png_mask(image_buffer): + image = tf.squeeze( + tf.image.decode_image(image_buffer, channels=1), axis=2) + image.set_shape([None, None]) + image = tf.cast(tf.greater(image, 0), dtype=tf.float32) + return image + + png_masks = keys_to_tensors['image/object/mask'] + height = keys_to_tensors['image/height'] + width = keys_to_tensors['image/width'] + if isinstance(png_masks, tf.SparseTensor): + png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='') + return tf.cond( + tf.greater(tf.size(png_masks), 0), + lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32), + lambda: tf.zeros(tf.cast(tf.stack([0, height, width]), dtype=tf.int32))) + + def _dense_pose_part_indices(self, keys_to_tensors): + """Creates a tensor that contains part indices for each DensePose point. + + Args: + keys_to_tensors: a dictionary from keys to tensors. + + Returns: + A 2-D int32 tensor of shape [num_instances, num_points] where each element + contains the DensePose part index (0-23). The value `num_points` + corresponds to the maximum number of sampled points across all instances + in the image. Note that instances with less sampled points will be padded + with zeros in the last dimension. 
+ """ + num_points_per_instances = keys_to_tensors['image/object/densepose/num'] + part_index = keys_to_tensors['image/object/densepose/part_index'] + if isinstance(num_points_per_instances, tf.SparseTensor): + num_points_per_instances = tf.sparse_tensor_to_dense( + num_points_per_instances) + if isinstance(part_index, tf.SparseTensor): + part_index = tf.sparse_tensor_to_dense(part_index) + part_index = tf.cast(part_index, dtype=tf.int32) + max_points_per_instance = tf.cast( + tf.math.reduce_max(num_points_per_instances), dtype=tf.int32) + num_points_cumulative = tf.concat([ + [0], tf.math.cumsum(num_points_per_instances)], axis=0) + + def pad_parts_tensor(instance_ind): + points_range_start = num_points_cumulative[instance_ind] + points_range_end = num_points_cumulative[instance_ind + 1] + part_inds = part_index[points_range_start:points_range_end] + return shape_utils.pad_or_clip_nd(part_inds, + output_shape=[max_points_per_instance]) + + return tf.map_fn(pad_parts_tensor, + tf.range(tf.size(num_points_per_instances)), + dtype=tf.int32) + + def _dense_pose_surface_coordinates(self, keys_to_tensors): + """Creates a tensor that contains surface coords for each DensePose point. + + Args: + keys_to_tensors: a dictionary from keys to tensors. + + Returns: + A 3-D float32 tensor of shape [num_instances, num_points, 4] where each + point contains (y, x, v, u) data for each sampled DensePose point. The + (y, x) coordinate has normalized image locations for the point, and (v, u) + contains the surface coordinate (also normalized) for the part. The value + `num_points` corresponds to the maximum number of sampled points across + all instances in the image. Note that instances with less sampled points + will be padded with zeros in dim=1. + """ + num_points_per_instances = keys_to_tensors['image/object/densepose/num'] + dp_y = keys_to_tensors['image/object/densepose/y'] + dp_x = keys_to_tensors['image/object/densepose/x'] + dp_v = keys_to_tensors['image/object/densepose/v'] + dp_u = keys_to_tensors['image/object/densepose/u'] + if isinstance(num_points_per_instances, tf.SparseTensor): + num_points_per_instances = tf.sparse_tensor_to_dense( + num_points_per_instances) + if isinstance(dp_y, tf.SparseTensor): + dp_y = tf.sparse_tensor_to_dense(dp_y) + if isinstance(dp_x, tf.SparseTensor): + dp_x = tf.sparse_tensor_to_dense(dp_x) + if isinstance(dp_v, tf.SparseTensor): + dp_v = tf.sparse_tensor_to_dense(dp_v) + if isinstance(dp_u, tf.SparseTensor): + dp_u = tf.sparse_tensor_to_dense(dp_u) + max_points_per_instance = tf.cast( + tf.math.reduce_max(num_points_per_instances), dtype=tf.int32) + num_points_cumulative = tf.concat([ + [0], tf.math.cumsum(num_points_per_instances)], axis=0) + + def pad_surface_coordinates_tensor(instance_ind): + """Pads DensePose surface coordinates for each instance.""" + points_range_start = num_points_cumulative[instance_ind] + points_range_end = num_points_cumulative[instance_ind + 1] + y = dp_y[points_range_start:points_range_end] + x = dp_x[points_range_start:points_range_end] + v = dp_v[points_range_start:points_range_end] + u = dp_u[points_range_start:points_range_end] + # Create [num_points_i, 4] tensor, where num_points_i is the number of + # sampled points for instance i. 
+ unpadded_tensor = tf.stack([y, x, v, u], axis=1) + return shape_utils.pad_or_clip_nd( + unpadded_tensor, output_shape=[max_points_per_instance, 4]) + + return tf.map_fn(pad_surface_coordinates_tensor, + tf.range(tf.size(num_points_per_instances)), + dtype=tf.float32) + + def _expand_image_label_hierarchy(self, image_classes, image_confidences): + """Expand image level labels according to the hierarchy. + + Args: + image_classes: Int64 tensor with the image level class ids for a sample. + image_confidences: Float tensor signaling whether a class id is present in + the image (1.0) or not present (0.0). + + Returns: + new_image_classes: Int64 tensor equal to expanding image_classes. + new_image_confidences: Float tensor equal to expanding image_confidences. + """ + + def expand_labels(relation_tensor, confidence_value): + """Expand to ancestors or descendants depending on arguments.""" + mask = tf.equal(image_confidences, confidence_value) + target_image_classes = tf.boolean_mask(image_classes, mask) + expanded_indices = tf.reduce_any((tf.gather( + relation_tensor, target_image_classes - _LABEL_OFFSET, axis=0) > 0), + axis=0) + expanded_indices = tf.where(expanded_indices)[:, 0] + _LABEL_OFFSET + new_groundtruth_image_classes = ( + tf.concat([ + tf.boolean_mask(image_classes, tf.logical_not(mask)), + expanded_indices, + ], + axis=0)) + new_groundtruth_image_confidences = ( + tf.concat([ + tf.boolean_mask(image_confidences, tf.logical_not(mask)), + tf.ones([tf.shape(expanded_indices)[0]], + dtype=image_confidences.dtype) * confidence_value, + ], + axis=0)) + return new_groundtruth_image_classes, new_groundtruth_image_confidences + + image_classes, image_confidences = expand_labels(self._ancestors_lut, 1.0) + new_image_classes, new_image_confidences = expand_labels( + self._descendants_lut, 0.0) + return new_image_classes, new_image_confidences + + def _expansion_box_field_labels(self, + object_classes, + object_field, + copy_class_id=False): + """Expand the labels of a specific object field according to the hierarchy. + + Args: + object_classes: Int64 tensor with the class id for each element in + object_field. + object_field: Tensor to be expanded. + copy_class_id: Boolean to choose whether to use class id values in the + output tensor instead of replicating the original values. + + Returns: + A tensor with the result of expanding object_field. 
+ """ + expanded_indices = tf.gather( + self._ancestors_lut, object_classes - _LABEL_OFFSET, axis=0) + if copy_class_id: + new_object_field = tf.where(expanded_indices > 0)[:, 1] + _LABEL_OFFSET + else: + new_object_field = tf.repeat( + object_field, tf.reduce_sum(expanded_indices, axis=1), axis=0) + return new_object_field diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_example_decoder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_example_decoder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dca6324a61f413bf5655864e8a7541c18ade21a Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_example_decoder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_example_decoder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_example_decoder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..438f382c9e0dbab796061ce771445f057420f250 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_example_decoder_test.py @@ -0,0 +1,1532 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object_detection.data_decoders.tf_example_decoder.""" + +import os +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields as fields +from object_detection.data_decoders import tf_example_decoder +from object_detection.protos import input_reader_pb2 +from object_detection.utils import dataset_util +from object_detection.utils import test_case + + +class TfExampleDecoderTest(test_case.TestCase): + + def _create_encoded_and_decoded_data(self, data, encoding_type): + if encoding_type == 'jpeg': + encode_fn = tf.image.encode_jpeg + decode_fn = tf.image.decode_jpeg + elif encoding_type == 'png': + encode_fn = tf.image.encode_png + decode_fn = tf.image.decode_png + else: + raise ValueError('Invalid encoding type.') + + def prepare_data_fn(): + encoded_data = encode_fn(data) + decoded_data = decode_fn(encoded_data) + return encoded_data, decoded_data + + return self.execute_cpu(prepare_data_fn, []) + + def testDecodeAdditionalChannels(self): + image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg') + + additional_channel = np.random.randint(256, size=(4, 5, 1)).astype(np.uint8) + (encoded_additional_channel, + decoded_additional_channel) = self._create_encoded_and_decoded_data( + additional_channel, 'jpeg') + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/additional_channels/encoded': + dataset_util.bytes_list_feature( + [encoded_additional_channel] * 2), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/source_id': + dataset_util.bytes_feature(six.b('image_id')), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + num_additional_channels=2) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + np.concatenate([decoded_additional_channel] * 2, axis=2), + tensor_dict[fields.InputDataFields.image_additional_channels]) + + def testDecodeJpegImage(self): + image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, decoded_jpeg = self._create_encoded_and_decoded_data( + image, 'jpeg') + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/source_id': + dataset_util.bytes_feature(six.b('image_id')), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + self.assertAllEqual( + (output[fields.InputDataFields.image].get_shape().as_list()), + [None, None, 3]) + self.assertAllEqual( + (output[fields.InputDataFields.original_image_spatial_shape] + .get_shape().as_list()), [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image]) + self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields. 
+ original_image_spatial_shape]) + self.assertEqual( + six.b('image_id'), tensor_dict[fields.InputDataFields.source_id]) + + def testDecodeImageKeyAndFilename(self): + image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg') + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/key/sha256': + dataset_util.bytes_feature(six.b('abc')), + 'image/filename': + dataset_util.bytes_feature(six.b('filename')) + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertEqual(six.b('abc'), tensor_dict[fields.InputDataFields.key]) + self.assertEqual( + six.b('filename'), tensor_dict[fields.InputDataFields.filename]) + + def testDecodePngImage(self): + image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_png, decoded_png = self._create_encoded_and_decoded_data( + image, 'png') + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_png), + 'image/format': + dataset_util.bytes_feature(six.b('png')), + 'image/source_id': + dataset_util.bytes_feature(six.b('image_id')) + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + self.assertAllEqual( + (output[fields.InputDataFields.image].get_shape().as_list()), + [None, None, 3]) + self.assertAllEqual( + (output[fields.InputDataFields.original_image_spatial_shape] + .get_shape().as_list()), [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual(decoded_png, tensor_dict[fields.InputDataFields.image]) + self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields. 
+ original_image_spatial_shape]) + self.assertEqual( + six.b('image_id'), tensor_dict[fields.InputDataFields.source_id]) + + def testDecodePngInstanceMasks(self): + image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_png, _ = self._create_encoded_and_decoded_data(image, 'png') + mask_1 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8) + mask_2 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8) + encoded_png_1, _ = self._create_encoded_and_decoded_data(mask_1, 'png') + decoded_png_1 = np.squeeze(mask_1.astype(np.float32)) + encoded_png_2, _ = self._create_encoded_and_decoded_data(mask_2, 'png') + decoded_png_2 = np.squeeze(mask_2.astype(np.float32)) + encoded_masks = [encoded_png_1, encoded_png_2] + decoded_masks = np.stack([decoded_png_1, decoded_png_2]) + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_png), + 'image/format': + dataset_util.bytes_feature(six.b('png')), + 'image/object/mask': + dataset_util.bytes_list_feature(encoded_masks) + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=True, + instance_mask_type=input_reader_pb2.PNG_MASKS) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + decoded_masks, + tensor_dict[fields.InputDataFields.groundtruth_instance_masks]) + + def testDecodeEmptyPngInstanceMasks(self): + image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8) + encoded_png, _ = self._create_encoded_and_decoded_data(image_tensor, 'png') + encoded_masks = [] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_png), + 'image/format': + dataset_util.bytes_feature(six.b('png')), + 'image/object/mask': + dataset_util.bytes_list_feature(encoded_masks), + 'image/height': + dataset_util.int64_feature(10), + 'image/width': + dataset_util.int64_feature(10), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=True, + instance_mask_type=input_reader_pb2.PNG_MASKS) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + tensor_dict[fields.InputDataFields.groundtruth_instance_masks].shape, + [0, 10, 10]) + + def testDecodeBoundingBox(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_boxes].get_shape().as_list()), + [None, 4]) + 
return output + + tensor_dict = self.execute_cpu(graph_fn, []) + expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs, + bbox_xmaxs]).transpose() + self.assertAllEqual(expected_boxes, + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + + def testDecodeKeypoint(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0] + keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + keypoint_visibility = [1, 2, 0, 1, 0, 2] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + 'image/object/keypoint/y': + dataset_util.float_list_feature(keypoint_ys), + 'image/object/keypoint/x': + dataset_util.float_list_feature(keypoint_xs), + 'image/object/keypoint/visibility': + dataset_util.int64_list_feature(keypoint_visibility), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_boxes].get_shape().as_list()), + [None, 4]) + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()), + [2, 3, 2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs, + bbox_xmaxs]).transpose() + self.assertAllEqual(expected_boxes, + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + + expected_keypoints = [ + [[0.0, 1.0], [1.0, 2.0], [np.nan, np.nan]], + [[3.0, 4.0], [np.nan, np.nan], [5.0, 6.0]]] + self.assertAllClose( + expected_keypoints, + tensor_dict[fields.InputDataFields.groundtruth_keypoints]) + + expected_visibility = ( + (np.array(keypoint_visibility) > 0).reshape((2, 3))) + self.assertAllEqual( + expected_visibility, + tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities]) + + def testDecodeKeypointNoVisibilities(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0] + keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + 'image/object/keypoint/y': + dataset_util.float_list_feature(keypoint_ys), + 'image/object/keypoint/x': + 
dataset_util.float_list_feature(keypoint_xs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_boxes].get_shape().as_list()), + [None, 4]) + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()), + [2, 3, 2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs, + bbox_xmaxs]).transpose() + self.assertAllEqual(expected_boxes, + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + + expected_keypoints = ( + np.vstack([keypoint_ys, keypoint_xs]).transpose().reshape((2, 3, 2))) + self.assertAllEqual( + expected_keypoints, + tensor_dict[fields.InputDataFields.groundtruth_keypoints]) + + expected_visibility = np.ones((2, 3)) + self.assertAllEqual( + expected_visibility, + tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities]) + + def testDecodeDefaultGroundtruthWeights(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_boxes].get_shape().as_list()), + [None, 4]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllClose(tensor_dict[fields.InputDataFields.groundtruth_weights], + np.ones(2, dtype=np.float32)) + + def testDecodeObjectLabel(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes = [0, 1] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/label': + dataset_util.int64_list_feature(bbox_classes), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual(bbox_classes, + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeMultiClassScores(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + 
bbox_xmaxs = [3.0, 7.0] + flattened_multiclass_scores = [100., 50.] + [20., 30.] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/multiclass_scores': + dataset_util.float_list_feature( + flattened_multiclass_scores), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_multiclass_scores=True) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual(flattened_multiclass_scores, + tensor_dict[fields.InputDataFields.multiclass_scores]) + + def testDecodeEmptyMultiClassScores(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_multiclass_scores=True) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertEqual( + (0,), tensor_dict[fields.InputDataFields.multiclass_scores].shape) + + def testDecodeObjectLabelNoText(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes = [1, 2] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/label': + dataset_util.int64_list_feature(bbox_classes), + })).SerializeToString() + label_map_string = """ + item { + id:1 + name:'cat' + } + item { + id:2 + name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [None]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual(bbox_classes, + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectLabelWithText(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = 
self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes_text = [six.b('cat'), six.b('dog')] + # Annotation label gets overridden by labelmap id. + annotated_bbox_classes = [3, 4] + expected_bbox_classes = [1, 2] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + 'image/object/class/label': + dataset_util.int64_list_feature(annotated_bbox_classes), + })).SerializeToString() + label_map_string = """ + item { + id:1 + name:'cat' + } + item { + id:2 + name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual(expected_bbox_classes, + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectLabelUnrecognizedName(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes_text = [six.b('cat'), six.b('cheetah')] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + })).SerializeToString() + + label_map_string = """ + item { + id:2 + name:'cat' + } + item { + id:1 + name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + output = example_decoder.decode(tf.convert_to_tensor(example)) + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [None]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual([2, -1], + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectLabelWithMappingWithDisplayName(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes_text = [six.b('cat'), six.b('dog')] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + })).SerializeToString() + + label_map_string = """ + item { + id:3 + display_name:'cat' + } + item { + id:1 + display_name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + 
fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [None]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual([3, 1], + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectLabelUnrecognizedNameWithMappingWithDisplayName(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes_text = [six.b('cat'), six.b('cheetah')] + bbox_classes_id = [5, 6] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + 'image/object/class/label': + dataset_util.int64_list_feature(bbox_classes_id), + })).SerializeToString() + + label_map_string = """ + item { + name:'/m/cat' + id:3 + display_name:'cat' + } + item { + name:'/m/dog' + id:1 + display_name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual([3, -1], + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectLabelWithMappingWithName(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_classes_text = [six.b('cat'), six.b('dog')] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + })).SerializeToString() + + label_map_string = """ + item { + id:3 + name:'cat' + } + item { + id:1 + name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [None]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual([3, 1], + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testDecodeObjectArea(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + object_area = [100., 174.] 
+ + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/area': + dataset_util.float_list_feature(object_area), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_area].get_shape().as_list()), [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual(object_area, + tensor_dict[fields.InputDataFields.groundtruth_area]) + + def testDecodeVerifiedNegClasses(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + neg_category_ids = [0, 5, 8] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/neg_category_ids': + dataset_util.int64_list_feature(neg_category_ids), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + neg_category_ids, + tensor_dict[fields.InputDataFields.groundtruth_verified_neg_classes]) + + def testDecodeNotExhaustiveClasses(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + not_exhaustive_category_ids = [0, 5, 8] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/not_exhaustive_category_ids': + dataset_util.int64_list_feature( + not_exhaustive_category_ids), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + not_exhaustive_category_ids, + tensor_dict[fields.InputDataFields.groundtruth_not_exhaustive_classes]) + + def testDecodeObjectIsCrowd(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + object_is_crowd = [0, 1] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/is_crowd': + dataset_util.int64_list_feature(object_is_crowd), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_is_crowd].get_shape().as_list()), + [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + [bool(item) for item in object_is_crowd], + tensor_dict[fields.InputDataFields.groundtruth_is_crowd]) + + def testDecodeObjectDifficult(self): + image_tensor = np.random.randint(256, size=(4, 5, 
3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + object_difficult = [0, 1] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/difficult': + dataset_util.int64_list_feature(object_difficult), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_difficult].get_shape().as_list()), + [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + [bool(item) for item in object_difficult], + tensor_dict[fields.InputDataFields.groundtruth_difficult]) + + def testDecodeObjectGroupOf(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + object_group_of = [0, 1] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/group_of': + dataset_util.int64_list_feature(object_group_of), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_group_of].get_shape().as_list()), + [2]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + [bool(item) for item in object_group_of], + tensor_dict[fields.InputDataFields.groundtruth_group_of]) + + def testDecodeObjectWeight(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + object_weights = [0.75, 1.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/weight': + dataset_util.float_list_feature(object_weights), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_weights].get_shape().as_list()), + [None]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual(object_weights, + tensor_dict[fields.InputDataFields.groundtruth_weights]) + + def testDecodeClassConfidence(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + class_confidence = [0.0, 1.0, 0.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/class/confidence': + dataset_util.float_list_feature(class_confidence), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual( + 
(output[fields.InputDataFields.groundtruth_image_confidences] + .get_shape().as_list()), [3]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + class_confidence, + tensor_dict[fields.InputDataFields.groundtruth_image_confidences]) + + def testDecodeInstanceSegmentation(self): + num_instances = 4 + image_height = 5 + image_width = 3 + + # Randomly generate image. + image_tensor = np.random.randint( + 256, size=(image_height, image_width, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + + # Randomly generate instance segmentation masks. + instance_masks = ( + np.random.randint(2, size=(num_instances, image_height, + image_width)).astype(np.float32)) + instance_masks_flattened = np.reshape(instance_masks, [-1]) + + # Randomly generate class labels for each instance. + object_classes = np.random.randint( + 100, size=(num_instances)).astype(np.int64) + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/height': + dataset_util.int64_feature(image_height), + 'image/width': + dataset_util.int64_feature(image_width), + 'image/object/mask': + dataset_util.float_list_feature(instance_masks_flattened), + 'image/object/class/label': + dataset_util.int64_list_feature(object_classes) + })).SerializeToString() + example_decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=True) + output = example_decoder.decode(tf.convert_to_tensor(example)) + + self.assertAllEqual( + (output[fields.InputDataFields.groundtruth_instance_masks].get_shape( + ).as_list()), [4, 5, 3]) + + self.assertAllEqual((output[ + fields.InputDataFields.groundtruth_classes].get_shape().as_list()), + [4]) + return output + + tensor_dict = self.execute_cpu(graph_fn, []) + + self.assertAllEqual( + instance_masks.astype(np.float32), + tensor_dict[fields.InputDataFields.groundtruth_instance_masks]) + self.assertAllEqual(object_classes, + tensor_dict[fields.InputDataFields.groundtruth_classes]) + + def testInstancesNotAvailableByDefault(self): + num_instances = 4 + image_height = 5 + image_width = 3 + # Randomly generate image. + image_tensor = np.random.randint( + 256, size=(image_height, image_width, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + + # Randomly generate instance segmentation masks. + instance_masks = ( + np.random.randint(2, size=(num_instances, image_height, + image_width)).astype(np.float32)) + instance_masks_flattened = np.reshape(instance_masks, [-1]) + + # Randomly generate class labels for each instance. 
+ object_classes = np.random.randint( + 100, size=(num_instances)).astype(np.int64) + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/height': + dataset_util.int64_feature(image_height), + 'image/width': + dataset_util.int64_feature(image_width), + 'image/object/mask': + dataset_util.float_list_feature(instance_masks_flattened), + 'image/object/class/label': + dataset_util.int64_list_feature(object_classes) + })).SerializeToString() + example_decoder = tf_example_decoder.TfExampleDecoder() + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks, + tensor_dict) + + def testDecodeImageLabels(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + + def graph_fn_1(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), + 'image/format': dataset_util.bytes_feature(six.b('jpeg')), + 'image/class/label': dataset_util.int64_list_feature([1, 2]), + })).SerializeToString() + example_decoder = tf_example_decoder.TfExampleDecoder() + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn_1, []) + self.assertIn(fields.InputDataFields.groundtruth_image_classes, tensor_dict) + self.assertAllEqual( + tensor_dict[fields.InputDataFields.groundtruth_image_classes], + np.array([1, 2])) + + def graph_fn_2(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/class/text': + dataset_util.bytes_list_feature( + [six.b('dog'), six.b('cat')]), + })).SerializeToString() + label_map_string = """ + item { + id:3 + name:'cat' + } + item { + id:1 + name:'dog' + } + """ + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn_2, []) + self.assertIn(fields.InputDataFields.groundtruth_image_classes, tensor_dict) + self.assertAllEqual( + tensor_dict[fields.InputDataFields.groundtruth_image_classes], + np.array([1, 3])) + + def testDecodeContextFeatures(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + num_features = 8 + context_feature_length = 10 + context_features = np.random.random(num_features*context_feature_length) + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/context_features': + dataset_util.float_list_feature(context_features), + 'image/context_feature_length': + dataset_util.int64_feature(context_feature_length), + 'image/object/bbox/ymin': + 
dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_context_features=True) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertAllClose( + context_features.reshape(num_features, context_feature_length), + tensor_dict[fields.InputDataFields.context_features]) + self.assertAllEqual( + context_feature_length, + tensor_dict[fields.InputDataFields.context_feature_length]) + + def testContextFeaturesNotAvailableByDefault(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + num_features = 10 + context_feature_length = 10 + context_features = np.random.random(num_features*context_feature_length) + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/context_features': + dataset_util.float_list_feature(context_features), + 'image/context_feature_length': + dataset_util.int64_feature(context_feature_length), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder() + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + self.assertNotIn(fields.InputDataFields.context_features, + tensor_dict) + + def testExpandLabels(self): + label_map_string = """ + item { + id:1 + name:'cat' + ancestor_ids: 2 + } + item { + id:2 + name:'animal' + descendant_ids: 1 + } + item { + id:3 + name:'man' + ancestor_ids: 5 + } + item { + id:4 + name:'woman' + display_name:'woman' + ancestor_ids: 5 + } + item { + id:5 + name:'person' + descendant_ids: 3 + descendant_ids: 4 + } + """ + + label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') + with tf.gfile.Open(label_map_path, 'wb') as f: + f.write(label_map_string) + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0] + bbox_xmins = [1.0, 5.0] + bbox_ymaxs = [2.0, 6.0] + bbox_xmaxs = [3.0, 7.0] + bbox_classes_text = [six.b('cat'), six.b('cat')] + bbox_group_of = [0, 1] + image_class_text = [six.b('cat'), six.b('person')] + image_confidence = [1.0, 0.0] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 
'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + 'image/object/class/text': + dataset_util.bytes_list_feature(bbox_classes_text), + 'image/object/group_of': + dataset_util.int64_list_feature(bbox_group_of), + 'image/class/text': + dataset_util.bytes_list_feature(image_class_text), + 'image/class/confidence': + dataset_util.float_list_feature(image_confidence), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + label_map_proto_file=label_map_path, expand_hierarchy_labels=True) + return example_decoder.decode(tf.convert_to_tensor(example)) + + tensor_dict = self.execute_cpu(graph_fn, []) + + boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs, + bbox_xmaxs]).transpose() + expected_boxes = np.stack( + [boxes[0, :], boxes[0, :], boxes[1, :], boxes[1, :]], axis=0) + expected_boxes_class = np.array([1, 2, 1, 2]) + expected_boxes_group_of = np.array([0, 0, 1, 1]) + expected_image_class = np.array([1, 2, 3, 4, 5]) + expected_image_confidence = np.array([1.0, 1.0, 0.0, 0.0, 0.0]) + self.assertAllEqual(expected_boxes, + tensor_dict[fields.InputDataFields.groundtruth_boxes]) + self.assertAllEqual(expected_boxes_class, + tensor_dict[fields.InputDataFields.groundtruth_classes]) + self.assertAllEqual( + expected_boxes_group_of, + tensor_dict[fields.InputDataFields.groundtruth_group_of]) + self.assertAllEqual( + expected_image_class, + tensor_dict[fields.InputDataFields.groundtruth_image_classes]) + self.assertAllEqual( + expected_image_confidence, + tensor_dict[fields.InputDataFields.groundtruth_image_confidences]) + + def testDecodeDensePose(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0, 2.0] + bbox_xmins = [1.0, 5.0, 8.0] + bbox_ymaxs = [2.0, 6.0, 1.0] + bbox_xmaxs = [3.0, 7.0, 3.3] + densepose_num = [0, 4, 2] + densepose_part_index = [2, 2, 3, 4, 2, 9] + densepose_x = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6] + densepose_y = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4] + densepose_u = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06] + densepose_v = [0.99, 0.98, 0.97, 0.96, 0.95, 0.94] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + 'image/object/densepose/num': + dataset_util.int64_list_feature(densepose_num), + 'image/object/densepose/part_index': + dataset_util.int64_list_feature(densepose_part_index), + 'image/object/densepose/x': + dataset_util.float_list_feature(densepose_x), + 'image/object/densepose/y': + dataset_util.float_list_feature(densepose_y), + 'image/object/densepose/u': + dataset_util.float_list_feature(densepose_u), + 'image/object/densepose/v': + dataset_util.float_list_feature(densepose_v), + + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_dense_pose=True) + output = example_decoder.decode(tf.convert_to_tensor(example)) + dp_num_points = output[fields.InputDataFields.groundtruth_dp_num_points] + dp_part_ids = output[fields.InputDataFields.groundtruth_dp_part_ids] + dp_surface_coords = output[ + 
fields.InputDataFields.groundtruth_dp_surface_coords] + return dp_num_points, dp_part_ids, dp_surface_coords + + dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu( + graph_fn, []) + + expected_dp_num_points = [0, 4, 2] + expected_dp_part_ids = [ + [0, 0, 0, 0], + [2, 2, 3, 4], + [2, 9, 0, 0] + ] + expected_dp_surface_coords = np.array( + [ + # Instance 0 (no points). + [[0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], + # Instance 1 (4 points). + [[0.9, 0.1, 0.99, 0.01], + [0.8, 0.2, 0.98, 0.02], + [0.7, 0.3, 0.97, 0.03], + [0.6, 0.4, 0.96, 0.04]], + # Instance 2 (2 points). + [[0.5, 0.5, 0.95, 0.05], + [0.4, 0.6, 0.94, 0.06], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], + ], dtype=np.float32) + + self.assertAllEqual(dp_num_points, expected_dp_num_points) + self.assertAllEqual(dp_part_ids, expected_dp_part_ids) + self.assertAllClose(dp_surface_coords, expected_dp_surface_coords) + + def testDecodeTrack(self): + image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8) + encoded_jpeg, _ = self._create_encoded_and_decoded_data( + image_tensor, 'jpeg') + bbox_ymins = [0.0, 4.0, 2.0] + bbox_xmins = [1.0, 5.0, 8.0] + bbox_ymaxs = [2.0, 6.0, 1.0] + bbox_xmaxs = [3.0, 7.0, 3.3] + track_labels = [0, 1, 2] + + def graph_fn(): + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(bbox_ymins), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(bbox_xmins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(bbox_ymaxs), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(bbox_xmaxs), + 'image/object/track/label': + dataset_util.int64_list_feature(track_labels), + })).SerializeToString() + + example_decoder = tf_example_decoder.TfExampleDecoder( + load_track_id=True) + output = example_decoder.decode(tf.convert_to_tensor(example)) + track_ids = output[fields.InputDataFields.groundtruth_track_ids] + return track_ids + + track_ids = self.execute_cpu(graph_fn, []) + + expected_track_labels = [0, 1, 2] + + self.assertAllEqual(track_ids, expected_track_labels) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_sequence_example_decoder.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_sequence_example_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..1565a910eb1726ce0846e9c78488a7e8d4f97fdf --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_sequence_example_decoder.py @@ -0,0 +1,314 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
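The tests in tf_example_decoder_test.py above all follow the same pattern: serialize a tf.train.Example built with the dataset_util feature helpers, decode it with TfExampleDecoder, and compare the resulting tensor dictionary against expected values. A minimal standalone sketch of that pattern, outside the test harness (illustrative only, not part of this patch; it assumes the object_detection package from this diff is importable and that the decode ops can run eagerly, which is exactly why the tests instead wrap everything in graph_fn/execute_cpu):

import numpy as np
import six
import tensorflow as tf

from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.utils import dataset_util

# Encode a small random image as JPEG, as the tests' helper does.
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = tf.io.encode_jpeg(image).numpy()

# Serialize a tf.train.Example using the feature keys the decoder understands.
example = tf.train.Example(features=tf.train.Features(feature={
    'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
    'image/format': dataset_util.bytes_feature(six.b('jpeg')),
    'image/source_id': dataset_util.bytes_feature(six.b('image_id')),
    'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]),
    'image/object/bbox/xmin': dataset_util.float_list_feature([0.1]),
    'image/object/bbox/ymax': dataset_util.float_list_feature([0.5]),
    'image/object/bbox/xmax': dataset_util.float_list_feature([0.6]),
    'image/object/class/label': dataset_util.int64_list_feature([1]),
})).SerializeToString()

# Decode and inspect the tensor dictionary, mirroring the assertions above.
decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = decoder.decode(tf.convert_to_tensor(example))
print(tensor_dict[fields.InputDataFields.image].shape)          # (4, 5, 3)
print(tensor_dict[fields.InputDataFields.groundtruth_boxes])    # [[0.0 0.1 0.5 0.6]]
print(tensor_dict[fields.InputDataFields.groundtruth_classes])  # [1]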
+# ============================================================================== +"""Sequence example decoder for object detection.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import zip +import tensorflow.compat.v1 as tf +from tf_slim import tfexample_decoder as slim_example_decoder + +from object_detection.core import data_decoder +from object_detection.core import standard_fields as fields +from object_detection.utils import label_map_util + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import lookup as contrib_lookup +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + + +class _ClassTensorHandler(slim_example_decoder.Tensor): + """An ItemHandler to fetch class ids from class text.""" + + def __init__(self, + tensor_key, + label_map_proto_file, + shape_keys=None, + shape=None, + default_value=''): + """Initializes the LookupTensor handler. + + Simply calls a vocabulary (most often, a label mapping) lookup. + + Args: + tensor_key: the name of the `TFExample` feature to read the tensor from. + label_map_proto_file: File path to a text format LabelMapProto message + mapping class text to id. + shape_keys: Optional name or list of names of the TF-Example feature in + which the tensor shape is stored. If a list, then each corresponds to + one dimension of the shape. + shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is + reshaped accordingly. + default_value: The value used when the `tensor_key` is not found in a + particular `TFExample`. + + Raises: + ValueError: if both `shape_keys` and `shape` are specified. + """ + name_to_id = label_map_util.get_label_map_dict( + label_map_proto_file, use_display_name=False) + # We use a default_value of -1, but we expect all labels to be contained + # in the label map. + try: + # Dynamically try to load the tf v2 lookup, falling back to contrib + lookup = tf.compat.v2.lookup + hash_table_class = tf.compat.v2.lookup.StaticHashTable + except AttributeError: + lookup = contrib_lookup + hash_table_class = contrib_lookup.HashTable + name_to_id_table = hash_table_class( + initializer=lookup.KeyValueTensorInitializer( + keys=tf.constant(list(name_to_id.keys())), + values=tf.constant(list(name_to_id.values()), dtype=tf.int64)), + default_value=-1) + + self._name_to_id_table = name_to_id_table + super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape, + default_value) + + def tensors_to_item(self, keys_to_tensors): + unmapped_tensor = super(_ClassTensorHandler, + self).tensors_to_item(keys_to_tensors) + return self._name_to_id_table.lookup(unmapped_tensor) + + +class TfSequenceExampleDecoder(data_decoder.DataDecoder): + """Tensorflow Sequence Example proto decoder for Object Detection. + + Sequence examples contain sequences of images which share common + features. 
The structure of TfSequenceExamples can be seen in + dataset_tools/seq_example_util.py + + For the TFODAPI, the following fields are required: + Shared features: + 'image/format' + 'image/height' + 'image/width' + + Features with an entry for each image, where bounding box features can + be empty lists if the image does not contain any objects: + 'image/encoded' + 'image/source_id' + 'region/bbox/xmin' + 'region/bbox/xmax' + 'region/bbox/ymin' + 'region/bbox/ymax' + 'region/label/string' + + Optionally, the sequence example can include context_features for use in + Context R-CNN (see https://arxiv.org/abs/1912.03538): + 'image/context_features' + 'image/context_feature_length' + """ + + def __init__(self, + label_map_proto_file, + load_context_features=False, + use_display_name=False, + fully_annotated=False): + """Constructs `TfSequenceExampleDecoder` object. + + Args: + label_map_proto_file: a file path to a + object_detection.protos.StringIntLabelMap proto. The + label map will be used to map IDs of 'region/label/string'. + It is assumed that 'region/label/string' will be in the data. + load_context_features: Whether to load information from context_features, + to provide additional context to a detection model for training and/or + inference + use_display_name: whether or not to use the `display_name` for label + mapping (instead of `name`). Only used if label_map_proto_file is + provided. + fully_annotated: If True, will assume that every frame (whether it has + boxes or not), has been fully annotated. If False, a + 'region/is_annotated' field must be provided in the dataset which + indicates which frames have annotations. Default False. + """ + # Specifies how the tf.SequenceExamples are decoded. + self._context_keys_to_features = { + 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), + 'image/height': tf.FixedLenFeature((), tf.int64), + 'image/width': tf.FixedLenFeature((), tf.int64), + } + self._sequence_keys_to_feature_lists = { + 'image/encoded': tf.FixedLenSequenceFeature([], dtype=tf.string), + 'image/source_id': tf.FixedLenSequenceFeature([], dtype=tf.string), + 'region/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), + 'region/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), + 'region/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), + 'region/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), + 'region/label/string': tf.VarLenFeature(dtype=tf.string), + 'region/label/confidence': tf.VarLenFeature(dtype=tf.float32), + } + + self._items_to_handlers = { + # Context. + fields.InputDataFields.image_height: + slim_example_decoder.Tensor('image/height'), + fields.InputDataFields.image_width: + slim_example_decoder.Tensor('image/width'), + + # Sequence. + fields.InputDataFields.num_groundtruth_boxes: + slim_example_decoder.NumBoxesSequence('region/bbox/xmin'), + fields.InputDataFields.groundtruth_boxes: + slim_example_decoder.BoundingBoxSequence( + prefix='region/bbox/', default_value=0.0), + fields.InputDataFields.groundtruth_weights: + slim_example_decoder.Tensor('region/label/confidence'), + } + + # If the dataset is sparsely annotated, parse sequence features which + # indicate which frames have been labeled. 
+ if not fully_annotated: + self._sequence_keys_to_feature_lists['region/is_annotated'] = ( + tf.FixedLenSequenceFeature([], dtype=tf.int64)) + self._items_to_handlers[fields.InputDataFields.is_annotated] = ( + slim_example_decoder.Tensor('region/is_annotated')) + + self._items_to_handlers[fields.InputDataFields.image] = ( + slim_example_decoder.Tensor('image/encoded')) + self._items_to_handlers[fields.InputDataFields.source_id] = ( + slim_example_decoder.Tensor('image/source_id')) + + label_handler = _ClassTensorHandler( + 'region/label/string', label_map_proto_file, default_value='') + + self._items_to_handlers[ + fields.InputDataFields.groundtruth_classes] = label_handler + + if load_context_features: + self._context_keys_to_features['image/context_features'] = ( + tf.VarLenFeature(dtype=tf.float32)) + self._items_to_handlers[fields.InputDataFields.context_features] = ( + slim_example_decoder.ItemHandlerCallback( + ['image/context_features', 'image/context_feature_length'], + self._reshape_context_features)) + + self._context_keys_to_features['image/context_feature_length'] = ( + tf.FixedLenFeature((), tf.int64)) + self._items_to_handlers[fields.InputDataFields.context_feature_length] = ( + slim_example_decoder.Tensor('image/context_feature_length')) + self._fully_annotated = fully_annotated + + def decode(self, tf_seq_example_string_tensor): + """Decodes serialized `tf.SequenceExample`s and returns a tensor dictionary. + + Args: + tf_seq_example_string_tensor: a string tensor holding a serialized + `tf.SequenceExample`. + + Returns: + A list of dictionaries with (at least) the following tensors: + fields.InputDataFields.source_id: a [num_frames] string tensor with a + unique ID for each frame. + fields.InputDataFields.num_groundtruth_boxes: a [num_frames] int32 tensor + specifying the number of boxes in each frame. + fields.InputDataFields.groundtruth_boxes: a [num_frames, num_boxes, 4] + float32 tensor with bounding boxes for each frame. Note that num_boxes + is the maximum boxes seen in any individual frame. Any frames with fewer + boxes are padded with 0.0. + fields.InputDataFields.groundtruth_classes: a [num_frames, num_boxes] + int32 tensor with class indices for each box in each frame. + fields.InputDataFields.groundtruth_weights: a [num_frames, num_boxes] + float32 tensor with weights of the groundtruth boxes. + fields.InputDataFields.is_annotated: a [num_frames] bool tensor specifying + whether the image was annotated or not. If False, the corresponding + entries in the groundtruth tensor will be ignored. + fields.InputDataFields.context_features - 1D float32 tensor of shape + [context_feature_length * num_context_features] + fields.InputDataFields.context_feature_length - int32 tensor specifying + the length of each feature in context_features + fields.InputDataFields.image: a [num_frames] string tensor with + the encoded images. 
+ """ + serialized_example = tf.reshape(tf_seq_example_string_tensor, shape=[]) + decoder = slim_example_decoder.TFSequenceExampleDecoder( + self._context_keys_to_features, self._sequence_keys_to_feature_lists, + self._items_to_handlers) + keys = decoder.list_items() + tensors = decoder.decode(serialized_example, items=keys) + tensor_dict = dict(list(zip(keys, tensors))) + tensor_dict[fields.InputDataFields.groundtruth_boxes].set_shape( + [None, None, 4]) + tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.cast( + tensor_dict[fields.InputDataFields.num_groundtruth_boxes], + dtype=tf.int32) + tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.cast( + tensor_dict[fields.InputDataFields.groundtruth_classes], dtype=tf.int32) + tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.cast( + tf.stack([ + tensor_dict[fields.InputDataFields.image_height], + tensor_dict[fields.InputDataFields.image_width] + ]), + dtype=tf.int32) + tensor_dict.pop(fields.InputDataFields.image_height) + tensor_dict.pop(fields.InputDataFields.image_width) + + def default_groundtruth_weights(): + """Produces weights of 1.0 for each valid box, and 0.0 otherwise.""" + num_boxes_per_frame = tensor_dict[ + fields.InputDataFields.num_groundtruth_boxes] + max_num_boxes = tf.reduce_max(num_boxes_per_frame) + num_boxes_per_frame_tiled = tf.tile( + tf.expand_dims(num_boxes_per_frame, axis=-1), + multiples=tf.stack([1, max_num_boxes])) + range_tiled = tf.tile( + tf.expand_dims(tf.range(max_num_boxes), axis=0), + multiples=tf.stack([tf.shape(num_boxes_per_frame)[0], 1])) + return tf.cast( + tf.greater(num_boxes_per_frame_tiled, range_tiled), tf.float32) + + tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond( + tf.greater( + tf.size(tensor_dict[fields.InputDataFields.groundtruth_weights]), + 0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights], + default_groundtruth_weights) + + if self._fully_annotated: + tensor_dict[fields.InputDataFields.is_annotated] = tf.ones_like( + tensor_dict[fields.InputDataFields.num_groundtruth_boxes], + dtype=tf.bool) + else: + tensor_dict[fields.InputDataFields.is_annotated] = tf.cast( + tensor_dict[fields.InputDataFields.is_annotated], dtype=tf.bool) + + return tensor_dict + + def _reshape_context_features(self, keys_to_tensors): + """Reshape context features. + + The instance context_features are reshaped to + [num_context_features, context_feature_length] + + Args: + keys_to_tensors: a dictionary from keys to tensors. 
+ + Returns: + A 2-D float tensor of shape [num_context_features, context_feature_length] + """ + context_feature_length = keys_to_tensors['image/context_feature_length'] + to_shape = tf.cast(tf.stack([-1, context_feature_length]), tf.int32) + context_features = keys_to_tensors['image/context_features'] + if isinstance(context_features, tf.SparseTensor): + context_features = tf.sparse_tensor_to_dense(context_features) + context_features = tf.reshape(context_features, to_shape) + return context_features diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_sequence_example_decoder.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_sequence_example_decoder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92549d87c3c12f11e306432c74f8de3719e48fb8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_sequence_example_decoder.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_sequence_example_decoder_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_sequence_example_decoder_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2ea1c6163454cf2d05065713b2e0657f24af5e64 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/data_decoders/tf_sequence_example_decoder_test.py @@ -0,0 +1,173 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
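Two pieces of TfSequenceExampleDecoder above are easier to see with concrete numbers: the reshape performed by _reshape_context_features(), and the validity mask built by default_groundtruth_weights() inside decode(). The following is a standalone sketch of the same operations in plain TensorFlow (illustrative only, assuming eager execution; it does not call into the module):

import numpy as np
import tensorflow as tf

# Context features arrive as a flat float list of length
# num_context_features * context_feature_length and are reshaped to
# [num_context_features, context_feature_length].
context_feature_length = 4
flat_context_features = np.arange(8, dtype=np.float32)  # 2 features of length 4
reshaped = tf.reshape(flat_context_features, [-1, context_feature_length])
# reshaped -> [[0. 1. 2. 3.]
#              [4. 5. 6. 7.]]

# default_groundtruth_weights() turns per-frame box counts into a
# [num_frames, max_num_boxes] mask: 1.0 for real boxes, 0.0 for padding.
num_boxes_per_frame = tf.constant([1, 3, 0])  # boxes in each of 3 frames
max_num_boxes = tf.reduce_max(num_boxes_per_frame)
num_boxes_tiled = tf.tile(tf.expand_dims(num_boxes_per_frame, axis=-1),
                          multiples=tf.stack([1, max_num_boxes]))
range_tiled = tf.tile(tf.expand_dims(tf.range(max_num_boxes), axis=0),
                      multiples=tf.stack([tf.shape(num_boxes_per_frame)[0], 1]))
weights = tf.cast(tf.greater(num_boxes_tiled, range_tiled), tf.float32)
# weights -> [[1. 0. 0.]
#             [1. 1. 1.]
#             [0. 0. 0.]]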
+# ============================================================================== +"""Tests for tf_sequence_example_decoder.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields as fields +from object_detection.data_decoders import tf_sequence_example_decoder +from object_detection.dataset_tools import seq_example_util +from object_detection.utils import test_case + + +class TfSequenceExampleDecoderTest(test_case.TestCase): + + def _create_label_map(self, path): + label_map_text = """ + item { + name: "dog" + id: 1 + } + item { + name: "cat" + id: 2 + } + item { + name: "panda" + id: 4 + } + """ + with tf.gfile.Open(path, 'wb') as f: + f.write(label_map_text) + + def _make_random_serialized_jpeg_images(self, num_frames, image_height, + image_width): + def graph_fn(): + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + return [tf.io.encode_jpeg(image) for image in images_list] + encoded_images = self.execute(graph_fn, []) + return encoded_images + + def test_decode_sequence_example(self): + num_frames = 4 + image_height = 20 + image_width = 30 + + expected_groundtruth_boxes = [ + [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], + [[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], + [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]], + [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]] + ] + expected_groundtruth_classes = [ + [-1, -1], + [-1, 1], + [1, 2], + [-1, -1] + ] + + flds = fields.InputDataFields + encoded_images = self._make_random_serialized_jpeg_images( + num_frames, image_height, image_width) + + def graph_fn(): + label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt') + self._create_label_map(label_map_proto_file) + decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder( + label_map_proto_file=label_map_proto_file) + sequence_example_serialized = seq_example_util.make_sequence_example( + dataset_name='video_dataset', + video_id='video', + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_format='JPEG', + image_source_ids=[str(i) for i in range(num_frames)], + is_annotated=[[1], [1], [1], [1]], + bboxes=[ + [[0., 0., 1., 1.]], # Frame 0. + [[0.2, 0.2, 1., 1.], + [0., 0., 1., 1.]], # Frame 1. + [[0., 0., 1., 1.], # Frame 2. + [0.1, 0.1, 0.2, 0.2]], + [[]], # Frame 3. + ], + label_strings=[ + ['fox'], # Frame 0. Fox will be filtered out. + ['fox', 'dog'], # Frame 1. Fox will be filtered out. + ['dog', 'cat'], # Frame 2. 
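+              # 'fox' is not in the label map, so its boxes appear with class
+              # -1 in the expected groundtruth above.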
+ [], # Frame 3 + ]).SerializeToString() + + example_string_tensor = tf.convert_to_tensor(sequence_example_serialized) + return decoder.decode(example_string_tensor) + + tensor_dict_out = self.execute(graph_fn, []) + self.assertAllClose(expected_groundtruth_boxes, + tensor_dict_out[flds.groundtruth_boxes]) + self.assertAllEqual(expected_groundtruth_classes, + tensor_dict_out[flds.groundtruth_classes]) + + def test_decode_sequence_example_negative_clip(self): + num_frames = 4 + image_height = 20 + image_width = 30 + + expected_groundtruth_boxes = -1 * np.ones((4, 0, 4)) + expected_groundtruth_classes = -1 * np.ones((4, 0)) + + flds = fields.InputDataFields + + encoded_images = self._make_random_serialized_jpeg_images( + num_frames, image_height, image_width) + + def graph_fn(): + sequence_example_serialized = seq_example_util.make_sequence_example( + dataset_name='video_dataset', + video_id='video', + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_format='JPEG', + image_source_ids=[str(i) for i in range(num_frames)], + bboxes=[ + [[]], + [[]], + [[]], + [[]] + ], + label_strings=[ + [], + [], + [], + [] + ]).SerializeToString() + example_string_tensor = tf.convert_to_tensor(sequence_example_serialized) + + label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt') + self._create_label_map(label_map_proto_file) + decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder( + label_map_proto_file=label_map_proto_file) + return decoder.decode(example_string_tensor) + + tensor_dict_out = self.execute(graph_fn, []) + self.assertAllClose(expected_groundtruth_boxes, + tensor_dict_out[flds.groundtruth_boxes]) + self.assertAllEqual(expected_groundtruth_classes, + tensor_dict_out[flds.groundtruth_classes]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py new file mode 100644 index 0000000000000000000000000000000000000000..a5b8b0ab7f7c0dab76027325dcd4d96e93eeccbc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py @@ -0,0 +1,938 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""A Beam job to add contextual memory banks to tf.Examples. + +This tool groups images containing bounding boxes and embedded context features +by a key, either `image/location` or `image/seq_id`, and time horizon, +then uses these groups to build up a contextual memory bank from the embedded +context features from each image in the group and adds that context to the +output tf.Examples for each image in the group. + +Steps to generate a dataset with context from one with bounding boxes and +embedded context features: +1. Use object/detection/export_inference_graph.py to get a `saved_model` for + inference. The input node must accept a tf.Example proto. +2. Run this tool with `saved_model` from step 1 and a TFRecord of tf.Example + protos containing images, bounding boxes, and embedded context features. + The context features can be added to tf.Examples using + generate_embedding_data.py. + +Example Usage: +-------------- +python add_context_to_examples.py \ + --input_tfrecord path/to/input_tfrecords* \ + --output_tfrecord path/to/output_tfrecords \ + --sequence_key image/location \ + --time_horizon month + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import copy +import datetime +import io +import itertools +import json +import os +import numpy as np +import PIL.Image +import six +import tensorflow as tf + +try: + import apache_beam as beam # pylint:disable=g-import-not-at-top +except ModuleNotFoundError: + pass + + +class ReKeyDataFn(beam.DoFn): + """Re-keys tfrecords by sequence_key. + + This Beam DoFn re-keys the tfrecords by a user-defined sequence_key + """ + + def __init__(self, sequence_key, time_horizon, + reduce_image_size, max_image_dimension): + """Initialization function. + + Args: + sequence_key: A feature name to use as a key for grouping sequences. + Must point to a key of type bytes_list + time_horizon: What length of time to use to partition the data when + building the memory banks. Options: `year`, `month`, `week`, `day `, + `hour`, `minute`, None + reduce_image_size: Whether to reduce the sizes of the stored images. 
+ max_image_dimension: maximum dimension of reduced images + """ + self._sequence_key = sequence_key + if time_horizon is None or time_horizon in {'year', 'month', 'week', 'day', + 'hour', 'minute'}: + self._time_horizon = time_horizon + else: + raise ValueError('Time horizon not supported.') + self._reduce_image_size = reduce_image_size + self._max_image_dimension = max_image_dimension + self._session = None + self._num_examples_processed = beam.metrics.Metrics.counter( + 'data_rekey', 'num_tf_examples_processed') + self._num_images_resized = beam.metrics.Metrics.counter( + 'data_rekey', 'num_images_resized') + self._num_images_read = beam.metrics.Metrics.counter( + 'data_rekey', 'num_images_read') + self._num_images_found = beam.metrics.Metrics.counter( + 'data_rekey', 'num_images_read') + self._num_got_shape = beam.metrics.Metrics.counter( + 'data_rekey', 'num_images_got_shape') + self._num_images_found_size = beam.metrics.Metrics.counter( + 'data_rekey', 'num_images_found_size') + self._num_examples_cleared = beam.metrics.Metrics.counter( + 'data_rekey', 'num_examples_cleared') + self._num_examples_updated = beam.metrics.Metrics.counter( + 'data_rekey', 'num_examples_updated') + + def process(self, tfrecord_entry): + return self._rekey_examples(tfrecord_entry) + + def _largest_size_at_most(self, height, width, largest_side): + """Computes new shape with the largest side equal to `largest_side`. + + Args: + height: an int indicating the current height. + width: an int indicating the current width. + largest_side: A python integer indicating the size of + the largest side after resize. + Returns: + new_height: an int indicating the new height. + new_width: an int indicating the new width. + """ + + x_scale = float(largest_side) / float(width) + y_scale = float(largest_side) / float(height) + scale = min(x_scale, y_scale) + + new_width = int(width * scale) + new_height = int(height * scale) + + return new_height, new_width + + def _resize_image(self, input_example): + """Resizes the image within input_example and updates the height and width. + + Args: + input_example: A tf.Example that we want to update to contain a resized + image. + Returns: + input_example: Updated tf.Example. 
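+        The image bytes are re-encoded as JPEG at the new size, and the
+        'image/height' and 'image/width' features are updated to match.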
+ """ + + original_image = copy.deepcopy( + input_example.features.feature['image/encoded'].bytes_list.value[0]) + self._num_images_read.inc(1) + + height = copy.deepcopy( + input_example.features.feature['image/height'].int64_list.value[0]) + + width = copy.deepcopy( + input_example.features.feature['image/width'].int64_list.value[0]) + + self._num_got_shape.inc(1) + + new_height, new_width = self._largest_size_at_most( + height, width, self._max_image_dimension) + + self._num_images_found_size.inc(1) + + encoded_jpg_io = io.BytesIO(original_image) + image = PIL.Image.open(encoded_jpg_io) + resized_image = image.resize((new_width, new_height)) + + with io.BytesIO() as output: + resized_image.save(output, format='JPEG') + encoded_resized_image = output.getvalue() + + self._num_images_resized.inc(1) + + del input_example.features.feature['image/encoded'].bytes_list.value[:] + del input_example.features.feature['image/height'].int64_list.value[:] + del input_example.features.feature['image/width'].int64_list.value[:] + + self._num_examples_cleared.inc(1) + + input_example.features.feature['image/encoded'].bytes_list.value.extend( + [encoded_resized_image]) + input_example.features.feature['image/height'].int64_list.value.extend( + [new_height]) + input_example.features.feature['image/width'].int64_list.value.extend( + [new_width]) + self._num_examples_updated.inc(1) + + return input_example + + def _rekey_examples(self, tfrecord_entry): + serialized_example = copy.deepcopy(tfrecord_entry) + + input_example = tf.train.Example.FromString(serialized_example) + + self._num_images_found.inc(1) + + if self._reduce_image_size: + input_example = self._resize_image(input_example) + self._num_images_resized.inc(1) + + new_key = input_example.features.feature[ + self._sequence_key].bytes_list.value[0] + + if self._time_horizon: + date_captured = datetime.datetime.strptime( + six.ensure_str(input_example.features.feature[ + 'image/date_captured'].bytes_list.value[0]), '%Y-%m-%d %H:%M:%S') + year = date_captured.year + month = date_captured.month + day = date_captured.day + week = np.floor(float(day) / float(7)) + hour = date_captured.hour + minute = date_captured.minute + + if self._time_horizon == 'year': + new_key = new_key + six.ensure_binary('/' + str(year)) + elif self._time_horizon == 'month': + new_key = new_key + six.ensure_binary( + '/' + str(year) + '/' + str(month)) + elif self._time_horizon == 'week': + new_key = new_key + six.ensure_binary( + '/' + str(year) + '/' + str(month) + '/' + str(week)) + elif self._time_horizon == 'day': + new_key = new_key + six.ensure_binary( + '/' + str(year) + '/' + str(month) + '/' + str(day)) + elif self._time_horizon == 'hour': + new_key = new_key + six.ensure_binary( + '/' + str(year) + '/' + str(month) + '/' + str(day) + '/' + ( + str(hour))) + elif self._time_horizon == 'minute': + new_key = new_key + six.ensure_binary( + '/' + str(year) + '/' + str(month) + '/' + str(day) + '/' + ( + str(hour) + '/' + str(minute))) + + self._num_examples_processed.inc(1) + + return [(new_key, input_example)] + + +class SortGroupedDataFn(beam.DoFn): + """Sorts data within a keyed group. + + This Beam DoFn sorts the grouped list of image examples by frame_num + """ + + def __init__(self, sequence_key, sorted_image_ids, + max_num_elements_in_context_features): + """Initialization function. + + Args: + sequence_key: A feature name to use as a key for grouping sequences. 
+        Must point to a key of type bytes_list
+      sorted_image_ids: Whether the image ids are sortable to use as sorting
+        tie-breakers
+      max_num_elements_in_context_features: The maximum number of elements
+        allowed in the memory bank
+    """
+    self._session = None
+    self._num_examples_processed = beam.metrics.Metrics.counter(
+        'sort_group', 'num_groups_sorted')
+    self._too_many_elements = beam.metrics.Metrics.counter(
+        'sort_group', 'too_many_elements')
+    self._split_elements = beam.metrics.Metrics.counter(
+        'sort_group', 'split_elements')
+    self._sequence_key = six.ensure_binary(sequence_key)
+    self._sorted_image_ids = sorted_image_ids
+    self._max_num_elements_in_context_features = (
+        max_num_elements_in_context_features)
+
+  def process(self, grouped_entry):
+    return self._sort_image_examples(grouped_entry)
+
+  def _sort_image_examples(self, grouped_entry):
+    key, example_collection = grouped_entry
+    example_list = list(example_collection)
+
+    def get_frame_num(example):
+      return example.features.feature['image/seq_frame_num'].int64_list.value[0]
+
+    def get_date_captured(example):
+      return datetime.datetime.strptime(
+          six.ensure_str(
+              example.features.feature[
+                  'image/date_captured'].bytes_list.value[0]),
+          '%Y-%m-%d %H:%M:%S')
+
+    def get_image_id(example):
+      return example.features.feature['image/source_id'].bytes_list.value[0]
+
+    if self._sequence_key == six.ensure_binary('image/seq_id'):
+      sorting_fn = get_frame_num
+    elif self._sequence_key == six.ensure_binary('image/location'):
+      if self._sorted_image_ids:
+        sorting_fn = get_image_id
+      else:
+        sorting_fn = get_date_captured
+
+    sorted_example_list = sorted(example_list, key=sorting_fn)
+
+    self._num_examples_processed.inc(1)
+
+    if len(sorted_example_list) > self._max_num_elements_in_context_features:
+      leftovers = sorted_example_list
+      output_list = []
+      count = 0
+      self._too_many_elements.inc(1)
+      while len(leftovers) > self._max_num_elements_in_context_features:
+        self._split_elements.inc(1)
+        new_key = key + six.ensure_binary('_' + str(count))
+        new_list = leftovers[:self._max_num_elements_in_context_features]
+        output_list.append((new_key, new_list))
+        # Advance past the examples that were just emitted.
+        leftovers = leftovers[self._max_num_elements_in_context_features:]
+        count += 1
+    else:
+      output_list = [(key, sorted_example_list)]
+
+    return output_list
+
+
+def get_sliding_window(example_list, max_clip_length, stride_length):
+  """Yields a sliding window over data from example_list.
+
+  Sliding window has width max_clip_length (n) and stride stride_length (m).
+  s -> (s0,s1,...s[n-1]), (s[m],s[m+1],...,s[m+n]), ...
+
+  Args:
+    example_list: A list of examples.
+    max_clip_length: The maximum length of each clip.
+    stride_length: The stride between each clip.
+
+  Yields:
+    A list of lists of examples, each with length <= max_clip_length
+  """
+
+  # check if the list is too short to slide over
+  if len(example_list) < max_clip_length:
+    yield example_list
+  else:
+    starting_values = [i*stride_length for i in
+                       range(len(example_list)) if
+                       len(example_list) > i*stride_length]
+    for start in starting_values:
+      result = tuple(itertools.islice(example_list, start,
+                                      min(start + max_clip_length,
+                                          len(example_list))))
+      yield result
+
+
+class GenerateContextFn(beam.DoFn):
+  """Generates context data for camera trap images.
+
+  This Beam DoFn builds up contextual memory banks from groups of images and
+  stores them in the output tf.Example or tf.SequenceExample for each image.
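+
+  When `output_type` is `tf_sequence_example`, each group is additionally
+  packed into clips of at most `max_clip_length` frames (when set) before
+  being written out as tf.train.SequenceExamples.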
+ """ + + def __init__(self, sequence_key, add_context_features, image_ids_to_keep, + keep_context_features_image_id_list=False, + subsample_context_features_rate=0, + keep_only_positives=False, + context_features_score_threshold=0.7, + keep_only_positives_gt=False, + max_num_elements_in_context_features=5000, + pad_context_features=False, + output_type='tf_example', max_clip_length=None, + context_feature_length=2057): + """Initialization function. + + Args: + sequence_key: A feature name to use as a key for grouping sequences. + add_context_features: Whether to keep and store the contextual memory + bank. + image_ids_to_keep: A list of image ids to save, to use to build data + subsets for evaluation. + keep_context_features_image_id_list: Whether to save an ordered list of + the ids of the images in the contextual memory bank. + subsample_context_features_rate: What rate to subsample images for the + contextual memory bank. + keep_only_positives: Whether to only keep high scoring + (>context_features_score_threshold) features in the contextual memory + bank. + context_features_score_threshold: What threshold to use for keeping + features. + keep_only_positives_gt: Whether to only keep features from images that + contain objects based on the ground truth (for training). + max_num_elements_in_context_features: the maximum number of elements in + the memory bank + pad_context_features: Whether to pad the context features to a fixed size. + output_type: What type of output, tf_example of tf_sequence_example + max_clip_length: The maximum length of a sequence example, before + splitting into multiple + context_feature_length: The length of the context feature embeddings + stored in the input data. + """ + self._session = None + self._num_examples_processed = beam.metrics.Metrics.counter( + 'sequence_data_generation', 'num_seq_examples_processed') + self._num_keys_processed = beam.metrics.Metrics.counter( + 'sequence_data_generation', 'num_keys_processed') + self._sequence_key = sequence_key + self._add_context_features = add_context_features + self._pad_context_features = pad_context_features + self._output_type = output_type + self._max_clip_length = max_clip_length + if six.ensure_str(image_ids_to_keep) == 'All': + self._image_ids_to_keep = None + else: + with tf.io.gfile.GFile(image_ids_to_keep) as f: + self._image_ids_to_keep = json.load(f) + self._keep_context_features_image_id_list = ( + keep_context_features_image_id_list) + self._subsample_context_features_rate = subsample_context_features_rate + self._keep_only_positives = keep_only_positives + self._keep_only_positives_gt = keep_only_positives_gt + self._context_features_score_threshold = context_features_score_threshold + self._max_num_elements_in_context_features = ( + max_num_elements_in_context_features) + self._context_feature_length = context_feature_length + + self._images_kept = beam.metrics.Metrics.counter( + 'sequence_data_generation', 'images_kept') + self._images_loaded = beam.metrics.Metrics.counter( + 'sequence_data_generation', 'images_loaded') + + def process(self, grouped_entry): + return self._add_context_to_example(copy.deepcopy(grouped_entry)) + + def _build_context_features(self, example_list): + context_features = [] + context_features_image_id_list = [] + count = 0 + example_embedding = [] + + for idx, example in enumerate(example_list): + if self._subsample_context_features_rate > 0: + if (idx % self._subsample_context_features_rate) != 0: + example.features.feature[ + 
'context_features_idx'].int64_list.value.append( + self._max_num_elements_in_context_features + 1) + continue + if self._keep_only_positives: + if example.features.feature[ + 'image/embedding_score' + ].float_list.value[0] < self._context_features_score_threshold: + example.features.feature[ + 'context_features_idx'].int64_list.value.append( + self._max_num_elements_in_context_features + 1) + continue + if self._keep_only_positives_gt: + if len(example.features.feature[ + 'image/object/bbox/xmin' + ].float_list.value) < 1: + example.features.feature[ + 'context_features_idx'].int64_list.value.append( + self._max_num_elements_in_context_features + 1) + continue + + example_embedding = list(example.features.feature[ + 'image/embedding'].float_list.value) + context_features.extend(example_embedding) + example.features.feature[ + 'context_features_idx'].int64_list.value.append(count) + count += 1 + example_image_id = example.features.feature[ + 'image/source_id'].bytes_list.value[0] + context_features_image_id_list.append(example_image_id) + + if not example_embedding: + example_embedding.append(np.zeros(self._context_feature_length)) + + feature_length = self._context_feature_length + + # If the example_list is not empty and image/embedding_length is in the + # featture dict, feature_length will be assigned to that. Otherwise, it will + # be kept as default. + if example_list and ( + 'image/embedding_length' in example_list[0].features.feature): + feature_length = example_list[0].features.feature[ + 'image/embedding_length'].int64_list.value[0] + + if self._pad_context_features: + while len(context_features_image_id_list) < ( + self._max_num_elements_in_context_features): + context_features_image_id_list.append('') + + return context_features, feature_length, context_features_image_id_list + + def _add_context_to_example(self, grouped_entry): + key, example_collection = grouped_entry + list_of_examples = [] + + example_list = list(example_collection) + + if self._add_context_features: + context_features, feature_length, context_features_image_id_list = ( + self._build_context_features(example_list)) + + if self._image_ids_to_keep is not None: + new_example_list = [] + for example in example_list: + im_id = example.features.feature['image/source_id'].bytes_list.value[0] + self._images_loaded.inc(1) + if six.ensure_str(im_id) in self._image_ids_to_keep: + self._images_kept.inc(1) + new_example_list.append(example) + if new_example_list: + example_list = new_example_list + else: + return [] + + if self._output_type == 'tf_sequence_example': + if self._max_clip_length is not None: + # For now, no overlap + clips = get_sliding_window( + example_list, self._max_clip_length, self._max_clip_length) + else: + clips = [example_list] + + for clip_num, clip_list in enumerate(clips): + # initialize sequence example + seq_example = tf.train.SequenceExample() + video_id = six.ensure_str(key)+'_'+ str(clip_num) + seq_example.context.feature['clip/media_id'].bytes_list.value.append( + video_id.encode('utf8')) + seq_example.context.feature['clip/frames'].int64_list.value.append( + len(clip_list)) + + seq_example.context.feature[ + 'clip/start/timestamp'].int64_list.value.append(0) + seq_example.context.feature[ + 'clip/end/timestamp'].int64_list.value.append(len(clip_list)) + seq_example.context.feature['image/format'].bytes_list.value.append( + six.ensure_binary('JPG')) + seq_example.context.feature['image/channels'].int64_list.value.append(3) + context_example = clip_list[0] + 
seq_example.context.feature['image/height'].int64_list.value.append( + context_example.features.feature[ + 'image/height'].int64_list.value[0]) + seq_example.context.feature['image/width'].int64_list.value.append( + context_example.features.feature['image/width'].int64_list.value[0]) + + seq_example.context.feature[ + 'image/context_feature_length'].int64_list.value.append( + feature_length) + seq_example.context.feature[ + 'image/context_features'].float_list.value.extend( + context_features) + if self._keep_context_features_image_id_list: + seq_example.context.feature[ + 'image/context_features_image_id_list'].bytes_list.value.extend( + context_features_image_id_list) + + encoded_image_list = seq_example.feature_lists.feature_list[ + 'image/encoded'] + timestamps_list = seq_example.feature_lists.feature_list[ + 'image/timestamp'] + context_features_idx_list = seq_example.feature_lists.feature_list[ + 'image/context_features_idx'] + date_captured_list = seq_example.feature_lists.feature_list[ + 'image/date_captured'] + unix_time_list = seq_example.feature_lists.feature_list[ + 'image/unix_time'] + location_list = seq_example.feature_lists.feature_list['image/location'] + image_ids_list = seq_example.feature_lists.feature_list[ + 'image/source_id'] + gt_xmin_list = seq_example.feature_lists.feature_list[ + 'region/bbox/xmin'] + gt_xmax_list = seq_example.feature_lists.feature_list[ + 'region/bbox/xmax'] + gt_ymin_list = seq_example.feature_lists.feature_list[ + 'region/bbox/ymin'] + gt_ymax_list = seq_example.feature_lists.feature_list[ + 'region/bbox/ymax'] + gt_type_list = seq_example.feature_lists.feature_list[ + 'region/label/index'] + gt_type_string_list = seq_example.feature_lists.feature_list[ + 'region/label/string'] + gt_is_annotated_list = seq_example.feature_lists.feature_list[ + 'region/is_annotated'] + + for idx, example in enumerate(clip_list): + + encoded_image = encoded_image_list.feature.add() + encoded_image.bytes_list.value.extend( + example.features.feature['image/encoded'].bytes_list.value) + + image_id = image_ids_list.feature.add() + image_id.bytes_list.value.append( + example.features.feature['image/source_id'].bytes_list.value[0]) + + timestamp = timestamps_list.feature.add() + # Timestamp is currently order in the list. 
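+          # That is, the timestamp is simply the frame's index within the
+          # clip, not a real capture time.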
+ timestamp.int64_list.value.extend([idx]) + + context_features_idx = context_features_idx_list.feature.add() + context_features_idx.int64_list.value.extend( + example.features.feature['context_features_idx'].int64_list.value) + + date_captured = date_captured_list.feature.add() + date_captured.bytes_list.value.extend( + example.features.feature['image/date_captured'].bytes_list.value) + unix_time = unix_time_list.feature.add() + unix_time.float_list.value.extend( + example.features.feature['image/unix_time'].float_list.value) + location = location_list.feature.add() + location.bytes_list.value.extend( + example.features.feature['image/location'].bytes_list.value) + + gt_xmin = gt_xmin_list.feature.add() + gt_xmax = gt_xmax_list.feature.add() + gt_ymin = gt_ymin_list.feature.add() + gt_ymax = gt_ymax_list.feature.add() + gt_type = gt_type_list.feature.add() + gt_type_str = gt_type_string_list.feature.add() + + gt_is_annotated = gt_is_annotated_list.feature.add() + gt_is_annotated.int64_list.value.append(1) + + gt_xmin.float_list.value.extend( + example.features.feature[ + 'image/object/bbox/xmin'].float_list.value) + gt_xmax.float_list.value.extend( + example.features.feature[ + 'image/object/bbox/xmax'].float_list.value) + gt_ymin.float_list.value.extend( + example.features.feature[ + 'image/object/bbox/ymin'].float_list.value) + gt_ymax.float_list.value.extend( + example.features.feature[ + 'image/object/bbox/ymax'].float_list.value) + + gt_type.int64_list.value.extend( + example.features.feature[ + 'image/object/class/label'].int64_list.value) + gt_type_str.bytes_list.value.extend( + example.features.feature[ + 'image/object/class/text'].bytes_list.value) + + self._num_examples_processed.inc(1) + list_of_examples.append(seq_example) + + elif self._output_type == 'tf_example': + + for example in example_list: + im_id = example.features.feature['image/source_id'].bytes_list.value[0] + + if self._add_context_features: + example.features.feature[ + 'image/context_features'].float_list.value.extend( + context_features) + example.features.feature[ + 'image/context_feature_length'].int64_list.value.append( + feature_length) + + if self._keep_context_features_image_id_list: + example.features.feature[ + 'image/context_features_image_id_list'].bytes_list.value.extend( + context_features_image_id_list) + + self._num_examples_processed.inc(1) + list_of_examples.append(example) + + return list_of_examples + + +def construct_pipeline(pipeline, + input_tfrecord, + output_tfrecord, + sequence_key, + time_horizon=None, + subsample_context_features_rate=0, + reduce_image_size=True, + max_image_dimension=1024, + add_context_features=True, + sorted_image_ids=True, + image_ids_to_keep='All', + keep_context_features_image_id_list=False, + keep_only_positives=False, + context_features_score_threshold=0.7, + keep_only_positives_gt=False, + max_num_elements_in_context_features=5000, + num_shards=0, + output_type='tf_example', + max_clip_length=None, + context_feature_length=2057): + """Returns a beam pipeline to run object detection inference. + + Args: + pipeline: Initialized beam pipeline. + input_tfrecord: An TFRecord of tf.train.Example protos containing images. + output_tfrecord: An TFRecord of tf.train.Example protos that contain images + in the input TFRecord and the detections from the model. + sequence_key: A feature name to use as a key for grouping sequences. + time_horizon: What length of time to use to partition the data when building + the memory banks. 
Options: `year`, `month`, `week`, `day `, `hour`, + `minute`, None. + subsample_context_features_rate: What rate to subsample images for the + contextual memory bank. + reduce_image_size: Whether to reduce the size of the stored images. + max_image_dimension: The maximum image dimension to use for resizing. + add_context_features: Whether to keep and store the contextual memory bank. + sorted_image_ids: Whether the image ids are sortable, and can be used as + datetime tie-breakers when building memory banks. + image_ids_to_keep: A list of image ids to save, to use to build data subsets + for evaluation. + keep_context_features_image_id_list: Whether to save an ordered list of the + ids of the images in the contextual memory bank. + keep_only_positives: Whether to only keep high scoring + (>context_features_score_threshold) features in the contextual memory + bank. + context_features_score_threshold: What threshold to use for keeping + features. + keep_only_positives_gt: Whether to only keep features from images that + contain objects based on the ground truth (for training). + max_num_elements_in_context_features: the maximum number of elements in the + memory bank + num_shards: The number of output shards. + output_type: What type of output, tf_example of tf_sequence_example + max_clip_length: The maximum length of a sequence example, before + splitting into multiple + context_feature_length: The length of the context feature embeddings stored + in the input data. + """ + if output_type == 'tf_example': + coder = beam.coders.ProtoCoder(tf.train.Example) + elif output_type == 'tf_sequence_example': + coder = beam.coders.ProtoCoder(tf.train.SequenceExample) + else: + raise ValueError('Unsupported output type.') + input_collection = ( + pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord( + input_tfrecord, + coder=beam.coders.BytesCoder())) + rekey_collection = input_collection | 'RekeyExamples' >> beam.ParDo( + ReKeyDataFn(sequence_key, time_horizon, + reduce_image_size, max_image_dimension)) + grouped_collection = ( + rekey_collection | 'GroupBySequenceKey' >> beam.GroupByKey()) + grouped_collection = ( + grouped_collection | 'ReshuffleGroups' >> beam.Reshuffle()) + ordered_collection = ( + grouped_collection | 'OrderByFrameNumber' >> beam.ParDo( + SortGroupedDataFn(sequence_key, sorted_image_ids, + max_num_elements_in_context_features))) + ordered_collection = ( + ordered_collection | 'ReshuffleSortedGroups' >> beam.Reshuffle()) + output_collection = ( + ordered_collection | 'AddContextToExamples' >> beam.ParDo( + GenerateContextFn( + sequence_key, add_context_features, image_ids_to_keep, + keep_context_features_image_id_list=( + keep_context_features_image_id_list), + subsample_context_features_rate=subsample_context_features_rate, + keep_only_positives=keep_only_positives, + keep_only_positives_gt=keep_only_positives_gt, + context_features_score_threshold=( + context_features_score_threshold), + max_num_elements_in_context_features=( + max_num_elements_in_context_features), + output_type=output_type, + max_clip_length=max_clip_length, + context_feature_length=context_feature_length))) + + output_collection = ( + output_collection | 'ReshuffleExamples' >> beam.Reshuffle()) + _ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord( + output_tfrecord, + num_shards=num_shards, + coder=coder) + + +def parse_args(argv): + """Command-line argument parser. + + Args: + argv: command line arguments + Returns: + beam_args: Arguments for the beam pipeline. 
+ pipeline_args: Arguments for the pipeline options, such as runner type. + """ + parser = argparse.ArgumentParser() + parser.add_argument( + '--input_tfrecord', + dest='input_tfrecord', + required=True, + help='TFRecord containing images in tf.Example format for object ' + 'detection, with bounding boxes and contextual feature embeddings.') + parser.add_argument( + '--output_tfrecord', + dest='output_tfrecord', + required=True, + help='TFRecord containing images in tf.Example format, with added ' + 'contextual memory banks.') + parser.add_argument( + '--sequence_key', + dest='sequence_key', + default='image/location', + help='Key to use when grouping sequences: so far supports `image/seq_id` ' + 'and `image/location`.') + parser.add_argument( + '--context_feature_length', + dest='context_feature_length', + default=2057, + help='The length of the context feature embeddings stored in the input ' + 'data.') + parser.add_argument( + '--time_horizon', + dest='time_horizon', + default=None, + help='What time horizon to use when splitting the data, if any. Options ' + 'are: `year`, `month`, `week`, `day `, `hour`, `minute`, `None`.') + parser.add_argument( + '--subsample_context_features_rate', + dest='subsample_context_features_rate', + default=0, + help='Whether to subsample the context_features, and if so how many to ' + 'sample. If the rate is set to X, it will sample context from 1 out of ' + 'every X images. Default is sampling from every image, which is X=0.') + parser.add_argument( + '--reduce_image_size', + dest='reduce_image_size', + default=True, + help='downsamples images to have longest side max_image_dimension, ' + 'maintaining aspect ratio') + parser.add_argument( + '--max_image_dimension', + dest='max_image_dimension', + default=1024, + help='Sets max image dimension for resizing.') + parser.add_argument( + '--add_context_features', + dest='add_context_features', + default=True, + help='Adds a memory bank of embeddings to each clip') + parser.add_argument( + '--sorted_image_ids', + dest='sorted_image_ids', + default=True, + help='Whether the image source_ids are sortable to deal with ' + 'date_captured tie-breaks.') + parser.add_argument( + '--image_ids_to_keep', + dest='image_ids_to_keep', + default='All', + help='Path to .json list of image ids to keep, used for ground truth ' + 'eval creation.') + parser.add_argument( + '--keep_context_features_image_id_list', + dest='keep_context_features_image_id_list', + default=False, + help='Whether or not to keep a list of the image_ids corresponding to ' + 'the memory bank.') + parser.add_argument( + '--keep_only_positives', + dest='keep_only_positives', + default=False, + help='Whether or not to keep only positive boxes based on score.') + parser.add_argument( + '--context_features_score_threshold', + dest='context_features_score_threshold', + default=0.7, + help='What score threshold to use for boxes in context_features, when ' + '`keep_only_positives` is set to `True`.') + parser.add_argument( + '--keep_only_positives_gt', + dest='keep_only_positives_gt', + default=False, + help='Whether or not to keep only positive boxes based on gt class.') + parser.add_argument( + '--max_num_elements_in_context_features', + dest='max_num_elements_in_context_features', + default=2000, + help='Sets max number of context feature elements per memory bank. ' + 'If the number of images in the context group is greater than ' + '`max_num_elements_in_context_features`, the context group will be split.' 
+ ) + parser.add_argument( + '--output_type', + dest='output_type', + default='tf_example', + help='Output type, one of `tf_example`, `tf_sequence_example`.') + parser.add_argument( + '--max_clip_length', + dest='max_clip_length', + default=None, + help='Max length for sequence example outputs.') + parser.add_argument( + '--num_shards', + dest='num_shards', + default=0, + help='Number of output shards.') + beam_args, pipeline_args = parser.parse_known_args(argv) + return beam_args, pipeline_args + + +def main(argv=None, save_main_session=True): + """Runs the Beam pipeline that performs inference. + + Args: + argv: Command line arguments. + save_main_session: Whether to save the main session. + """ + args, pipeline_args = parse_args(argv) + + pipeline_options = beam.options.pipeline_options.PipelineOptions( + pipeline_args) + pipeline_options.view_as( + beam.options.pipeline_options.SetupOptions).save_main_session = ( + save_main_session) + + dirname = os.path.dirname(args.output_tfrecord) + tf.io.gfile.makedirs(dirname) + + p = beam.Pipeline(options=pipeline_options) + + construct_pipeline( + p, + args.input_tfrecord, + args.output_tfrecord, + args.sequence_key, + args.time_horizon, + args.subsample_context_features_rate, + args.reduce_image_size, + args.max_image_dimension, + args.add_context_features, + args.sorted_image_ids, + args.image_ids_to_keep, + args.keep_context_features_image_id_list, + args.keep_only_positives, + args.context_features_score_threshold, + args.keep_only_positives_gt, + args.max_num_elements_in_context_features, + args.num_shards, + args.output_type, + args.max_clip_length, + args.context_feature_length) + + p.run() + + +if __name__ == '__main__': + main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ae4e02bdca3df7f46aff48137b5064fa9938db06 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf2_test.py @@ -0,0 +1,396 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for add_context_to_examples.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import contextlib +import datetime +import os +import tempfile +import unittest + +import numpy as np +import six +import tensorflow as tf + +from object_detection.utils import tf_version + +if tf_version.is_tf2(): + from object_detection.dataset_tools.context_rcnn import add_context_to_examples # pylint:disable=g-import-not-at-top + +try: + import apache_beam as beam # pylint:disable=g-import-not-at-top +except ModuleNotFoundError: + pass + + +@contextlib.contextmanager +def InMemoryTFRecord(entries): + temp = tempfile.NamedTemporaryFile(delete=False) + filename = temp.name + try: + with tf.io.TFRecordWriter(filename) as writer: + for value in entries: + writer.write(value) + yield filename + finally: + os.unlink(temp.name) + + +def BytesFeature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + +def BytesListFeature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=value)) + + +def Int64Feature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + + +def Int64ListFeature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) + + +def FloatListFeature(value): + return tf.train.Feature(float_list=tf.train.FloatList(value=value)) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class GenerateContextDataTest(tf.test.TestCase): + + def _create_first_tf_example(self): + encoded_image = tf.io.encode_jpeg( + tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy() + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': BytesFeature(encoded_image), + 'image/source_id': BytesFeature(six.ensure_binary('image_id_1')), + 'image/height': Int64Feature(4), + 'image/width': Int64Feature(4), + 'image/object/class/label': Int64ListFeature([5, 5]), + 'image/object/class/text': BytesListFeature([six.ensure_binary('hyena'), + six.ensure_binary('hyena') + ]), + 'image/object/bbox/xmin': FloatListFeature([0.0, 0.1]), + 'image/object/bbox/xmax': FloatListFeature([0.2, 0.3]), + 'image/object/bbox/ymin': FloatListFeature([0.4, 0.5]), + 'image/object/bbox/ymax': FloatListFeature([0.6, 0.7]), + 'image/seq_id': BytesFeature(six.ensure_binary('01')), + 'image/seq_num_frames': Int64Feature(2), + 'image/seq_frame_num': Int64Feature(0), + 'image/date_captured': BytesFeature( + six.ensure_binary(str(datetime.datetime(2020, 1, 1, 1, 0, 0)))), + 'image/embedding': FloatListFeature([0.1, 0.2, 0.3]), + 'image/embedding_score': FloatListFeature([0.9]), + 'image/embedding_length': Int64Feature(3) + + })) + + return example.SerializeToString() + + def _create_second_tf_example(self): + encoded_image = tf.io.encode_jpeg( + tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy() + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': BytesFeature(encoded_image), + 'image/source_id': BytesFeature(six.ensure_binary('image_id_2')), + 'image/height': Int64Feature(4), + 'image/width': Int64Feature(4), + 'image/object/class/label': Int64ListFeature([5]), + 'image/object/class/text': BytesListFeature([six.ensure_binary('hyena') + ]), + 'image/object/bbox/xmin': FloatListFeature([0.0]), + 'image/object/bbox/xmax': FloatListFeature([0.1]), + 'image/object/bbox/ymin': FloatListFeature([0.2]), + 
'image/object/bbox/ymax': FloatListFeature([0.3]), + 'image/seq_id': BytesFeature(six.ensure_binary('01')), + 'image/seq_num_frames': Int64Feature(2), + 'image/seq_frame_num': Int64Feature(1), + 'image/date_captured': BytesFeature( + six.ensure_binary(str(datetime.datetime(2020, 1, 1, 1, 1, 0)))), + 'image/embedding': FloatListFeature([0.4, 0.5, 0.6]), + 'image/embedding_score': FloatListFeature([0.9]), + 'image/embedding_length': Int64Feature(3) + })) + + return example.SerializeToString() + + def assert_expected_examples(self, tf_example_list): + self.assertAllEqual( + {tf_example.features.feature['image/source_id'].bytes_list.value[0] + for tf_example in tf_example_list}, + {six.ensure_binary('image_id_1'), six.ensure_binary('image_id_2')}) + self.assertAllClose( + tf_example_list[0].features.feature[ + 'image/context_features'].float_list.value, + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]) + self.assertAllClose( + tf_example_list[1].features.feature[ + 'image/context_features'].float_list.value, + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]) + + def assert_expected_sequence_example(self, tf_sequence_example_list): + tf_sequence_example = tf_sequence_example_list[0] + num_frames = 2 + + self.assertAllEqual( + tf_sequence_example.context.feature[ + 'clip/media_id'].bytes_list.value[0], six.ensure_binary( + '01_0')) + self.assertAllClose( + tf_sequence_example.context.feature[ + 'image/context_features'].float_list.value, + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]) + + seq_feature_dict = tf_sequence_example.feature_lists.feature_list + + self.assertLen( + seq_feature_dict['image/encoded'].feature[:], + num_frames) + actual_timestamps = [ + feature.int64_list.value[0] for feature + in seq_feature_dict['image/timestamp'].feature] + timestamps = [0, 1] + self.assertAllEqual(timestamps, actual_timestamps) + + # First image. + self.assertAllClose( + [0.4, 0.5], + seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.0, 0.1], + seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.6, 0.7], + seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.2, 0.3], + seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:]) + self.assertAllEqual( + [six.ensure_binary('hyena'), six.ensure_binary('hyena')], + seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:]) + + # Second example. 
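+    # The second frame carries a single ground-truth box.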
+ self.assertAllClose( + [0.2], + seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.0], + seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.3], + seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.1], + seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:]) + self.assertAllEqual( + [six.ensure_binary('hyena')], + seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:]) + + def assert_expected_key(self, key): + self.assertAllEqual(key, b'01') + + def assert_sorted(self, example_collection): + example_list = list(example_collection) + counter = 0 + for example in example_list: + frame_num = example.features.feature[ + 'image/seq_frame_num'].int64_list.value[0] + self.assertGreaterEqual(frame_num, counter) + counter = frame_num + + def assert_context(self, example_collection): + example_list = list(example_collection) + for example in example_list: + context = example.features.feature[ + 'image/context_features'].float_list.value + self.assertAllClose([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], context) + + def assert_resized(self, example): + width = example.features.feature['image/width'].int64_list.value[0] + self.assertAllEqual(width, 2) + height = example.features.feature['image/height'].int64_list.value[0] + self.assertAllEqual(height, 2) + + def assert_size(self, example): + width = example.features.feature['image/width'].int64_list.value[0] + self.assertAllEqual(width, 4) + height = example.features.feature['image/height'].int64_list.value[0] + self.assertAllEqual(height, 4) + + def test_sliding_window(self): + example_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g'] + max_clip_length = 3 + stride_length = 3 + out_list = [list(i) for i in add_context_to_examples.get_sliding_window( + example_list, max_clip_length, stride_length)] + self.assertAllEqual(out_list, [['a', 'b', 'c'], + ['d', 'e', 'f'], + ['g']]) + + def test_rekey_data_fn(self): + sequence_key = 'image/seq_id' + time_horizon = None + reduce_image_size = False + max_dim = None + + rekey_fn = add_context_to_examples.ReKeyDataFn( + sequence_key, time_horizon, + reduce_image_size, max_dim) + output = rekey_fn.process(self._create_first_tf_example()) + + self.assert_expected_key(output[0][0]) + self.assert_size(output[0][1]) + + def test_rekey_data_fn_w_resize(self): + sequence_key = 'image/seq_id' + time_horizon = None + reduce_image_size = True + max_dim = 2 + + rekey_fn = add_context_to_examples.ReKeyDataFn( + sequence_key, time_horizon, + reduce_image_size, max_dim) + output = rekey_fn.process(self._create_first_tf_example()) + + self.assert_expected_key(output[0][0]) + self.assert_resized(output[0][1]) + + def test_sort_fn(self): + sequence_key = 'image/seq_id' + sorted_image_ids = False + max_num_elements_in_context_features = 10 + sort_fn = add_context_to_examples.SortGroupedDataFn( + sequence_key, sorted_image_ids, max_num_elements_in_context_features) + output = sort_fn.process( + ('dummy_key', [tf.train.Example.FromString( + self._create_second_tf_example()), + tf.train.Example.FromString( + self._create_first_tf_example())])) + + self.assert_sorted(output[0][1]) + + def test_add_context_fn(self): + sequence_key = 'image/seq_id' + add_context_features = True + image_ids_to_keep = 'All' + context_fn = add_context_to_examples.GenerateContextFn( + sequence_key, add_context_features, image_ids_to_keep) + output = context_fn.process( + ('dummy_key', 
[tf.train.Example.FromString( + self._create_first_tf_example()), + tf.train.Example.FromString( + self._create_second_tf_example())])) + + self.assertEqual(len(output), 2) + self.assert_context(output) + + def test_add_context_fn_output_sequence_example(self): + sequence_key = 'image/seq_id' + add_context_features = True + image_ids_to_keep = 'All' + context_fn = add_context_to_examples.GenerateContextFn( + sequence_key, add_context_features, image_ids_to_keep, + output_type='tf_sequence_example') + output = context_fn.process( + ('01', + [tf.train.Example.FromString(self._create_first_tf_example()), + tf.train.Example.FromString(self._create_second_tf_example())])) + + self.assertEqual(len(output), 1) + self.assert_expected_sequence_example(output) + + def test_add_context_fn_output_sequence_example_cliplen(self): + sequence_key = 'image/seq_id' + add_context_features = True + image_ids_to_keep = 'All' + context_fn = add_context_to_examples.GenerateContextFn( + sequence_key, add_context_features, image_ids_to_keep, + output_type='tf_sequence_example', max_clip_length=1) + output = context_fn.process( + ('01', + [tf.train.Example.FromString(self._create_first_tf_example()), + tf.train.Example.FromString(self._create_second_tf_example())])) + self.assertEqual(len(output), 2) + + def test_beam_pipeline(self): + with InMemoryTFRecord( + [self._create_first_tf_example(), + self._create_second_tf_example()]) as input_tfrecord: + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + output_tfrecord = os.path.join(temp_dir, 'output_tfrecord') + sequence_key = six.ensure_binary('image/seq_id') + max_num_elements = 10 + num_shards = 1 + pipeline_options = beam.options.pipeline_options.PipelineOptions( + runner='DirectRunner') + p = beam.Pipeline(options=pipeline_options) + add_context_to_examples.construct_pipeline( + p, + input_tfrecord, + output_tfrecord, + sequence_key, + max_num_elements_in_context_features=max_num_elements, + num_shards=num_shards) + p.run() + filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') + actual_output = [] + record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), 2) + self.assert_expected_examples([tf.train.Example.FromString( + tf_example) for tf_example in actual_output]) + + def test_beam_pipeline_sequence_example(self): + with InMemoryTFRecord( + [self._create_first_tf_example(), + self._create_second_tf_example()]) as input_tfrecord: + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + output_tfrecord = os.path.join(temp_dir, 'output_tfrecord') + sequence_key = six.ensure_binary('image/seq_id') + max_num_elements = 10 + num_shards = 1 + pipeline_options = beam.options.pipeline_options.PipelineOptions( + runner='DirectRunner') + p = beam.Pipeline(options=pipeline_options) + add_context_to_examples.construct_pipeline( + p, + input_tfrecord, + output_tfrecord, + sequence_key, + max_num_elements_in_context_features=max_num_elements, + num_shards=num_shards, + output_type='tf_sequence_example') + p.run() + filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') + actual_output = [] + record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), 1) + self.assert_expected_sequence_example( + [tf.train.SequenceExample.FromString( + tf_example) 
for tf_example in actual_output]) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py new file mode 100644 index 0000000000000000000000000000000000000000..dbf3cad0eacaa4883aba340e34bb623a96d3af50 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py @@ -0,0 +1,334 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Beam pipeline to create COCO Camera Traps Object Detection TFRecords. + +Please note that this tool creates sharded output files. + +This tool assumes the input annotations are in the COCO Camera Traps json +format, specified here: +https://github.com/Microsoft/CameraTraps/blob/master/data_management/README.md + +Example usage: + + python create_cococameratraps_tfexample_main.py \ + --alsologtostderr \ + --output_tfrecord_prefix="/path/to/output/tfrecord/location/prefix" \ + --image_directory="/path/to/image/folder/" \ + --input_annotations_file="path/to/annotations.json" + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import hashlib +import io +import json +import os +import numpy as np +import PIL.Image +import tensorflow as tf +from object_detection.utils import dataset_util + +try: + import apache_beam as beam # pylint:disable=g-import-not-at-top +except ModuleNotFoundError: + pass + + +class ParseImage(beam.DoFn): + """A DoFn that parses a COCO-CameraTraps json and emits TFRecords.""" + + def __init__(self, image_directory, images, annotations, categories, + keep_bboxes): + """Initialization function. + + Args: + image_directory: Path to image directory + images: list of COCO Camera Traps style image dictionaries + annotations: list of COCO Camera Traps style annotation dictionaries + categories: list of COCO Camera Traps style category dictionaries + keep_bboxes: Whether to keep any bounding boxes that exist in the + annotations + """ + + self._image_directory = image_directory + self._image_dict = {im['id']: im for im in images} + self._annotation_dict = {im['id']: [] for im in images} + self._category_dict = {int(cat['id']): cat for cat in categories} + for ann in annotations: + self._annotation_dict[ann['image_id']].append(ann) + self._images = images + self._keep_bboxes = keep_bboxes + + self._num_examples_processed = beam.metrics.Metrics.counter( + 'cococameratraps_data_generation', 'num_tf_examples_processed') + + def process(self, image_id): + """Builds a tf.Example given an image id. 
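+
+    Images that are missing on disk or fail to decode are skipped (an empty
+    list is returned for them).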
+ + Args: + image_id: the image id of the associated image + + Returns: + List of tf.Examples. + """ + + image = self._image_dict[image_id] + annotations = self._annotation_dict[image_id] + image_height = image['height'] + image_width = image['width'] + filename = image['file_name'] + image_id = image['id'] + image_location_id = image['location'] + + image_datetime = str(image['date_captured']) + + image_sequence_id = str(image['seq_id']) + image_sequence_num_frames = int(image['seq_num_frames']) + image_sequence_frame_num = int(image['frame_num']) + + full_path = os.path.join(self._image_directory, filename) + + try: + # Ensure the image exists and is not corrupted + with tf.io.gfile.GFile(full_path, 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = PIL.Image.open(encoded_jpg_io) + image = tf.io.decode_jpeg(encoded_jpg, channels=3) + except Exception: # pylint: disable=broad-except + # The image file is missing or corrupt + return [] + + key = hashlib.sha256(encoded_jpg).hexdigest() + feature_dict = { + 'image/height': + dataset_util.int64_feature(image_height), + 'image/width': + dataset_util.int64_feature(image_width), + 'image/filename': + dataset_util.bytes_feature(filename.encode('utf8')), + 'image/source_id': + dataset_util.bytes_feature(str(image_id).encode('utf8')), + 'image/key/sha256': + dataset_util.bytes_feature(key.encode('utf8')), + 'image/encoded': + dataset_util.bytes_feature(encoded_jpg), + 'image/format': + dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/location': + dataset_util.bytes_feature(str(image_location_id).encode('utf8')), + 'image/seq_num_frames': + dataset_util.int64_feature(image_sequence_num_frames), + 'image/seq_frame_num': + dataset_util.int64_feature(image_sequence_frame_num), + 'image/seq_id': + dataset_util.bytes_feature(image_sequence_id.encode('utf8')), + 'image/date_captured': + dataset_util.bytes_feature(image_datetime.encode('utf8')) + } + + num_annotations_skipped = 0 + if annotations: + xmin = [] + xmax = [] + ymin = [] + ymax = [] + category_names = [] + category_ids = [] + area = [] + + for object_annotations in annotations: + if 'bbox' in object_annotations and self._keep_bboxes: + (x, y, width, height) = tuple(object_annotations['bbox']) + if width <= 0 or height <= 0: + num_annotations_skipped += 1 + continue + if x + width > image_width or y + height > image_height: + num_annotations_skipped += 1 + continue + xmin.append(float(x) / image_width) + xmax.append(float(x + width) / image_width) + ymin.append(float(y) / image_height) + ymax.append(float(y + height) / image_height) + if 'area' in object_annotations: + area.append(object_annotations['area']) + else: + # approximate area using l*w/2 + area.append(width*height/2.0) + + category_id = int(object_annotations['category_id']) + category_ids.append(category_id) + category_names.append( + self._category_dict[category_id]['name'].encode('utf8')) + + feature_dict.update({ + 'image/object/bbox/xmin': + dataset_util.float_list_feature(xmin), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(xmax), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(ymin), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(ymax), + 'image/object/class/text': + dataset_util.bytes_list_feature(category_names), + 'image/object/class/label': + dataset_util.int64_list_feature(category_ids), + 'image/object/area': + dataset_util.float_list_feature(area), + }) + + # For classification, add the first category to image/class/label 
and + # image/class/text + if not category_ids: + feature_dict.update({ + 'image/class/label': + dataset_util.int64_list_feature([0]), + 'image/class/text': + dataset_util.bytes_list_feature(['empty'.encode('utf8')]), + }) + else: + feature_dict.update({ + 'image/class/label': + dataset_util.int64_list_feature([category_ids[0]]), + 'image/class/text': + dataset_util.bytes_list_feature([category_names[0]]), + }) + + else: + # Add empty class if there are no annotations + feature_dict.update({ + 'image/class/label': + dataset_util.int64_list_feature([0]), + 'image/class/text': + dataset_util.bytes_list_feature(['empty'.encode('utf8')]), + }) + + example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) + self._num_examples_processed.inc(1) + + return [(example)] + + +def load_json_data(data_file): + with tf.io.gfile.GFile(data_file, 'r') as fid: + data_dict = json.load(fid) + return data_dict + + +def create_pipeline(pipeline, + image_directory, + input_annotations_file, + output_tfrecord_prefix=None, + num_images_per_shard=200, + keep_bboxes=True): + """Creates a beam pipeline for producing a COCO-CameraTraps Image dataset. + + Args: + pipeline: Initialized beam pipeline. + image_directory: Path to image directory + input_annotations_file: Path to a coco-cameratraps annotation file + output_tfrecord_prefix: Absolute path for tfrecord outputs. Final files will + be named {output_tfrecord_prefix}@N. + num_images_per_shard: The number of images to store in each shard + keep_bboxes: Whether to keep any bounding boxes that exist in the json file + """ + + data = load_json_data(input_annotations_file) + + num_shards = int(np.ceil(float(len(data['images']))/num_images_per_shard)) + + image_examples = ( + pipeline | ('CreateCollections') >> beam.Create( + [im['id'] for im in data['images']]) + | ('ParseImage') >> beam.ParDo(ParseImage( + image_directory, data['images'], data['annotations'], + data['categories'], keep_bboxes=keep_bboxes))) + _ = (image_examples + | ('Reshuffle') >> beam.Reshuffle() + | ('WriteTfImageExample') >> beam.io.tfrecordio.WriteToTFRecord( + output_tfrecord_prefix, + num_shards=num_shards, + coder=beam.coders.ProtoCoder(tf.train.Example))) + + +def parse_args(argv): + """Command-line argument parser. + + Args: + argv: command line arguments + Returns: + beam_args: Arguments for the beam pipeline. + pipeline_args: Arguments for the pipeline options, such as runner type. + """ + parser = argparse.ArgumentParser() + parser.add_argument( + '--image_directory', + dest='image_directory', + required=True, + help='Path to the directory where the images are stored.') + parser.add_argument( + '--output_tfrecord_prefix', + dest='output_tfrecord_prefix', + required=True, + help='Path and prefix to store TFRecords containing images in tf.Example' + 'format.') + parser.add_argument( + '--input_annotations_file', + dest='input_annotations_file', + required=True, + help='Path to Coco-CameraTraps style annotations file.') + parser.add_argument( + '--num_images_per_shard', + dest='num_images_per_shard', + default=200, + help='The number of images to be stored in each outputshard.') + beam_args, pipeline_args = parser.parse_known_args(argv) + return beam_args, pipeline_args + + +def main(argv=None, save_main_session=True): + """Runs the Beam pipeline that performs inference. + + Args: + argv: Command line arguments. + save_main_session: Whether to save the main session. 
+ """ + args, pipeline_args = parse_args(argv) + + pipeline_options = beam.options.pipeline_options.PipelineOptions( + pipeline_args) + pipeline_options.view_as( + beam.options.pipeline_options.SetupOptions).save_main_session = ( + save_main_session) + + dirname = os.path.dirname(args.output_tfrecord_prefix) + tf.io.gfile.makedirs(dirname) + + p = beam.Pipeline(options=pipeline_options) + create_pipeline( + pipeline=p, + image_directory=args.image_directory, + input_annotations_file=args.input_annotations_file, + output_tfrecord_prefix=args.output_tfrecord_prefix, + num_images_per_shard=args.num_images_per_shard) + p.run() + + +if __name__ == '__main__': + main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0a1ac203f334574a3b09654fd736047b8236fa38 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py @@ -0,0 +1,214 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for create_cococameratraps_tfexample_main.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import datetime +import json +import os +import tempfile +import unittest + +import numpy as np + +from PIL import Image +import tensorflow as tf +from object_detection.utils import tf_version + +if tf_version.is_tf2(): + from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main # pylint:disable=g-import-not-at-top + +try: + import apache_beam as beam # pylint:disable=g-import-not-at-top +except ModuleNotFoundError: + pass + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase): + + IMAGE_HEIGHT = 360 + IMAGE_WIDTH = 480 + + def _write_random_images_to_directory(self, directory, num_frames): + for frame_num in range(num_frames): + img = np.random.randint(0, high=256, + size=(self.IMAGE_HEIGHT, self.IMAGE_WIDTH, 3), + dtype=np.uint8) + pil_image = Image.fromarray(img) + fname = 'im_' + str(frame_num) + '.jpg' + pil_image.save(os.path.join(directory, fname), 'JPEG') + + def _create_json_file(self, directory, num_frames, keep_bboxes=False): + json_dict = {'images': [], 'annotations': []} + json_dict['categories'] = [{'id': 0, 'name': 'empty'}, + {'id': 1, 'name': 'animal'}] + for idx in range(num_frames): + im = {'id': 'im_' + str(idx), + 'file_name': 'im_' + str(idx) + '.jpg', + 'height': self.IMAGE_HEIGHT, + 'width': self.IMAGE_WIDTH, + 'seq_id': 'seq_1', + 'seq_num_frames': num_frames, + 'frame_num': idx, + 'location': 'loc_' + str(idx), + 'date_captured': str(datetime.datetime.now()) + } + json_dict['images'].append(im) + ann = {'id': 'ann' + str(idx), + 'image_id': 'im_' + str(idx), + 'category_id': 1, + } + if keep_bboxes: + ann['bbox'] = [0.0 * self.IMAGE_WIDTH, + 0.1 * self.IMAGE_HEIGHT, + 0.5 * self.IMAGE_WIDTH, + 0.5 * self.IMAGE_HEIGHT] + json_dict['annotations'].append(ann) + + json_path = os.path.join(directory, 'test_file.json') + with tf.io.gfile.GFile(json_path, 'w') as f: + json.dump(json_dict, f) + return json_path + + def assert_expected_example_bbox(self, example): + self.assertAllClose( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.1]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.0]) + self.assertAllClose( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.6]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.5]) + self.assertAllClose( + example.features.feature['image/object/class/label'] + .int64_list.value, [1]) + self.assertAllEqual( + example.features.feature['image/object/class/text'] + .bytes_list.value, [b'animal']) + self.assertAllClose( + example.features.feature['image/class/label'] + .int64_list.value, [1]) + self.assertAllEqual( + example.features.feature['image/class/text'] + .bytes_list.value, [b'animal']) + + # Check other essential attributes. 
+ self.assertAllEqual( + example.features.feature['image/height'].int64_list.value, + [self.IMAGE_HEIGHT]) + self.assertAllEqual( + example.features.feature['image/width'].int64_list.value, + [self.IMAGE_WIDTH]) + self.assertAllEqual( + example.features.feature['image/source_id'].bytes_list.value, + [b'im_0']) + self.assertTrue( + example.features.feature['image/encoded'].bytes_list.value) + + def assert_expected_example(self, example): + self.assertAllClose( + example.features.feature['image/object/bbox/ymin'].float_list.value, + []) + self.assertAllClose( + example.features.feature['image/object/bbox/xmin'].float_list.value, + []) + self.assertAllClose( + example.features.feature['image/object/bbox/ymax'].float_list.value, + []) + self.assertAllClose( + example.features.feature['image/object/bbox/xmax'].float_list.value, + []) + self.assertAllClose( + example.features.feature['image/object/class/label'] + .int64_list.value, [1]) + self.assertAllEqual( + example.features.feature['image/object/class/text'] + .bytes_list.value, [b'animal']) + self.assertAllClose( + example.features.feature['image/class/label'] + .int64_list.value, [1]) + self.assertAllEqual( + example.features.feature['image/class/text'] + .bytes_list.value, [b'animal']) + + # Check other essential attributes. + self.assertAllEqual( + example.features.feature['image/height'].int64_list.value, + [self.IMAGE_HEIGHT]) + self.assertAllEqual( + example.features.feature['image/width'].int64_list.value, + [self.IMAGE_WIDTH]) + self.assertAllEqual( + example.features.feature['image/source_id'].bytes_list.value, + [b'im_0']) + self.assertTrue( + example.features.feature['image/encoded'].bytes_list.value) + + def test_beam_pipeline(self): + num_frames = 1 + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + json_path = self._create_json_file(temp_dir, num_frames) + output_tfrecord = temp_dir+'/output' + self._write_random_images_to_directory(temp_dir, num_frames) + pipeline_options = beam.options.pipeline_options.PipelineOptions( + runner='DirectRunner') + p = beam.Pipeline(options=pipeline_options) + create_cococameratraps_tfexample_main.create_pipeline( + p, temp_dir, json_path, + output_tfrecord_prefix=output_tfrecord) + p.run() + filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') + actual_output = [] + record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), num_frames) + self.assert_expected_example(tf.train.Example.FromString( + actual_output[0])) + + def test_beam_pipeline_bbox(self): + num_frames = 1 + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + json_path = self._create_json_file(temp_dir, num_frames, keep_bboxes=True) + output_tfrecord = temp_dir+'/output' + self._write_random_images_to_directory(temp_dir, num_frames) + pipeline_options = beam.options.pipeline_options.PipelineOptions( + runner='DirectRunner') + p = beam.Pipeline(options=pipeline_options) + create_cococameratraps_tfexample_main.create_pipeline( + p, temp_dir, json_path, + output_tfrecord_prefix=output_tfrecord, + keep_bboxes=True) + p.run() + filenames = tf.io.gfile.glob(output_tfrecord+'-?????-of-?????') + actual_output = [] + record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), num_frames) + 
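# ---------------------------------------------------------------------------
# Editor's note (not part of this patch): the bbox values asserted in these
# tests follow from how ParseImage normalizes a COCO-style absolute
# [x, y, width, height] box into corner coordinates divided by the image size.
# A standalone sketch of that arithmetic, using the test's 480x360 image:
def normalize_coco_bbox(bbox, image_width, image_height):
  """Converts absolute [x, y, w, h] to normalized (xmin, xmax, ymin, ymax)."""
  x, y, w, h = bbox
  return (x / image_width, (x + w) / image_width,
          y / image_height, (y + h) / image_height)

xmin, xmax, ymin, ymax = normalize_coco_bbox(
    [0.0 * 480, 0.1 * 360, 0.5 * 480, 0.5 * 360], 480, 360)
assert (xmin, xmax, ymin, ymax) == (0.0, 0.5, 0.1, 0.6)
# ---------------------------------------------------------------------------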
self.assert_expected_example_bbox(tf.train.Example.FromString( + actual_output[0])) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_detection_data.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_detection_data.py new file mode 100644 index 0000000000000000000000000000000000000000..c826873802f09ffbc48788576eb9c02038ceeb65 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_detection_data.py @@ -0,0 +1,283 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""A Beam job to generate detection data for camera trap images. + +This tools allows to run inference with an exported Object Detection model in +`saved_model` format and produce raw detection boxes on images in tf.Examples, +with the assumption that the bounding box class label will match the image-level +class label in the tf.Example. + +Steps to generate a detection dataset: +1. Use object_detection/export_inference_graph.py to get a `saved_model` for + inference. The input node must accept a tf.Example proto. +2. Run this tool with `saved_model` from step 1 and an TFRecord of tf.Example + protos containing images for inference. + +Example Usage: +-------------- +python tensorflow_models/object_detection/export_inference_graph.py \ + --alsologtostderr \ + --input_type tf_example \ + --pipeline_config_path path/to/detection_model.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + +python generate_detection_data.py \ + --alsologtostderr \ + --input_tfrecord path/to/input_tfrecord@X \ + --output_tfrecord path/to/output_tfrecord@X \ + --model_dir path/to/exported_model_directory/saved_model +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import threading +import tensorflow as tf + +try: + import apache_beam as beam # pylint:disable=g-import-not-at-top +except ModuleNotFoundError: + pass + + +class GenerateDetectionDataFn(beam.DoFn): + """Generates detection data for camera trap images. + + This Beam DoFn performs inference with an object detection `saved_model` and + produces detection boxes for camera trap data, matched to the + object class. + """ + session_lock = threading.Lock() + + def __init__(self, model_dir, confidence_threshold): + """Initialization function. + + Args: + model_dir: A directory containing saved model. 
+ confidence_threshold: the confidence threshold for boxes to keep + """ + self._model_dir = model_dir + self._confidence_threshold = confidence_threshold + self._session = None + self._num_examples_processed = beam.metrics.Metrics.counter( + 'detection_data_generation', 'num_tf_examples_processed') + + def setup(self): + self._load_inference_model() + + def _load_inference_model(self): + # Because initialization of the tf.Session is expensive we share + # one instance across all threads in the worker. This is possible since + # tf.Session.run() is thread safe. + with self.session_lock: + self._detect_fn = tf.saved_model.load(self._model_dir) + + def process(self, tfrecord_entry): + return self._run_inference_and_generate_detections(tfrecord_entry) + + def _run_inference_and_generate_detections(self, tfrecord_entry): + input_example = tf.train.Example.FromString(tfrecord_entry) + if input_example.features.feature[ + 'image/object/bbox/ymin'].float_list.value: + # There are already ground truth boxes for this image, just keep them. + return [input_example] + + detections = self._detect_fn.signatures['serving_default']( + (tf.expand_dims(tf.convert_to_tensor(tfrecord_entry), 0))) + detection_boxes = detections['detection_boxes'] + num_detections = detections['num_detections'] + detection_scores = detections['detection_scores'] + + example = tf.train.Example() + + num_detections = int(num_detections[0]) + + image_class_labels = input_example.features.feature[ + 'image/object/class/label'].int64_list.value + + image_class_texts = input_example.features.feature[ + 'image/object/class/text'].bytes_list.value + + # Ignore any images with multiple classes, + # we can't match the class to the box. + if len(image_class_labels) > 1: + return [] + + # Don't add boxes for images already labeled empty (for now) + if len(image_class_labels) == 1: + # Add boxes over confidence threshold. + for idx, score in enumerate(detection_scores[0]): + if score >= self._confidence_threshold and idx < num_detections: + example.features.feature[ + 'image/object/bbox/ymin'].float_list.value.extend([ + detection_boxes[0, idx, 0]]) + example.features.feature[ + 'image/object/bbox/xmin'].float_list.value.extend([ + detection_boxes[0, idx, 1]]) + example.features.feature[ + 'image/object/bbox/ymax'].float_list.value.extend([ + detection_boxes[0, idx, 2]]) + example.features.feature[ + 'image/object/bbox/xmax'].float_list.value.extend([ + detection_boxes[0, idx, 3]]) + + # Add box scores and class texts and labels. 
+ example.features.feature[ + 'image/object/class/score'].float_list.value.extend( + [score]) + + example.features.feature[ + 'image/object/class/label'].int64_list.value.extend( + [image_class_labels[0]]) + + example.features.feature[ + 'image/object/class/text'].bytes_list.value.extend( + [image_class_texts[0]]) + + # Add other essential example attributes + example.features.feature['image/encoded'].bytes_list.value.extend( + input_example.features.feature['image/encoded'].bytes_list.value) + example.features.feature['image/height'].int64_list.value.extend( + input_example.features.feature['image/height'].int64_list.value) + example.features.feature['image/width'].int64_list.value.extend( + input_example.features.feature['image/width'].int64_list.value) + example.features.feature['image/source_id'].bytes_list.value.extend( + input_example.features.feature['image/source_id'].bytes_list.value) + example.features.feature['image/location'].bytes_list.value.extend( + input_example.features.feature['image/location'].bytes_list.value) + + example.features.feature['image/date_captured'].bytes_list.value.extend( + input_example.features.feature['image/date_captured'].bytes_list.value) + + example.features.feature['image/class/text'].bytes_list.value.extend( + input_example.features.feature['image/class/text'].bytes_list.value) + example.features.feature['image/class/label'].int64_list.value.extend( + input_example.features.feature['image/class/label'].int64_list.value) + + example.features.feature['image/seq_id'].bytes_list.value.extend( + input_example.features.feature['image/seq_id'].bytes_list.value) + example.features.feature['image/seq_num_frames'].int64_list.value.extend( + input_example.features.feature['image/seq_num_frames'].int64_list.value) + example.features.feature['image/seq_frame_num'].int64_list.value.extend( + input_example.features.feature['image/seq_frame_num'].int64_list.value) + + self._num_examples_processed.inc(1) + return [example] + + +def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, model_dir, + confidence_threshold, num_shards): + """Returns a Beam pipeline to run object detection inference. + + Args: + pipeline: Initialized beam pipeline. + input_tfrecord: A TFRecord of tf.train.Example protos containing images. + output_tfrecord: A TFRecord of tf.train.Example protos that contain images + in the input TFRecord and the detections from the model. + model_dir: Path to `saved_model` to use for inference. + confidence_threshold: Threshold to use when keeping detection results. + num_shards: The number of output shards. + """ + input_collection = ( + pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord( + input_tfrecord, + coder=beam.coders.BytesCoder())) + output_collection = input_collection | 'RunInference' >> beam.ParDo( + GenerateDetectionDataFn(model_dir, confidence_threshold)) + output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle() + _ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord( + output_tfrecord, + num_shards=num_shards, + coder=beam.coders.ProtoCoder(tf.train.Example)) + + +def parse_args(argv): + """Command-line argument parser. + + Args: + argv: command line arguments + Returns: + beam_args: Arguments for the beam pipeline. + pipeline_args: Arguments for the pipeline options, such as runner type. 
+ """ + parser = argparse.ArgumentParser() + parser.add_argument( + '--detection_input_tfrecord', + dest='detection_input_tfrecord', + required=True, + help='TFRecord containing images in tf.Example format for object ' + 'detection.') + parser.add_argument( + '--detection_output_tfrecord', + dest='detection_output_tfrecord', + required=True, + help='TFRecord containing detections in tf.Example format.') + parser.add_argument( + '--detection_model_dir', + dest='detection_model_dir', + required=True, + help='Path to directory containing an object detection SavedModel.') + parser.add_argument( + '--confidence_threshold', + dest='confidence_threshold', + default=0.9, + help='Min confidence to keep bounding boxes.') + parser.add_argument( + '--num_shards', + dest='num_shards', + default=0, + help='Number of output shards.') + beam_args, pipeline_args = parser.parse_known_args(argv) + return beam_args, pipeline_args + + +def main(argv=None, save_main_session=True): + """Runs the Beam pipeline that performs inference. + + Args: + argv: Command line arguments. + save_main_session: Whether to save the main session. + """ + + args, pipeline_args = parse_args(argv) + + pipeline_options = beam.options.pipeline_options.PipelineOptions( + pipeline_args) + pipeline_options.view_as( + beam.options.pipeline_options.SetupOptions).save_main_session = ( + save_main_session) + + dirname = os.path.dirname(args.detection_output_tfrecord) + tf.io.gfile.makedirs(dirname) + + p = beam.Pipeline(options=pipeline_options) + + construct_pipeline( + p, + args.detection_input_tfrecord, + args.detection_output_tfrecord, + args.detection_model_dir, + args.confidence_threshold, + args.num_shards) + + p.run() + + +if __name__ == '__main__': + main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..71b327635579ea812c38deb4190248cff1a187b8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py @@ -0,0 +1,261 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for generate_detection_data.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import os +import tempfile +import unittest +import numpy as np +import six +import tensorflow as tf + +from object_detection import exporter_lib_v2 +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.protos import pipeline_pb2 +from object_detection.utils import tf_version + +if tf_version.is_tf2(): + from object_detection.dataset_tools.context_rcnn import generate_detection_data # pylint:disable=g-import-not-at-top + +if six.PY2: + import mock # pylint: disable=g-import-not-at-top +else: + mock = unittest.mock + +try: + import apache_beam as beam # pylint:disable=g-import-not-at-top +except ModuleNotFoundError: + pass + + +class FakeModel(model.DetectionModel): + + def __init__(self, conv_weight_scalar=1.0): + super(FakeModel, self).__init__(num_classes=5) + self._conv = tf.keras.layers.Conv2D( + filters=1, kernel_size=1, strides=(1, 1), padding='valid', + kernel_initializer=tf.keras.initializers.Constant( + value=conv_weight_scalar)) + + def preprocess(self, inputs): + true_image_shapes = [] # Doesn't matter for the fake model. + return tf.identity(inputs), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes): + return {'image': self._conv(preprocessed_inputs)} + + def postprocess(self, prediction_dict, true_image_shapes): + with tf.control_dependencies(list(prediction_dict.values())): + postprocessed_tensors = { + 'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6], + [0.5, 0.5, 0.8, 0.8]]], tf.float32), + 'detection_scores': tf.constant([[0.95, 0.6]], tf.float32), + 'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2], + [0.3, 0.1, 0.6]]], + tf.float32), + 'detection_classes': tf.constant([[0, 1]], tf.float32), + 'num_detections': tf.constant([2], tf.float32) + } + return postprocessed_tensors + + def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): + pass + + def restore_from_objects(self, fine_tune_checkpoint_type): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + +@contextlib.contextmanager +def InMemoryTFRecord(entries): + temp = tempfile.NamedTemporaryFile(delete=False) + filename = temp.name + try: + with tf.io.TFRecordWriter(filename) as writer: + for value in entries: + writer.write(value) + yield filename + finally: + os.unlink(filename) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class GenerateDetectionDataTest(tf.test.TestCase): + + def _save_checkpoint_from_mock_model(self, checkpoint_path): + """A function to save checkpoint from a fake Detection Model. + + Args: + checkpoint_path: Path to save checkpoint from Fake model. 
+ """ + mock_model = FakeModel() + fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) + preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + ckpt = tf.train.Checkpoint(model=mock_model) + exported_checkpoint_manager = tf.train.CheckpointManager( + ckpt, checkpoint_path, max_to_keep=1) + exported_checkpoint_manager.save(checkpoint_number=0) + + def _export_saved_model(self): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + output_directory = os.path.join(tmp_dir, 'output') + saved_model_path = os.path.join(output_directory, 'saved_model') + tf.io.gfile.makedirs(output_directory) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter_lib_v2.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory) + saved_model_path = os.path.join(output_directory, 'saved_model') + return saved_model_path + + def _create_tf_example(self): + with self.test_session(): + encoded_image = tf.io.encode_jpeg( + tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).numpy() + + def BytesFeature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + def Int64Feature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': BytesFeature(encoded_image), + 'image/source_id': BytesFeature(b'image_id'), + 'image/height': Int64Feature(4), + 'image/width': Int64Feature(6), + 'image/object/class/label': Int64Feature(5), + 'image/object/class/text': BytesFeature(b'hyena'), + 'image/class/label': Int64Feature(5), + 'image/class/text': BytesFeature(b'hyena'), + })) + + return example.SerializeToString() + + def assert_expected_example(self, example): + self.assertAllClose( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.0]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.1]) + self.assertAllClose( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.5]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.6]) + self.assertAllClose( + example.features.feature['image/object/class/score'] + .float_list.value, [0.95]) + self.assertAllClose( + example.features.feature['image/object/class/label'] + .int64_list.value, [5]) + self.assertAllEqual( + example.features.feature['image/object/class/text'] + .bytes_list.value, [b'hyena']) + self.assertAllClose( + example.features.feature['image/class/label'] + .int64_list.value, [5]) + self.assertAllEqual( + example.features.feature['image/class/text'] + .bytes_list.value, [b'hyena']) + + # Check other essential attributes. 
+ self.assertAllEqual( + example.features.feature['image/height'].int64_list.value, [4]) + self.assertAllEqual( + example.features.feature['image/width'].int64_list.value, [6]) + self.assertAllEqual( + example.features.feature['image/source_id'].bytes_list.value, + [b'image_id']) + self.assertTrue( + example.features.feature['image/encoded'].bytes_list.value) + + def test_generate_detection_data_fn(self): + saved_model_path = self._export_saved_model() + confidence_threshold = 0.8 + inference_fn = generate_detection_data.GenerateDetectionDataFn( + saved_model_path, confidence_threshold) + inference_fn.setup() + generated_example = self._create_tf_example() + self.assertAllEqual(tf.train.Example.FromString( + generated_example).features.feature['image/object/class/label'] + .int64_list.value, [5]) + self.assertAllEqual(tf.train.Example.FromString( + generated_example).features.feature['image/object/class/text'] + .bytes_list.value, [b'hyena']) + output = inference_fn.process(generated_example) + output_example = output[0] + + self.assertAllEqual( + output_example.features.feature['image/object/class/label'] + .int64_list.value, [5]) + self.assertAllEqual(output_example.features.feature['image/width'] + .int64_list.value, [6]) + + self.assert_expected_example(output_example) + + def test_beam_pipeline(self): + with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord: + temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + output_tfrecord = os.path.join(temp_dir, 'output_tfrecord') + saved_model_path = self._export_saved_model() + confidence_threshold = 0.8 + num_shards = 1 + pipeline_options = beam.options.pipeline_options.PipelineOptions( + runner='DirectRunner') + p = beam.Pipeline(options=pipeline_options) + generate_detection_data.construct_pipeline( + p, input_tfrecord, output_tfrecord, saved_model_path, + confidence_threshold, num_shards) + p.run() + filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') + actual_output = [] + record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), 1) + self.assert_expected_example(tf.train.Example.FromString( + actual_output[0])) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py new file mode 100644 index 0000000000000000000000000000000000000000..02e1382c03b07eef637128da987ab804268f428e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py @@ -0,0 +1,355 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
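# ---------------------------------------------------------------------------
# Editor's sketch (not part of this patch): GenerateDetectionDataFn drives a
# SavedModel exported with input_type='tf_example', so its serving signature
# takes a batch of serialized tf.Example strings. Outside Beam, the same call
# pattern used in the DoFn looks roughly like this (paths are hypothetical):
import tensorflow as tf

detect_fn = tf.saved_model.load('/tmp/exported_model/saved_model')
serialized_example = next(iter(
    tf.data.TFRecordDataset('/tmp/images.tfrecord').as_numpy_iterator()))
detections = detect_fn.signatures['serving_default'](
    tf.expand_dims(tf.convert_to_tensor(serialized_example), 0))
boxes = detections['detection_boxes'][0]    # [num_boxes, 4] as ymin, xmin, ymax, xmax
scores = detections['detection_scores'][0]  # confidence per box
num_valid = int(detections['num_detections'][0])
# ---------------------------------------------------------------------------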
+# ============================================================================== +r"""A Beam job to generate embedding data for camera trap images. + +This tool runs inference with an exported Object Detection model in +`saved_model` format and produce raw embeddings for camera trap data. These +embeddings contain an object-centric feature embedding from Faster R-CNN, the +datetime that the image was taken (normalized in a specific way), and the +position of the object of interest. By default, only the highest-scoring object +embedding is included. + +Steps to generate a embedding dataset: +1. Use object_detection/export_inference_graph.py to get a Faster R-CNN + `saved_model` for inference. The input node must accept a tf.Example proto. +2. Run this tool with `saved_model` from step 1 and an TFRecord of tf.Example + protos containing images for inference. + +Example Usage: +-------------- +python tensorflow_models/object_detection/export_inference_graph.py \ + --alsologtostderr \ + --input_type tf_example \ + --pipeline_config_path path/to/faster_rcnn_model.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory \ + --additional_output_tensor_names detection_features + +python generate_embedding_data.py \ + --alsologtostderr \ + --embedding_input_tfrecord path/to/input_tfrecords* \ + --embedding_output_tfrecord path/to/output_tfrecords \ + --embedding_model_dir path/to/exported_model_directory/saved_model +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import datetime +import os +import threading + +import numpy as np +import six +import tensorflow as tf + +try: + import apache_beam as beam # pylint:disable=g-import-not-at-top +except ModuleNotFoundError: + pass + + +def add_keys(serialized_example): + key = hash(serialized_example) + return key, serialized_example + + +def drop_keys(key_value_tuple): + return key_value_tuple[1] + + +def get_date_captured(example): + date_captured = datetime.datetime.strptime( + six.ensure_str( + example.features.feature['image/date_captured'].bytes_list.value[0]), + '%Y-%m-%d %H:%M:%S') + return date_captured + + +def embed_date_captured(date_captured): + """Encodes the datetime of the image.""" + embedded_date_captured = [] + month_max = 12.0 + day_max = 31.0 + hour_max = 24.0 + minute_max = 60.0 + min_year = 1990.0 + max_year = 2030.0 + + year = (date_captured.year - min_year) / float(max_year - min_year) + embedded_date_captured.append(year) + + month = (date_captured.month - 1) / month_max + embedded_date_captured.append(month) + + day = (date_captured.day - 1) / day_max + embedded_date_captured.append(day) + + hour = date_captured.hour / hour_max + embedded_date_captured.append(hour) + + minute = date_captured.minute / minute_max + embedded_date_captured.append(minute) + + return np.asarray(embedded_date_captured) + + +def embed_position_and_size(box): + """Encodes the bounding box of the object of interest.""" + ymin = box[0] + xmin = box[1] + ymax = box[2] + xmax = box[3] + w = xmax - xmin + h = ymax - ymin + x = xmin + w / 2.0 + y = ymin + h / 2.0 + return np.asarray([x, y, w, h]) + + +def get_bb_embedding(detection_features, detection_boxes, detection_scores, + index): + embedding = detection_features[0][index] + pooled_embedding = np.mean(np.mean(embedding, axis=1), axis=0) + + box = detection_boxes[0][index] + position_embedding = embed_position_and_size(box) + + score = 
detection_scores[0][index] + return np.concatenate((pooled_embedding, position_embedding)), score + + +class GenerateEmbeddingDataFn(beam.DoFn): + """Generates embedding data for camera trap images. + + This Beam DoFn performs inference with an object detection `saved_model` and + produces contextual embedding vectors. + """ + session_lock = threading.Lock() + + def __init__(self, model_dir, top_k_embedding_count, + bottom_k_embedding_count): + """Initialization function. + + Args: + model_dir: A directory containing saved model. + top_k_embedding_count: the number of high-confidence embeddings to store + bottom_k_embedding_count: the number of low-confidence embeddings to store + """ + self._model_dir = model_dir + self._session = None + self._num_examples_processed = beam.metrics.Metrics.counter( + 'embedding_data_generation', 'num_tf_examples_processed') + self._top_k_embedding_count = top_k_embedding_count + self._bottom_k_embedding_count = bottom_k_embedding_count + + def setup(self): + self._load_inference_model() + + def _load_inference_model(self): + # Because initialization of the tf.Session is expensive we share + # one instance across all threads in the worker. This is possible since + # tf.Session.run() is thread safe. + with self.session_lock: + self._detect_fn = tf.saved_model.load(self._model_dir) + + def process(self, tfexample_key_value): + return self._run_inference_and_generate_embedding(tfexample_key_value) + + def _run_inference_and_generate_embedding(self, tfexample_key_value): + key, tfexample = tfexample_key_value + input_example = tf.train.Example.FromString(tfexample) + example = tf.train.Example() + example.CopyFrom(input_example) + + try: + date_captured = get_date_captured(input_example) + unix_time = ((date_captured - + datetime.datetime.fromtimestamp(0)).total_seconds()) + example.features.feature['image/unix_time'].float_list.value.extend( + [unix_time]) + temporal_embedding = embed_date_captured(date_captured) + except Exception: # pylint: disable=broad-except + temporal_embedding = None + + detections = self._detect_fn.signatures['serving_default']( + (tf.expand_dims(tf.convert_to_tensor(tfexample), 0))) + detection_features = detections['detection_features'] + detection_boxes = detections['detection_boxes'] + num_detections = detections['num_detections'] + detection_scores = detections['detection_scores'] + + num_detections = int(num_detections) + embed_all = [] + score_all = [] + + detection_features = np.asarray(detection_features) + + embedding_count = 0 + for index in range(min(num_detections, self._top_k_embedding_count)): + bb_embedding, score = get_bb_embedding( + detection_features, detection_boxes, detection_scores, index) + embed_all.extend(bb_embedding) + if temporal_embedding is not None: embed_all.extend(temporal_embedding) + score_all.append(score) + embedding_count += 1 + + for index in range( + max(0, num_detections - 1), + max(-1, num_detections - 1 - self._bottom_k_embedding_count), -1): + bb_embedding, score = get_bb_embedding( + detection_features, detection_boxes, detection_scores, index) + embed_all.extend(bb_embedding) + if temporal_embedding is not None: embed_all.extend(temporal_embedding) + score_all.append(score) + embedding_count += 1 + + if embedding_count == 0: + bb_embedding, score = get_bb_embedding( + detection_features, detection_boxes, detection_scores, 0) + embed_all.extend(bb_embedding) + if temporal_embedding is not None: embed_all.extend(temporal_embedding) + score_all.append(score) + + # Takes max in case 
embedding_count is 0. + embedding_length = len(embed_all) // max(1, embedding_count) + + embed_all = np.asarray(embed_all) + + example.features.feature['image/embedding'].float_list.value.extend( + embed_all) + example.features.feature['image/embedding_score'].float_list.value.extend( + score_all) + example.features.feature['image/embedding_length'].int64_list.value.append( + embedding_length) + example.features.feature['image/embedding_count'].int64_list.value.append( + embedding_count) + + self._num_examples_processed.inc(1) + return [(key, example)] + + +def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, model_dir, + top_k_embedding_count, bottom_k_embedding_count, + num_shards): + """Returns a beam pipeline to run object detection inference. + + Args: + pipeline: Initialized beam pipeline. + input_tfrecord: An TFRecord of tf.train.Example protos containing images. + output_tfrecord: An TFRecord of tf.train.Example protos that contain images + in the input TFRecord and the detections from the model. + model_dir: Path to `saved_model` to use for inference. + top_k_embedding_count: The number of high-confidence embeddings to store. + bottom_k_embedding_count: The number of low-confidence embeddings to store. + num_shards: The number of output shards. + """ + input_collection = ( + pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord( + input_tfrecord, coder=beam.coders.BytesCoder()) + | 'AddKeys' >> beam.Map(add_keys)) + output_collection = input_collection | 'ExtractEmbedding' >> beam.ParDo( + GenerateEmbeddingDataFn(model_dir, top_k_embedding_count, + bottom_k_embedding_count)) + output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle() + _ = output_collection | 'DropKeys' >> beam.Map( + drop_keys) | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord( + output_tfrecord, + num_shards=num_shards, + coder=beam.coders.ProtoCoder(tf.train.Example)) + + +def parse_args(argv): + """Command-line argument parser. + + Args: + argv: command line arguments + Returns: + beam_args: Arguments for the beam pipeline. + pipeline_args: Arguments for the pipeline options, such as runner type. + """ + parser = argparse.ArgumentParser() + parser.add_argument( + '--embedding_input_tfrecord', + dest='embedding_input_tfrecord', + required=True, + help='TFRecord containing images in tf.Example format for object ' + 'detection.') + parser.add_argument( + '--embedding_output_tfrecord', + dest='embedding_output_tfrecord', + required=True, + help='TFRecord containing embeddings in tf.Example format.') + parser.add_argument( + '--embedding_model_dir', + dest='embedding_model_dir', + required=True, + help='Path to directory containing an object detection SavedModel with' + 'detection_box_classifier_features in the output.') + parser.add_argument( + '--top_k_embedding_count', + dest='top_k_embedding_count', + default=1, + help='The number of top k embeddings to add to the memory bank.') + parser.add_argument( + '--bottom_k_embedding_count', + dest='bottom_k_embedding_count', + default=0, + help='The number of bottom k embeddings to add to the memory bank.') + parser.add_argument( + '--num_shards', + dest='num_shards', + default=0, + help='Number of output shards.') + beam_args, pipeline_args = parser.parse_known_args(argv) + return beam_args, pipeline_args + + +def main(argv=None, save_main_session=True): + """Runs the Beam pipeline that performs inference. + + Args: + argv: Command line arguments. + save_main_session: Whether to save the main session. 
+ """ + args, pipeline_args = parse_args(argv) + + pipeline_options = beam.options.pipeline_options.PipelineOptions( + pipeline_args) + pipeline_options.view_as( + beam.options.pipeline_options.SetupOptions).save_main_session = ( + save_main_session) + + dirname = os.path.dirname(args.embedding_output_tfrecord) + tf.io.gfile.makedirs(dirname) + + p = beam.Pipeline(options=pipeline_options) + + construct_pipeline( + p, + args.embedding_input_tfrecord, + args.embedding_output_tfrecord, + args.embedding_model_dir, + args.top_k_embedding_count, + args.bottom_k_embedding_count, + args.num_shards) + + p.run() + + +if __name__ == '__main__': + main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5566d6d5f35b7bccf363be9a9a1088baef326e3f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py @@ -0,0 +1,330 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for generate_embedding_data.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import contextlib +import os +import tempfile +import unittest +import numpy as np +import six +import tensorflow as tf +from object_detection import exporter_lib_v2 +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.protos import pipeline_pb2 +from object_detection.utils import tf_version + +if tf_version.is_tf2(): + from object_detection.dataset_tools.context_rcnn import generate_embedding_data # pylint:disable=g-import-not-at-top + +if six.PY2: + import mock # pylint: disable=g-import-not-at-top +else: + mock = unittest.mock + +try: + import apache_beam as beam # pylint:disable=g-import-not-at-top +except ModuleNotFoundError: + pass + + +class FakeModel(model.DetectionModel): + + def __init__(self, conv_weight_scalar=1.0): + super(FakeModel, self).__init__(num_classes=5) + self._conv = tf.keras.layers.Conv2D( + filters=1, kernel_size=1, strides=(1, 1), padding='valid', + kernel_initializer=tf.keras.initializers.Constant( + value=conv_weight_scalar)) + + def preprocess(self, inputs): + true_image_shapes = [] # Doesn't matter for the fake model. 
+ return tf.identity(inputs), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes): + return {'image': self._conv(preprocessed_inputs)} + + def postprocess(self, prediction_dict, true_image_shapes): + with tf.control_dependencies(prediction_dict.values()): + num_features = 100 + feature_dims = 10 + classifier_feature = np.ones( + (2, feature_dims, feature_dims, num_features), + dtype=np.float32).tolist() + postprocessed_tensors = { + 'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6], + [0.5, 0.5, 0.8, 0.8]]], tf.float32), + 'detection_scores': tf.constant([[0.95, 0.6]], tf.float32), + 'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2], + [0.3, 0.1, 0.6]]], + tf.float32), + 'detection_classes': tf.constant([[0, 1]], tf.float32), + 'num_detections': tf.constant([2], tf.float32), + 'detection_features': + tf.constant([classifier_feature], + tf.float32) + } + return postprocessed_tensors + + def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): + pass + + def restore_from_objects(self, fine_tune_checkpoint_type): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + +@contextlib.contextmanager +def InMemoryTFRecord(entries): + temp = tempfile.NamedTemporaryFile(delete=False) + filename = temp.name + try: + with tf.io.TFRecordWriter(filename) as writer: + for value in entries: + writer.write(value) + yield filename + finally: + os.unlink(temp.name) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class GenerateEmbeddingData(tf.test.TestCase): + + def _save_checkpoint_from_mock_model(self, checkpoint_path): + """A function to save checkpoint from a fake Detection Model. + + Args: + checkpoint_path: Path to save checkpoint from Fake model. 
+ """ + mock_model = FakeModel() + fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) + preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + ckpt = tf.train.Checkpoint(model=mock_model) + exported_checkpoint_manager = tf.train.CheckpointManager( + ckpt, checkpoint_path, max_to_keep=1) + exported_checkpoint_manager.save(checkpoint_number=0) + + def _export_saved_model(self): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + output_directory = os.path.join(tmp_dir, 'output') + saved_model_path = os.path.join(output_directory, 'saved_model') + tf.io.gfile.makedirs(output_directory) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter_lib_v2.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory) + saved_model_path = os.path.join(output_directory, 'saved_model') + return saved_model_path + + def _create_tf_example(self): + encoded_image = tf.io.encode_jpeg( + tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy() + + def BytesFeature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + def Int64Feature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + + def FloatFeature(value): + return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) + + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': BytesFeature(encoded_image), + 'image/source_id': BytesFeature(b'image_id'), + 'image/height': Int64Feature(400), + 'image/width': Int64Feature(600), + 'image/class/label': Int64Feature(5), + 'image/class/text': BytesFeature(b'hyena'), + 'image/object/bbox/xmin': FloatFeature(0.1), + 'image/object/bbox/xmax': FloatFeature(0.6), + 'image/object/bbox/ymin': FloatFeature(0.0), + 'image/object/bbox/ymax': FloatFeature(0.5), + 'image/object/class/score': FloatFeature(0.95), + 'image/object/class/label': Int64Feature(5), + 'image/object/class/text': BytesFeature(b'hyena'), + 'image/date_captured': BytesFeature(b'2019-10-20 12:12:12') + })) + + return example.SerializeToString() + + def assert_expected_example(self, example, topk=False, botk=False): + # Check embeddings + if topk or botk: + self.assertEqual(len( + example.features.feature['image/embedding'].float_list.value), + 218) + self.assertAllEqual( + example.features.feature['image/embedding_count'].int64_list.value, + [2]) + else: + self.assertEqual(len( + example.features.feature['image/embedding'].float_list.value), + 109) + self.assertAllEqual( + example.features.feature['image/embedding_count'].int64_list.value, + [1]) + + self.assertAllEqual( + example.features.feature['image/embedding_length'].int64_list.value, + [109]) + + # Check annotations + self.assertAllClose( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.0]) + self.assertAllClose( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.1]) + self.assertAllClose( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.5]) + self.assertAllClose( + 
example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.6]) + self.assertAllClose( + example.features.feature['image/object/class/score'] + .float_list.value, [0.95]) + self.assertAllClose( + example.features.feature['image/object/class/label'] + .int64_list.value, [5]) + self.assertAllEqual( + example.features.feature['image/object/class/text'] + .bytes_list.value, [b'hyena']) + self.assertAllClose( + example.features.feature['image/class/label'] + .int64_list.value, [5]) + self.assertAllEqual( + example.features.feature['image/class/text'] + .bytes_list.value, [b'hyena']) + + # Check other essential attributes. + self.assertAllEqual( + example.features.feature['image/height'].int64_list.value, [400]) + self.assertAllEqual( + example.features.feature['image/width'].int64_list.value, [600]) + self.assertAllEqual( + example.features.feature['image/source_id'].bytes_list.value, + [b'image_id']) + self.assertTrue( + example.features.feature['image/encoded'].bytes_list.value) + + def test_generate_embedding_data_fn(self): + saved_model_path = self._export_saved_model() + top_k_embedding_count = 1 + bottom_k_embedding_count = 0 + inference_fn = generate_embedding_data.GenerateEmbeddingDataFn( + saved_model_path, top_k_embedding_count, bottom_k_embedding_count) + inference_fn.setup() + generated_example = self._create_tf_example() + self.assertAllEqual(tf.train.Example.FromString( + generated_example).features.feature['image/object/class/label'] + .int64_list.value, [5]) + self.assertAllEqual(tf.train.Example.FromString( + generated_example).features.feature['image/object/class/text'] + .bytes_list.value, [b'hyena']) + output = inference_fn.process(('dummy_key', generated_example)) + output_example = output[0][1] + self.assert_expected_example(output_example) + + def test_generate_embedding_data_with_top_k_boxes(self): + saved_model_path = self._export_saved_model() + top_k_embedding_count = 2 + bottom_k_embedding_count = 0 + inference_fn = generate_embedding_data.GenerateEmbeddingDataFn( + saved_model_path, top_k_embedding_count, bottom_k_embedding_count) + inference_fn.setup() + generated_example = self._create_tf_example() + self.assertAllEqual( + tf.train.Example.FromString(generated_example).features + .feature['image/object/class/label'].int64_list.value, [5]) + self.assertAllEqual( + tf.train.Example.FromString(generated_example).features + .feature['image/object/class/text'].bytes_list.value, [b'hyena']) + output = inference_fn.process(('dummy_key', generated_example)) + output_example = output[0][1] + self.assert_expected_example(output_example, topk=True) + + def test_generate_embedding_data_with_bottom_k_boxes(self): + saved_model_path = self._export_saved_model() + top_k_embedding_count = 0 + bottom_k_embedding_count = 2 + inference_fn = generate_embedding_data.GenerateEmbeddingDataFn( + saved_model_path, top_k_embedding_count, bottom_k_embedding_count) + inference_fn.setup() + generated_example = self._create_tf_example() + self.assertAllEqual( + tf.train.Example.FromString(generated_example).features + .feature['image/object/class/label'].int64_list.value, [5]) + self.assertAllEqual( + tf.train.Example.FromString(generated_example).features + .feature['image/object/class/text'].bytes_list.value, [b'hyena']) + output = inference_fn.process(('dummy_key', generated_example)) + output_example = output[0][1] + self.assert_expected_example(output_example, botk=True) + + def test_beam_pipeline(self): + with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord: 
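# ---------------------------------------------------------------------------
# Editor's note (not part of this patch): the embedding sizes asserted in these
# tests follow from the FakeModel and the helpers in generate_embedding_data.py:
# the pooled detection feature has 100 channels, embed_position_and_size adds 4
# values and embed_date_captured adds 5, so each stored box contributes
# 100 + 4 + 5 = 109 floats. With one box kept, 'image/embedding' holds 109
# values; with two boxes (topk or botk) it holds 218, while
# 'image/embedding_length' stays 109, the per-box length.
per_box_length = 100 + 4 + 5   # pooled channels + box embedding + date embedding
assert per_box_length == 109 and 2 * per_box_length == 218
# ---------------------------------------------------------------------------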
+ temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR')) + output_tfrecord = os.path.join(temp_dir, 'output_tfrecord') + saved_model_path = self._export_saved_model() + top_k_embedding_count = 1 + bottom_k_embedding_count = 0 + num_shards = 1 + pipeline_options = beam.options.pipeline_options.PipelineOptions( + runner='DirectRunner') + p = beam.Pipeline(options=pipeline_options) + generate_embedding_data.construct_pipeline( + p, input_tfrecord, output_tfrecord, saved_model_path, + top_k_embedding_count, bottom_k_embedding_count, num_shards) + p.run() + filenames = tf.io.gfile.glob( + output_tfrecord + '-?????-of-?????') + actual_output = [] + record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() + for record in record_iterator: + actual_output.append(record) + self.assertEqual(len(actual_output), 1) + self.assert_expected_example(tf.train.Example.FromString( + actual_output[0])) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_ava_actions_tf_record.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_ava_actions_tf_record.py new file mode 100644 index 0000000000000000000000000000000000000000..a27001d879c48e1e10194015f20eecb0724dfdf9 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_ava_actions_tf_record.py @@ -0,0 +1,540 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Code to download and parse the AVA Actions dataset for TensorFlow models. + +The [AVA Actions data set]( +https://research.google.com/ava/index.html) +is a dataset for human action recognition. + +This script downloads the annotations and prepares data from similar annotations +if local video files are available. The video files can be downloaded +from the following website: +https://github.com/cvdfoundation/ava-dataset + +Prior to running this script, please run download_and_preprocess_ava.sh to +download input videos. + +Running this code as a module generates the data set on disk. First, the +required files are downloaded (_download_data) which enables constructing the +label map. Then (in generate_examples), for each split in the data set, the +metadata and image frames are generated from the annotations for each sequence +example (_generate_examples). The data set is written to disk as a set of +numbered TFRecord files. + +Generating the data on disk can take considerable time and disk space. +(Image compression quality is the primary determiner of disk usage. + +If using the Tensorflow Object Detection API, set the input_type field +in the input_reader to TF_SEQUENCE_EXAMPLE. 
If using this script to generate +data for Context R-CNN scripts, the --examples_for_context flag should be +set to true, so that properly-formatted tf.example objects are written to disk. + +This data is structured for per-clip action classification where images is +the sequence of images and labels are a one-hot encoded value. See +as_dataset() for more details. + +Note that the number of videos changes in the data set over time, so it will +likely be necessary to change the expected number of examples. + +The argument video_path_format_string expects a value as such: + '/path/to/videos/{0}' + +""" +import collections +import contextlib +import csv +import glob +import hashlib +import os +import random +import sys +import zipfile + +from absl import app +from absl import flags +from absl import logging +import cv2 +from six.moves import range +from six.moves import urllib +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import seq_example_util +from object_detection.utils import dataset_util +from object_detection.utils import label_map_util + + +POSSIBLE_TIMESTAMPS = range(902, 1798) +ANNOTATION_URL = 'https://research.google.com/ava/download/ava_v2.2.zip' +SECONDS_TO_MILLI = 1000 +FILEPATTERN = 'ava_actions_%s_1fps_rgb' +SPLITS = { + 'train': { + 'shards': 1000, + 'examples': 862663, + 'csv': '', + 'excluded-csv': '' + }, + 'val': { + 'shards': 100, + 'examples': 243029, + 'csv': '', + 'excluded-csv': '' + }, + # Test doesn't have ground truth, so TF Records can't be created + 'test': { + 'shards': 100, + 'examples': 0, + 'csv': '', + 'excluded-csv': '' + } +} + +NUM_CLASSES = 80 + + +def feature_list_feature(value): + return tf.train.FeatureList(feature=value) + + +class Ava(object): + """Generates and loads the AVA Actions 2.2 data set.""" + + def __init__(self, path_to_output_dir, path_to_data_download): + if not path_to_output_dir: + raise ValueError('You must supply the path to the data directory.') + self.path_to_data_download = path_to_data_download + self.path_to_output_dir = path_to_output_dir + + def generate_and_write_records(self, + splits_to_process='train,val,test', + video_path_format_string=None, + seconds_per_sequence=10, + hop_between_sequences=10, + examples_for_context=False): + """Downloads data and generates sharded TFRecords. + + Downloads the data files, generates metadata, and processes the metadata + with MediaPipe to produce tf.SequenceExamples for training. The resulting + files can be read with as_dataset(). After running this function the + original data files can be deleted. + + Args: + splits_to_process: csv string of which splits to process. Allows + providing a custom CSV with the CSV flag. The original data is still + downloaded to generate the label_map. + video_path_format_string: The format string for the path to local files. + seconds_per_sequence: The length of each sequence, in seconds. + hop_between_sequences: The gap between the centers of + successive sequences. + examples_for_context: Whether to generate sequence examples with context + for context R-CNN. 
+ """ + example_function = self._generate_sequence_examples + if examples_for_context: + example_function = self._generate_examples + + logging.info('Downloading data.') + download_output = self._download_data() + for key in splits_to_process.split(','): + logging.info('Generating examples for split: %s', key) + all_metadata = list(example_function( + download_output[0][key][0], download_output[0][key][1], + download_output[1], seconds_per_sequence, hop_between_sequences, + video_path_format_string)) + logging.info('An example of the metadata: ') + logging.info(all_metadata[0]) + random.seed(47) + random.shuffle(all_metadata) + shards = SPLITS[key]['shards'] + shard_names = [os.path.join( + self.path_to_output_dir, FILEPATTERN % key + '-%05d-of-%05d' % ( + i, shards)) for i in range(shards)] + writers = [tf.io.TFRecordWriter(shard) for shard in shard_names] + with _close_on_exit(writers) as writers: + for i, seq_ex in enumerate(all_metadata): + writers[i % len(writers)].write(seq_ex.SerializeToString()) + logging.info('Data extraction complete.') + + def _generate_sequence_examples(self, annotation_file, excluded_file, + label_map, seconds_per_sequence, + hop_between_sequences, + video_path_format_string): + """For each row in the annotation CSV, generates corresponding examples. + + When iterating through frames for a single sequence example, skips over + excluded frames. When moving to the next sequence example, also skips over + excluded frames as if they don't exist. Generates equal-length sequence + examples, each with length seconds_per_sequence (1 fps) and gaps of + hop_between_sequences frames (and seconds) between them, possible greater + due to excluded frames. + + Args: + annotation_file: path to the file of AVA CSV annotations. + excluded_file: path to a CSV file of excluded timestamps for each video. + label_map: an {int: string} label map. + seconds_per_sequence: The number of seconds per example in each example. + hop_between_sequences: The hop between sequences. If less than + seconds_per_sequence, will overlap. + video_path_format_string: File path format to glob video files. + + Yields: + Each prepared tf.SequenceExample of metadata also containing video frames + """ + fieldnames = ['id', 'timestamp_seconds', 'xmin', 'ymin', 'xmax', 'ymax', + 'action_label'] + frame_excluded = {} + # create a sparse, nested map of videos and frame indices. 
+ with open(excluded_file, 'r') as excluded: + reader = csv.reader(excluded) + for row in reader: + frame_excluded[(row[0], int(float(row[1])))] = True + with open(annotation_file, 'r') as annotations: + reader = csv.DictReader(annotations, fieldnames) + frame_annotations = collections.defaultdict(list) + ids = set() + # aggreggate by video and timestamp: + for row in reader: + ids.add(row['id']) + key = (row['id'], int(float(row['timestamp_seconds']))) + frame_annotations[key].append(row) + # for each video, find aggregates near each sampled frame.: + logging.info('Generating metadata...') + media_num = 1 + for media_id in ids: + logging.info('%d/%d, ignore warnings.\n', media_num, len(ids)) + media_num += 1 + + filepath = glob.glob( + video_path_format_string.format(media_id) + '*')[0] + cur_vid = cv2.VideoCapture(filepath) + width = cur_vid.get(cv2.CAP_PROP_FRAME_WIDTH) + height = cur_vid.get(cv2.CAP_PROP_FRAME_HEIGHT) + middle_frame_time = POSSIBLE_TIMESTAMPS[0] + while middle_frame_time < POSSIBLE_TIMESTAMPS[-1]: + start_time = middle_frame_time - seconds_per_sequence // 2 - ( + 0 if seconds_per_sequence % 2 == 0 else 1) + end_time = middle_frame_time + (seconds_per_sequence // 2) + + total_boxes = [] + total_labels = [] + total_label_strings = [] + total_images = [] + total_source_ids = [] + total_confidences = [] + total_is_annotated = [] + windowed_timestamp = start_time + + while windowed_timestamp < end_time: + if (media_id, windowed_timestamp) in frame_excluded: + end_time += 1 + windowed_timestamp += 1 + logging.info('Ignoring and skipping excluded frame.') + continue + + cur_vid.set(cv2.CAP_PROP_POS_MSEC, + (windowed_timestamp) * SECONDS_TO_MILLI) + _, image = cur_vid.read() + _, buffer = cv2.imencode('.jpg', image) + + bufstring = buffer.tostring() + total_images.append(bufstring) + source_id = str(windowed_timestamp) + '_' + media_id + total_source_ids.append(source_id) + total_is_annotated.append(1) + + boxes = [] + labels = [] + label_strings = [] + confidences = [] + for row in frame_annotations[(media_id, windowed_timestamp)]: + if len(row) > 2 and int(row['action_label']) in label_map: + boxes.append([float(row['ymin']), float(row['xmin']), + float(row['ymax']), float(row['xmax'])]) + labels.append(int(row['action_label'])) + label_strings.append(label_map[int(row['action_label'])]) + confidences.append(1) + else: + logging.warning('Unknown label: %s', row['action_label']) + + total_boxes.append(boxes) + total_labels.append(labels) + total_label_strings.append(label_strings) + total_confidences.append(confidences) + windowed_timestamp += 1 + + if total_boxes: + yield seq_example_util.make_sequence_example( + 'AVA', media_id, total_images, int(height), int(width), 'jpeg', + total_source_ids, None, total_is_annotated, total_boxes, + total_label_strings, use_strs_for_source_id=True) + + # Move middle_time_frame, skipping excluded frames + frames_mv = 0 + frames_excluded_count = 0 + while (frames_mv < hop_between_sequences + frames_excluded_count + and middle_frame_time + frames_mv < POSSIBLE_TIMESTAMPS[-1]): + frames_mv += 1 + if (media_id, windowed_timestamp + frames_mv) in frame_excluded: + frames_excluded_count += 1 + middle_frame_time += frames_mv + + cur_vid.release() + + def _generate_examples(self, annotation_file, excluded_file, label_map, + seconds_per_sequence, hop_between_sequences, + video_path_format_string): + """For each row in the annotation CSV, generates examples. + + When iterating through frames for a single example, skips + over excluded frames. 
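The frame loop above seeks the opened video to each annotated second with CAP_PROP_POS_MSEC and JPEG-encodes the grabbed frame in memory, at the 1 fps rate the AVA annotations use. A stripped-down sketch of that frame-grabbing step (the function name, video path, and quality argument are illustrative additions):

    import cv2

    SECONDS_TO_MILLI = 1000

    def grab_jpeg_frames(video_path, timestamps_sec, quality=90):
        """Returns one in-memory JPEG per requested timestamp (in seconds)."""
        cap = cv2.VideoCapture(video_path)
        encoded = []
        try:
            for t in timestamps_sec:
                # Seek by wall-clock position rather than by frame index.
                cap.set(cv2.CAP_PROP_POS_MSEC, t * SECONDS_TO_MILLI)
                ok, frame = cap.read()
                if not ok:
                    continue  # past the end of the video or a decode failure
                ok, buf = cv2.imencode(
                    '.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
                if ok:
                    encoded.append(buf.tobytes())
        finally:
            cap.release()
        return encoded

The module docstring's warning that image compression quality drives disk usage corresponds directly to the JPEG quality parameter here.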
Generates equal-length sequence examples, each with + length seconds_per_sequence (1 fps) and gaps of hop_between_sequences + frames (and seconds) between them, possible greater due to excluded frames. + + Args: + annotation_file: path to the file of AVA CSV annotations. + excluded_file: path to a CSV file of excluded timestamps for each video. + label_map: an {int: string} label map. + seconds_per_sequence: The number of seconds per example in each example. + hop_between_sequences: The hop between sequences. If less than + seconds_per_sequence, will overlap. + video_path_format_string: File path format to glob video files. + + Yields: + Each prepared tf.Example of metadata also containing video frames + """ + del seconds_per_sequence + del hop_between_sequences + fieldnames = ['id', 'timestamp_seconds', 'xmin', 'ymin', 'xmax', 'ymax', + 'action_label'] + frame_excluded = {} + # create a sparse, nested map of videos and frame indices. + with open(excluded_file, 'r') as excluded: + reader = csv.reader(excluded) + for row in reader: + frame_excluded[(row[0], int(float(row[1])))] = True + with open(annotation_file, 'r') as annotations: + reader = csv.DictReader(annotations, fieldnames) + frame_annotations = collections.defaultdict(list) + ids = set() + # aggreggate by video and timestamp: + for row in reader: + ids.add(row['id']) + key = (row['id'], int(float(row['timestamp_seconds']))) + frame_annotations[key].append(row) + # for each video, find aggreggates near each sampled frame.: + logging.info('Generating metadata...') + media_num = 1 + for media_id in ids: + logging.info('%d/%d, ignore warnings.\n', media_num, len(ids)) + media_num += 1 + + filepath = glob.glob( + video_path_format_string.format(media_id) + '*')[0] + cur_vid = cv2.VideoCapture(filepath) + width = cur_vid.get(cv2.CAP_PROP_FRAME_WIDTH) + height = cur_vid.get(cv2.CAP_PROP_FRAME_HEIGHT) + middle_frame_time = POSSIBLE_TIMESTAMPS[0] + total_non_excluded = 0 + while middle_frame_time < POSSIBLE_TIMESTAMPS[-1]: + if (media_id, middle_frame_time) not in frame_excluded: + total_non_excluded += 1 + middle_frame_time += 1 + + middle_frame_time = POSSIBLE_TIMESTAMPS[0] + cur_frame_num = 0 + while middle_frame_time < POSSIBLE_TIMESTAMPS[-1]: + cur_vid.set(cv2.CAP_PROP_POS_MSEC, + middle_frame_time * SECONDS_TO_MILLI) + _, image = cur_vid.read() + _, buffer = cv2.imencode('.jpg', image) + + bufstring = buffer.tostring() + + if (media_id, middle_frame_time) in frame_excluded: + middle_frame_time += 1 + logging.info('Ignoring and skipping excluded frame.') + continue + + cur_frame_num += 1 + source_id = str(middle_frame_time) + '_' + media_id + + xmins = [] + xmaxs = [] + ymins = [] + ymaxs = [] + areas = [] + labels = [] + label_strings = [] + confidences = [] + for row in frame_annotations[(media_id, middle_frame_time)]: + if len(row) > 2 and int(row['action_label']) in label_map: + xmins.append(float(row['xmin'])) + xmaxs.append(float(row['xmax'])) + ymins.append(float(row['ymin'])) + ymaxs.append(float(row['ymax'])) + areas.append(float((xmaxs[-1] - xmins[-1]) * + (ymaxs[-1] - ymins[-1])) / 2) + labels.append(int(row['action_label'])) + label_strings.append(label_map[int(row['action_label'])]) + confidences.append(1) + else: + logging.warning('Unknown label: %s', row['action_label']) + + middle_frame_time += 1/3 + if abs(middle_frame_time - round(middle_frame_time) < 0.0001): + middle_frame_time = round(middle_frame_time) + + key = hashlib.sha256(bufstring).hexdigest() + date_captured_feature = ( + '2020-06-17 00:%02d:%02d' % 
((middle_frame_time - 900)*3 // 60, + (middle_frame_time - 900)*3 % 60)) + context_feature_dict = { + 'image/height': + dataset_util.int64_feature(int(height)), + 'image/width': + dataset_util.int64_feature(int(width)), + 'image/format': + dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/source_id': + dataset_util.bytes_feature(source_id.encode('utf8')), + 'image/filename': + dataset_util.bytes_feature(source_id.encode('utf8')), + 'image/encoded': + dataset_util.bytes_feature(bufstring), + 'image/key/sha256': + dataset_util.bytes_feature(key.encode('utf8')), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(xmins), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(xmaxs), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(ymins), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(ymaxs), + 'image/object/area': + dataset_util.float_list_feature(areas), + 'image/object/class/label': + dataset_util.int64_list_feature(labels), + 'image/object/class/text': + dataset_util.bytes_list_feature(label_strings), + 'image/location': + dataset_util.bytes_feature(media_id.encode('utf8')), + 'image/date_captured': + dataset_util.bytes_feature( + date_captured_feature.encode('utf8')), + 'image/seq_num_frames': + dataset_util.int64_feature(total_non_excluded), + 'image/seq_frame_num': + dataset_util.int64_feature(cur_frame_num), + 'image/seq_id': + dataset_util.bytes_feature(media_id.encode('utf8')), + } + + yield tf.train.Example( + features=tf.train.Features(feature=context_feature_dict)) + + cur_vid.release() + + def _download_data(self): + """Downloads and extracts data if not already available.""" + if sys.version_info >= (3, 0): + urlretrieve = urllib.request.urlretrieve + else: + urlretrieve = urllib.request.urlretrieve + logging.info('Creating data directory.') + tf.io.gfile.makedirs(self.path_to_data_download) + logging.info('Downloading annotations.') + paths = {} + + zip_path = os.path.join(self.path_to_data_download, + ANNOTATION_URL.split('/')[-1]) + urlretrieve(ANNOTATION_URL, zip_path) + with zipfile.ZipFile(zip_path, 'r') as zip_ref: + zip_ref.extractall(self.path_to_data_download) + for split in ['train', 'test', 'val']: + csv_path = os.path.join(self.path_to_data_download, + 'ava_%s_v2.2.csv' % split) + excl_name = 'ava_%s_excluded_timestamps_v2.2.csv' % split + excluded_csv_path = os.path.join(self.path_to_data_download, excl_name) + SPLITS[split]['csv'] = csv_path + SPLITS[split]['excluded-csv'] = excluded_csv_path + paths[split] = (csv_path, excluded_csv_path) + + label_map = self.get_label_map(os.path.join( + self.path_to_data_download, + 'ava_action_list_v2.2_for_activitynet_2019.pbtxt')) + return paths, label_map + + def get_label_map(self, path): + """Parses a label map into {integer:string} format.""" + label_map_dict = label_map_util.get_label_map_dict(path) + label_map_dict = {v: bytes(k, 'utf8') for k, v in label_map_dict.items()} + logging.info(label_map_dict) + return label_map_dict + + +@contextlib.contextmanager +def _close_on_exit(writers): + """Call close on all writers on exit.""" + try: + yield writers + finally: + for writer in writers: + writer.close() + + +def main(argv): + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + Ava(flags.FLAGS.path_to_output_dir, + flags.FLAGS.path_to_download_data).generate_and_write_records( + flags.FLAGS.splits_to_process, + flags.FLAGS.video_path_format_string, + flags.FLAGS.seconds_per_sequence, + flags.FLAGS.hop_between_sequences, + 
flags.FLAGS.examples_for_context) + +if __name__ == '__main__': + flags.DEFINE_string('path_to_download_data', + '', + 'Path to directory to download data to.') + flags.DEFINE_string('path_to_output_dir', + '', + 'Path to directory to write data to.') + flags.DEFINE_string('splits_to_process', + 'train,val', + 'Process these splits. Useful for custom data splits.') + flags.DEFINE_string('video_path_format_string', + None, + 'The format string for the path to local video files. ' + 'Uses the Python string.format() syntax with possible ' + 'arguments of {video}, {start}, {end}, {label_name}, and ' + '{split}, corresponding to columns of the data csvs.') + flags.DEFINE_integer('seconds_per_sequence', + 10, + 'The number of seconds per example in each example.' + 'Always 1 when examples_for_context is True.') + flags.DEFINE_integer('hop_between_sequences', + 10, + 'The hop between sequences. If less than ' + 'seconds_per_sequence, will overlap. Always 1 when ' + 'examples_for_context is True.') + flags.DEFINE_boolean('examples_for_context', + False, + 'Whether to generate examples instead of sequence ' + 'examples. If true, will generate tf.Example objects ' + 'for use in Context R-CNN.') + app.run(main) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_coco_tf_record.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_coco_tf_record.py new file mode 100644 index 0000000000000000000000000000000000000000..2703c427e9bae8ebca5233f1ddaf7c42e5f1b82e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_coco_tf_record.py @@ -0,0 +1,518 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Convert raw COCO dataset to TFRecord for object_detection. + +This tool supports data generation for object detection (boxes, masks), +keypoint detection, and DensePose. + +Please note that this tool creates sharded output files. 
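Both converters in this diff follow the same command-line pattern: module-level flag definitions plus a main() handed to the flags runner (absl's app.run for the AVA tool above, tf.app.run for the COCO tool whose header begins here); the usage block that follows shows the COCO flags in action. A minimal absl sketch of the pattern, with partly illustrative flag names:

    from absl import app
    from absl import flags

    flags.DEFINE_string('path_to_output_dir', '', 'Where TFRecords are written.')
    flags.DEFINE_integer('num_shards', 10, 'Number of output shards.')  # illustrative

    FLAGS = flags.FLAGS

    def main(argv):
        if len(argv) > 1:
            raise app.UsageError('Too many command-line arguments.')
        print('Writing %d shards under %s' % (FLAGS.num_shards, FLAGS.path_to_output_dir))

    if __name__ == '__main__':
        app.run(main)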
+ +Example usage: + python create_coco_tf_record.py --logtostderr \ + --train_image_dir="${TRAIN_IMAGE_DIR}" \ + --val_image_dir="${VAL_IMAGE_DIR}" \ + --test_image_dir="${TEST_IMAGE_DIR}" \ + --train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \ + --val_annotations_file="${VAL_ANNOTATIONS_FILE}" \ + --testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \ + --output_dir="${OUTPUT_DIR}" +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import hashlib +import io +import json +import logging +import os +import contextlib2 +import numpy as np +import PIL.Image + +from pycocotools import mask +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import tf_record_creation_util +from object_detection.utils import dataset_util +from object_detection.utils import label_map_util + +flags = tf.app.flags +tf.flags.DEFINE_boolean( + 'include_masks', False, 'Whether to include instance segmentations masks ' + '(PNG encoded) in the result. default: False.') +tf.flags.DEFINE_string('train_image_dir', '', 'Training image directory.') +tf.flags.DEFINE_string('val_image_dir', '', 'Validation image directory.') +tf.flags.DEFINE_string('test_image_dir', '', 'Test image directory.') +tf.flags.DEFINE_string('train_annotations_file', '', + 'Training annotations JSON file.') +tf.flags.DEFINE_string('val_annotations_file', '', + 'Validation annotations JSON file.') +tf.flags.DEFINE_string('testdev_annotations_file', '', + 'Test-dev annotations JSON file.') +tf.flags.DEFINE_string('train_keypoint_annotations_file', '', + 'Training annotations JSON file.') +tf.flags.DEFINE_string('val_keypoint_annotations_file', '', + 'Validation annotations JSON file.') +# DensePose is only available for coco 2014. +tf.flags.DEFINE_string('train_densepose_annotations_file', '', + 'Training annotations JSON file for DensePose.') +tf.flags.DEFINE_string('val_densepose_annotations_file', '', + 'Validation annotations JSON file for DensePose.') +tf.flags.DEFINE_string('output_dir', '/tmp/', 'Output data directory.') +# Whether to only produce images/annotations on person class (for keypoint / +# densepose task). 
+tf.flags.DEFINE_boolean('remove_non_person_annotations', False, 'Whether to ' + 'remove all annotations for non-person objects.') +tf.flags.DEFINE_boolean('remove_non_person_images', False, 'Whether to ' + 'remove all examples that do not contain a person.') + +FLAGS = flags.FLAGS + +logger = tf.get_logger() +logger.setLevel(logging.INFO) + +_COCO_KEYPOINT_NAMES = [ + b'nose', b'left_eye', b'right_eye', b'left_ear', b'right_ear', + b'left_shoulder', b'right_shoulder', b'left_elbow', b'right_elbow', + b'left_wrist', b'right_wrist', b'left_hip', b'right_hip', + b'left_knee', b'right_knee', b'left_ankle', b'right_ankle' +] + +_COCO_PART_NAMES = [ + b'torso_back', b'torso_front', b'right_hand', b'left_hand', b'left_foot', + b'right_foot', b'right_upper_leg_back', b'left_upper_leg_back', + b'right_upper_leg_front', b'left_upper_leg_front', b'right_lower_leg_back', + b'left_lower_leg_back', b'right_lower_leg_front', b'left_lower_leg_front', + b'left_upper_arm_back', b'right_upper_arm_back', b'left_upper_arm_front', + b'right_upper_arm_front', b'left_lower_arm_back', b'right_lower_arm_back', + b'left_lower_arm_front', b'right_lower_arm_front', b'right_face', + b'left_face', +] + +_DP_PART_ID_OFFSET = 1 + + +def clip_to_unit(x): + return min(max(x, 0.0), 1.0) + + +def create_tf_example(image, + annotations_list, + image_dir, + category_index, + include_masks=False, + keypoint_annotations_dict=None, + densepose_annotations_dict=None, + remove_non_person_annotations=False, + remove_non_person_images=False): + """Converts image and annotations to a tf.Example proto. + + Args: + image: dict with keys: [u'license', u'file_name', u'coco_url', u'height', + u'width', u'date_captured', u'flickr_url', u'id'] + annotations_list: + list of dicts with keys: [u'segmentation', u'area', u'iscrowd', + u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box + coordinates in the official COCO dataset are given as [x, y, width, + height] tuples using absolute coordinates where x, y represent the + top-left (0-indexed) corner. This function converts to the format + expected by the Tensorflow Object Detection API (which is which is + [ymin, xmin, ymax, xmax] with coordinates normalized relative to image + size). + image_dir: directory containing the image files. + category_index: a dict containing COCO category information keyed by the + 'id' field of each category. See the label_map_util.create_category_index + function. + include_masks: Whether to include instance segmentations masks + (PNG encoded) in the result. default: False. + keypoint_annotations_dict: A dictionary that maps from annotation_id to a + dictionary with keys: [u'keypoints', u'num_keypoints'] represeting the + keypoint information for this person object annotation. If None, then + no keypoint annotations will be populated. + densepose_annotations_dict: A dictionary that maps from annotation_id to a + dictionary with keys: [u'dp_I', u'dp_x', u'dp_y', 'dp_U', 'dp_V'] + representing part surface coordinates. For more information see + http://densepose.org/. + remove_non_person_annotations: Whether to remove any annotations that are + not the "person" class. + remove_non_person_images: Whether to remove any images that do not contain + at least one "person" annotation. + + Returns: + key: SHA256 hash of the image. + example: The converted tf.Example + num_annotations_skipped: Number of (invalid) annotations that were ignored. + num_keypoint_annotation_skipped: Number of keypoint annotations that were + skipped. 
+ num_densepose_annotation_skipped: Number of DensePose annotations that were + skipped. + + Raises: + ValueError: if the image pointed to by data['filename'] is not a valid JPEG + """ + image_height = image['height'] + image_width = image['width'] + filename = image['file_name'] + image_id = image['id'] + + full_path = os.path.join(image_dir, filename) + with tf.gfile.GFile(full_path, 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = PIL.Image.open(encoded_jpg_io) + key = hashlib.sha256(encoded_jpg).hexdigest() + + xmin = [] + xmax = [] + ymin = [] + ymax = [] + is_crowd = [] + category_names = [] + category_ids = [] + area = [] + encoded_mask_png = [] + keypoints_x = [] + keypoints_y = [] + keypoints_visibility = [] + keypoints_name = [] + num_keypoints = [] + include_keypoint = keypoint_annotations_dict is not None + num_annotations_skipped = 0 + num_keypoint_annotation_used = 0 + num_keypoint_annotation_skipped = 0 + dp_part_index = [] + dp_x = [] + dp_y = [] + dp_u = [] + dp_v = [] + dp_num_points = [] + densepose_keys = ['dp_I', 'dp_U', 'dp_V', 'dp_x', 'dp_y', 'bbox'] + include_densepose = densepose_annotations_dict is not None + num_densepose_annotation_used = 0 + num_densepose_annotation_skipped = 0 + for object_annotations in annotations_list: + (x, y, width, height) = tuple(object_annotations['bbox']) + if width <= 0 or height <= 0: + num_annotations_skipped += 1 + continue + if x + width > image_width or y + height > image_height: + num_annotations_skipped += 1 + continue + category_id = int(object_annotations['category_id']) + category_name = category_index[category_id]['name'].encode('utf8') + if remove_non_person_annotations and category_name != b'person': + num_annotations_skipped += 1 + continue + xmin.append(float(x) / image_width) + xmax.append(float(x + width) / image_width) + ymin.append(float(y) / image_height) + ymax.append(float(y + height) / image_height) + is_crowd.append(object_annotations['iscrowd']) + category_ids.append(category_id) + category_names.append(category_name) + area.append(object_annotations['area']) + + if include_masks: + run_len_encoding = mask.frPyObjects(object_annotations['segmentation'], + image_height, image_width) + binary_mask = mask.decode(run_len_encoding) + if not object_annotations['iscrowd']: + binary_mask = np.amax(binary_mask, axis=2) + pil_image = PIL.Image.fromarray(binary_mask) + output_io = io.BytesIO() + pil_image.save(output_io, format='PNG') + encoded_mask_png.append(output_io.getvalue()) + + if include_keypoint: + annotation_id = object_annotations['id'] + if annotation_id in keypoint_annotations_dict: + num_keypoint_annotation_used += 1 + keypoint_annotations = keypoint_annotations_dict[annotation_id] + keypoints = keypoint_annotations['keypoints'] + num_kpts = keypoint_annotations['num_keypoints'] + keypoints_x_abs = keypoints[::3] + keypoints_x.extend( + [float(x_abs) / image_width for x_abs in keypoints_x_abs]) + keypoints_y_abs = keypoints[1::3] + keypoints_y.extend( + [float(y_abs) / image_height for y_abs in keypoints_y_abs]) + keypoints_visibility.extend(keypoints[2::3]) + keypoints_name.extend(_COCO_KEYPOINT_NAMES) + num_keypoints.append(num_kpts) + else: + keypoints_x.extend([0.0] * len(_COCO_KEYPOINT_NAMES)) + keypoints_y.extend([0.0] * len(_COCO_KEYPOINT_NAMES)) + keypoints_visibility.extend([0] * len(_COCO_KEYPOINT_NAMES)) + keypoints_name.extend(_COCO_KEYPOINT_NAMES) + num_keypoints.append(0) + + if include_densepose: + annotation_id = object_annotations['id'] + if 
(annotation_id in densepose_annotations_dict and + all(key in densepose_annotations_dict[annotation_id] + for key in densepose_keys)): + dp_annotations = densepose_annotations_dict[annotation_id] + num_densepose_annotation_used += 1 + dp_num_points.append(len(dp_annotations['dp_I'])) + dp_part_index.extend([int(i - _DP_PART_ID_OFFSET) + for i in dp_annotations['dp_I']]) + # DensePose surface coordinates are defined on a [256, 256] grid + # relative to each instance box (i.e. absolute coordinates in range + # [0., 256.]). The following converts the coordinates + # so that they are expressed in normalized image coordinates. + dp_x_box_rel = [ + clip_to_unit(val / 256.) for val in dp_annotations['dp_x']] + dp_x_norm = [(float(x) + x_box_rel * width) / image_width + for x_box_rel in dp_x_box_rel] + dp_y_box_rel = [ + clip_to_unit(val / 256.) for val in dp_annotations['dp_y']] + dp_y_norm = [(float(y) + y_box_rel * height) / image_height + for y_box_rel in dp_y_box_rel] + dp_x.extend(dp_x_norm) + dp_y.extend(dp_y_norm) + dp_u.extend(dp_annotations['dp_U']) + dp_v.extend(dp_annotations['dp_V']) + else: + dp_num_points.append(0) + + if (remove_non_person_images and + not any(name == b'person' for name in category_names)): + return (key, None, num_annotations_skipped, + num_keypoint_annotation_skipped, num_densepose_annotation_skipped) + feature_dict = { + 'image/height': + dataset_util.int64_feature(image_height), + 'image/width': + dataset_util.int64_feature(image_width), + 'image/filename': + dataset_util.bytes_feature(filename.encode('utf8')), + 'image/source_id': + dataset_util.bytes_feature(str(image_id).encode('utf8')), + 'image/key/sha256': + dataset_util.bytes_feature(key.encode('utf8')), + 'image/encoded': + dataset_util.bytes_feature(encoded_jpg), + 'image/format': + dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/object/bbox/xmin': + dataset_util.float_list_feature(xmin), + 'image/object/bbox/xmax': + dataset_util.float_list_feature(xmax), + 'image/object/bbox/ymin': + dataset_util.float_list_feature(ymin), + 'image/object/bbox/ymax': + dataset_util.float_list_feature(ymax), + 'image/object/class/text': + dataset_util.bytes_list_feature(category_names), + 'image/object/is_crowd': + dataset_util.int64_list_feature(is_crowd), + 'image/object/area': + dataset_util.float_list_feature(area), + } + if include_masks: + feature_dict['image/object/mask'] = ( + dataset_util.bytes_list_feature(encoded_mask_png)) + if include_keypoint: + feature_dict['image/object/keypoint/x'] = ( + dataset_util.float_list_feature(keypoints_x)) + feature_dict['image/object/keypoint/y'] = ( + dataset_util.float_list_feature(keypoints_y)) + feature_dict['image/object/keypoint/num'] = ( + dataset_util.int64_list_feature(num_keypoints)) + feature_dict['image/object/keypoint/visibility'] = ( + dataset_util.int64_list_feature(keypoints_visibility)) + feature_dict['image/object/keypoint/text'] = ( + dataset_util.bytes_list_feature(keypoints_name)) + num_keypoint_annotation_skipped = ( + len(keypoint_annotations_dict) - num_keypoint_annotation_used) + if include_densepose: + feature_dict['image/object/densepose/num'] = ( + dataset_util.int64_list_feature(dp_num_points)) + feature_dict['image/object/densepose/part_index'] = ( + dataset_util.int64_list_feature(dp_part_index)) + feature_dict['image/object/densepose/x'] = ( + dataset_util.float_list_feature(dp_x)) + feature_dict['image/object/densepose/y'] = ( + dataset_util.float_list_feature(dp_y)) + feature_dict['image/object/densepose/u'] = ( + 
dataset_util.float_list_feature(dp_u)) + feature_dict['image/object/densepose/v'] = ( + dataset_util.float_list_feature(dp_v)) + num_densepose_annotation_skipped = ( + len(densepose_annotations_dict) - num_densepose_annotation_used) + + example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) + return (key, example, num_annotations_skipped, + num_keypoint_annotation_skipped, num_densepose_annotation_skipped) + + +def _create_tf_record_from_coco_annotations(annotations_file, image_dir, + output_path, include_masks, + num_shards, + keypoint_annotations_file='', + densepose_annotations_file='', + remove_non_person_annotations=False, + remove_non_person_images=False): + """Loads COCO annotation json files and converts to tf.Record format. + + Args: + annotations_file: JSON file containing bounding box annotations. + image_dir: Directory containing the image files. + output_path: Path to output tf.Record file. + include_masks: Whether to include instance segmentations masks + (PNG encoded) in the result. default: False. + num_shards: number of output file shards. + keypoint_annotations_file: JSON file containing the person keypoint + annotations. If empty, then no person keypoint annotations will be + generated. + densepose_annotations_file: JSON file containing the DensePose annotations. + If empty, then no DensePose annotations will be generated. + remove_non_person_annotations: Whether to remove any annotations that are + not the "person" class. + remove_non_person_images: Whether to remove any images that do not contain + at least one "person" annotation. + """ + with contextlib2.ExitStack() as tf_record_close_stack, \ + tf.gfile.GFile(annotations_file, 'r') as fid: + output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( + tf_record_close_stack, output_path, num_shards) + groundtruth_data = json.load(fid) + images = groundtruth_data['images'] + category_index = label_map_util.create_category_index( + groundtruth_data['categories']) + + annotations_index = {} + if 'annotations' in groundtruth_data: + logging.info('Found groundtruth annotations. 
Building annotations index.') + for annotation in groundtruth_data['annotations']: + image_id = annotation['image_id'] + if image_id not in annotations_index: + annotations_index[image_id] = [] + annotations_index[image_id].append(annotation) + missing_annotation_count = 0 + for image in images: + image_id = image['id'] + if image_id not in annotations_index: + missing_annotation_count += 1 + annotations_index[image_id] = [] + logging.info('%d images are missing annotations.', + missing_annotation_count) + + keypoint_annotations_index = {} + if keypoint_annotations_file: + with tf.gfile.GFile(keypoint_annotations_file, 'r') as kid: + keypoint_groundtruth_data = json.load(kid) + if 'annotations' in keypoint_groundtruth_data: + for annotation in keypoint_groundtruth_data['annotations']: + image_id = annotation['image_id'] + if image_id not in keypoint_annotations_index: + keypoint_annotations_index[image_id] = {} + keypoint_annotations_index[image_id][annotation['id']] = annotation + + densepose_annotations_index = {} + if densepose_annotations_file: + with tf.gfile.GFile(densepose_annotations_file, 'r') as fid: + densepose_groundtruth_data = json.load(fid) + if 'annotations' in densepose_groundtruth_data: + for annotation in densepose_groundtruth_data['annotations']: + image_id = annotation['image_id'] + if image_id not in densepose_annotations_index: + densepose_annotations_index[image_id] = {} + densepose_annotations_index[image_id][annotation['id']] = annotation + + total_num_annotations_skipped = 0 + total_num_keypoint_annotations_skipped = 0 + total_num_densepose_annotations_skipped = 0 + for idx, image in enumerate(images): + if idx % 100 == 0: + logging.info('On image %d of %d', idx, len(images)) + annotations_list = annotations_index[image['id']] + keypoint_annotations_dict = None + if keypoint_annotations_file: + keypoint_annotations_dict = {} + if image['id'] in keypoint_annotations_index: + keypoint_annotations_dict = keypoint_annotations_index[image['id']] + densepose_annotations_dict = None + if densepose_annotations_file: + densepose_annotations_dict = {} + if image['id'] in densepose_annotations_index: + densepose_annotations_dict = densepose_annotations_index[image['id']] + (_, tf_example, num_annotations_skipped, num_keypoint_annotations_skipped, + num_densepose_annotations_skipped) = create_tf_example( + image, annotations_list, image_dir, category_index, include_masks, + keypoint_annotations_dict, densepose_annotations_dict, + remove_non_person_annotations, remove_non_person_images) + total_num_annotations_skipped += num_annotations_skipped + total_num_keypoint_annotations_skipped += num_keypoint_annotations_skipped + total_num_densepose_annotations_skipped += ( + num_densepose_annotations_skipped) + shard_idx = idx % num_shards + if tf_example: + output_tfrecords[shard_idx].write(tf_example.SerializeToString()) + logging.info('Finished writing, skipped %d annotations.', + total_num_annotations_skipped) + if keypoint_annotations_file: + logging.info('Finished writing, skipped %d keypoint annotations.', + total_num_keypoint_annotations_skipped) + if densepose_annotations_file: + logging.info('Finished writing, skipped %d DensePose annotations.', + total_num_densepose_annotations_skipped) + + +def main(_): + assert FLAGS.train_image_dir, '`train_image_dir` missing.' + assert FLAGS.val_image_dir, '`val_image_dir` missing.' + assert FLAGS.test_image_dir, '`test_image_dir` missing.' + assert FLAGS.train_annotations_file, '`train_annotations_file` missing.' 
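Stepping back to _create_tf_record_from_coco_annotations above: the conversion loop depends on first grouping COCO's flat annotations list by image_id so that each image can be converted in a single pass. A compact sketch of that grouping on a toy list, using collections.defaultdict where the script spells the membership check out explicitly:

    import collections

    # Toy stand-in for groundtruth_data['annotations'] from a COCO JSON file.
    annotations = [
        {'image_id': 11, 'id': 1000, 'bbox': [64, 64, 128, 128], 'category_id': 2},
        {'image_id': 11, 'id': 1001, 'bbox': [0, 0, 32, 32], 'category_id': 1},
        {'image_id': 12, 'id': 1002, 'bbox': [10, 10, 50, 50], 'category_id': 3},
    ]

    annotations_index = collections.defaultdict(list)
    for annotation in annotations:
        annotations_index[annotation['image_id']].append(annotation)

    # Images absent from the index simply yield an empty list, which is what the
    # converter's missing_annotation_count bookkeeping accounts for.
    assert len(annotations_index[11]) == 2
    assert annotations_index[99] == []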
+ assert FLAGS.val_annotations_file, '`val_annotations_file` missing.' + assert FLAGS.testdev_annotations_file, '`testdev_annotations_file` missing.' + + if not tf.gfile.IsDirectory(FLAGS.output_dir): + tf.gfile.MakeDirs(FLAGS.output_dir) + train_output_path = os.path.join(FLAGS.output_dir, 'coco_train.record') + val_output_path = os.path.join(FLAGS.output_dir, 'coco_val.record') + testdev_output_path = os.path.join(FLAGS.output_dir, 'coco_testdev.record') + + _create_tf_record_from_coco_annotations( + FLAGS.train_annotations_file, + FLAGS.train_image_dir, + train_output_path, + FLAGS.include_masks, + num_shards=100, + keypoint_annotations_file=FLAGS.train_keypoint_annotations_file, + densepose_annotations_file=FLAGS.train_densepose_annotations_file, + remove_non_person_annotations=FLAGS.remove_non_person_annotations, + remove_non_person_images=FLAGS.remove_non_person_images) + _create_tf_record_from_coco_annotations( + FLAGS.val_annotations_file, + FLAGS.val_image_dir, + val_output_path, + FLAGS.include_masks, + num_shards=50, + keypoint_annotations_file=FLAGS.val_keypoint_annotations_file, + densepose_annotations_file=FLAGS.val_densepose_annotations_file, + remove_non_person_annotations=FLAGS.remove_non_person_annotations, + remove_non_person_images=FLAGS.remove_non_person_images) + _create_tf_record_from_coco_annotations( + FLAGS.testdev_annotations_file, + FLAGS.test_image_dir, + testdev_output_path, + FLAGS.include_masks, + num_shards=50) + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_coco_tf_record_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_coco_tf_record_test.py new file mode 100644 index 0000000000000000000000000000000000000000..659142b7b7022a4243025146162eaac4b8c9f165 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_coco_tf_record_test.py @@ -0,0 +1,497 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test for create_coco_tf_record.py.""" + +import io +import json +import os + +import numpy as np +import PIL.Image +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import create_coco_tf_record + + +class CreateCocoTFRecordTest(tf.test.TestCase): + + def _assertProtoEqual(self, proto_field, expectation): + """Helper function to assert if a proto field equals some value. + + Args: + proto_field: The protobuf field to compare. + expectation: The expected value of the protobuf field. + """ + proto_list = [p for p in proto_field] + self.assertListEqual(proto_list, expectation) + + def _assertProtoClose(self, proto_field, expectation): + """Helper function to assert if a proto field nearly equals some value. + + Args: + proto_field: The protobuf field to compare. 
+ expectation: The expected value of the protobuf field. + """ + proto_list = [p for p in proto_field] + self.assertAllClose(proto_list, expectation) + + def test_create_tf_example(self): + image_file_name = 'tmp_image.jpg' + image_data = np.random.rand(256, 256, 3) + tmp_dir = self.get_temp_dir() + save_path = os.path.join(tmp_dir, image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + image = { + 'file_name': image_file_name, + 'height': 256, + 'width': 256, + 'id': 11, + } + + annotations_list = [{ + 'area': .5, + 'iscrowd': False, + 'image_id': 11, + 'bbox': [64, 64, 128, 128], + 'category_id': 2, + 'id': 1000, + }] + + image_dir = tmp_dir + category_index = { + 1: { + 'name': 'dog', + 'id': 1 + }, + 2: { + 'name': 'cat', + 'id': 2 + }, + 3: { + 'name': 'human', + 'id': 3 + } + } + + (_, example, + num_annotations_skipped, _, _) = create_coco_tf_record.create_tf_example( + image, annotations_list, image_dir, category_index) + + self.assertEqual(num_annotations_skipped, 0) + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(str(image['id']))]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('jpeg')]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('cat')]) + + def test_create_tf_example_with_instance_masks(self): + image_file_name = 'tmp_image.jpg' + image_data = np.random.rand(8, 8, 3) + tmp_dir = self.get_temp_dir() + save_path = os.path.join(tmp_dir, image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + image = { + 'file_name': image_file_name, + 'height': 8, + 'width': 8, + 'id': 11, + } + + annotations_list = [{ + 'area': .5, + 'iscrowd': False, + 'image_id': 11, + 'bbox': [0, 0, 8, 8], + 'segmentation': [[4, 0, 0, 0, 0, 4], [8, 4, 4, 8, 8, 8]], + 'category_id': 1, + 'id': 1000, + }] + + image_dir = tmp_dir + category_index = { + 1: { + 'name': 'dog', + 'id': 1 + }, + } + + (_, example, + num_annotations_skipped, _, _) = create_coco_tf_record.create_tf_example( + image, annotations_list, image_dir, category_index, include_masks=True) + + self.assertEqual(num_annotations_skipped, 0) + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [8]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [8]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(str(image['id']))]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('jpeg')]) + self._assertProtoEqual( + 
example.features.feature['image/object/bbox/xmin'].float_list.value, + [0]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('dog')]) + encoded_mask_pngs = [ + io.BytesIO(encoded_masks) for encoded_masks in example.features.feature[ + 'image/object/mask'].bytes_list.value + ] + pil_masks = [ + np.array(PIL.Image.open(encoded_mask_png)) + for encoded_mask_png in encoded_mask_pngs + ] + self.assertEqual(len(pil_masks), 1) + self.assertAllEqual(pil_masks[0], + [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1]]) + + def test_create_tf_example_with_keypoints(self): + image_dir = self.get_temp_dir() + image_file_name = 'tmp_image.jpg' + image_data = np.random.randint(low=0, high=256, size=(256, 256, 3)).astype( + np.uint8) + save_path = os.path.join(image_dir, image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + image = { + 'file_name': image_file_name, + 'height': 256, + 'width': 256, + 'id': 11, + } + + min_x, min_y = 64, 64 + max_x, max_y = 128, 128 + keypoints = [] + num_visible_keypoints = 0 + xv = [] + yv = [] + vv = [] + for _ in range(17): + xc = min_x + int(np.random.rand()*(max_x - min_x)) + yc = min_y + int(np.random.rand()*(max_y - min_y)) + vis = np.random.randint(0, 3) + xv.append(xc) + yv.append(yc) + vv.append(vis) + keypoints.extend([xc, yc, vis]) + num_visible_keypoints += (vis > 0) + + annotations_list = [{ + 'area': 0.5, + 'iscrowd': False, + 'image_id': 11, + 'bbox': [64, 64, 128, 128], + 'category_id': 1, + 'id': 1000 + }] + + keypoint_annotations_dict = { + 1000: { + 'keypoints': keypoints, + 'num_keypoints': num_visible_keypoints + } + } + + category_index = { + 1: { + 'name': 'person', + 'id': 1 + } + } + + _, example, _, num_keypoint_annotation_skipped, _ = ( + create_coco_tf_record.create_tf_example( + image, + annotations_list, + image_dir, + category_index, + include_masks=False, + keypoint_annotations_dict=keypoint_annotations_dict)) + + self.assertEqual(num_keypoint_annotation_skipped, 0) + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(str(image['id']))]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('jpeg')]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.75]) + self._assertProtoEqual( + 
example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('person')]) + self._assertProtoClose( + example.features.feature['image/object/keypoint/x'].float_list.value, + np.array(xv, dtype=np.float32) / 256) + self._assertProtoClose( + example.features.feature['image/object/keypoint/y'].float_list.value, + np.array(yv, dtype=np.float32) / 256) + self._assertProtoEqual( + example.features.feature['image/object/keypoint/text'].bytes_list.value, + create_coco_tf_record._COCO_KEYPOINT_NAMES) + self._assertProtoEqual( + example.features.feature[ + 'image/object/keypoint/visibility'].int64_list.value, vv) + + def test_create_tf_example_with_dense_pose(self): + image_dir = self.get_temp_dir() + image_file_name = 'tmp_image.jpg' + image_data = np.random.randint(low=0, high=256, size=(256, 256, 3)).astype( + np.uint8) + save_path = os.path.join(image_dir, image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + image = { + 'file_name': image_file_name, + 'height': 256, + 'width': 256, + 'id': 11, + } + + min_x, min_y = 64, 64 + max_x, max_y = 128, 128 + keypoints = [] + num_visible_keypoints = 0 + xv = [] + yv = [] + vv = [] + for _ in range(17): + xc = min_x + int(np.random.rand()*(max_x - min_x)) + yc = min_y + int(np.random.rand()*(max_y - min_y)) + vis = np.random.randint(0, 3) + xv.append(xc) + yv.append(yc) + vv.append(vis) + keypoints.extend([xc, yc, vis]) + num_visible_keypoints += (vis > 0) + + annotations_list = [{ + 'area': 0.5, + 'iscrowd': False, + 'image_id': 11, + 'bbox': [64, 64, 128, 128], + 'category_id': 1, + 'id': 1000 + }] + + num_points = 45 + dp_i = np.random.randint(1, 25, (num_points,)).astype(np.float32) + dp_u = np.random.randn(num_points) + dp_v = np.random.randn(num_points) + dp_x = np.random.rand(num_points)*256. + dp_y = np.random.rand(num_points)*256. 
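The keypoint tests above build COCO-style flat [x, y, visibility] triplets and expect create_tf_example to return coordinates normalized by image size, which it does by slicing the flat list with strides of three. A standalone sketch of that slicing and normalization, with invented values:

    import numpy as np

    image_width, image_height = 256, 256
    # COCO keypoints: 17 joints flattened as [x1, y1, v1, x2, y2, v2, ...].
    keypoints = [80, 96, 2, 120, 100, 1, 0, 0, 0] + [0, 0, 0] * 14

    xs_abs = keypoints[0::3]
    ys_abs = keypoints[1::3]
    visibility = keypoints[2::3]

    xs_norm = [float(x) / image_width for x in xs_abs]
    ys_norm = [float(y) / image_height for y in ys_abs]
    num_visible = sum(int(v > 0) for v in visibility)

    assert len(xs_norm) == 17 and num_visible == 2
    np.testing.assert_allclose(xs_norm[0], 80.0 / 256.0)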
+ densepose_annotations_dict = { + 1000: { + 'dp_I': dp_i, + 'dp_U': dp_u, + 'dp_V': dp_v, + 'dp_x': dp_x, + 'dp_y': dp_y, + 'bbox': [64, 64, 128, 128], + } + } + + category_index = { + 1: { + 'name': 'person', + 'id': 1 + } + } + + _, example, _, _, num_densepose_annotation_skipped = ( + create_coco_tf_record.create_tf_example( + image, + annotations_list, + image_dir, + category_index, + include_masks=False, + densepose_annotations_dict=densepose_annotations_dict)) + + self.assertEqual(num_densepose_annotation_skipped, 0) + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(str(image['id']))]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('jpeg')]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('person')]) + self._assertProtoEqual( + example.features.feature['image/object/densepose/num'].int64_list.value, + [num_points]) + self.assertAllEqual( + example.features.feature[ + 'image/object/densepose/part_index'].int64_list.value, + dp_i.astype(np.int64) - create_coco_tf_record._DP_PART_ID_OFFSET) + self.assertAllClose( + example.features.feature['image/object/densepose/u'].float_list.value, + dp_u) + self.assertAllClose( + example.features.feature['image/object/densepose/v'].float_list.value, + dp_v) + expected_dp_x = (64 + dp_x * 128. / 256.) / 256. + expected_dp_y = (64 + dp_y * 128. / 256.) / 256. 
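expected_dp_x and expected_dp_y below restate the converter's DensePose mapping: surface points are given on a [0, 256] grid relative to the instance box and have to be re-expressed as normalized image coordinates. A direct sketch of that mapping for one box (the helper name is an invention; the numbers mirror the test's 128x128 box at (64, 64) inside a 256x256 image):

    import numpy as np

    def clip_to_unit(x):
        return np.clip(x, 0.0, 1.0)

    def densepose_to_image_coords(dp_x, dp_y, box, image_width, image_height):
        """Maps [0, 256] box-relative DensePose points to normalized image coords."""
        x, y, box_w, box_h = box  # COCO-style [x, y, width, height] in pixels
        x_rel = clip_to_unit(np.asarray(dp_x) / 256.0)
        y_rel = clip_to_unit(np.asarray(dp_y) / 256.0)
        x_norm = (x + x_rel * box_w) / image_width
        y_norm = (y + y_rel * box_h) / image_height
        return x_norm, y_norm

    dp_x = np.array([0.0, 128.0, 256.0])
    dp_y = np.array([0.0, 128.0, 256.0])
    x_norm, y_norm = densepose_to_image_coords(dp_x, dp_y, (64, 64, 128, 128), 256, 256)
    np.testing.assert_allclose(x_norm, [0.25, 0.5, 0.75])
    np.testing.assert_allclose(y_norm, [0.25, 0.5, 0.75])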
+ self.assertAllClose( + example.features.feature['image/object/densepose/x'].float_list.value, + expected_dp_x) + self.assertAllClose( + example.features.feature['image/object/densepose/y'].float_list.value, + expected_dp_y) + + def test_create_sharded_tf_record(self): + tmp_dir = self.get_temp_dir() + image_paths = ['tmp1_image.jpg', 'tmp2_image.jpg'] + for image_path in image_paths: + image_data = np.random.rand(256, 256, 3) + save_path = os.path.join(tmp_dir, image_path) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + images = [{ + 'file_name': image_paths[0], + 'height': 256, + 'width': 256, + 'id': 11, + }, { + 'file_name': image_paths[1], + 'height': 256, + 'width': 256, + 'id': 12, + }] + + annotations = [{ + 'area': .5, + 'iscrowd': False, + 'image_id': 11, + 'bbox': [64, 64, 128, 128], + 'category_id': 2, + 'id': 1000, + }] + + category_index = [{ + 'name': 'dog', + 'id': 1 + }, { + 'name': 'cat', + 'id': 2 + }, { + 'name': 'human', + 'id': 3 + }] + groundtruth_data = {'images': images, 'annotations': annotations, + 'categories': category_index} + annotation_file = os.path.join(tmp_dir, 'annotation.json') + with open(annotation_file, 'w') as annotation_fid: + json.dump(groundtruth_data, annotation_fid) + + output_path = os.path.join(tmp_dir, 'out.record') + create_coco_tf_record._create_tf_record_from_coco_annotations( + annotation_file, + tmp_dir, + output_path, + False, + 2) + self.assertTrue(os.path.exists(output_path + '-00000-of-00002')) + self.assertTrue(os.path.exists(output_path + '-00001-of-00002')) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_kitti_tf_record.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_kitti_tf_record.py new file mode 100644 index 0000000000000000000000000000000000000000..fe4f13ec80b4f552f316c6cc544c0fa4edf8f0bd --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_kitti_tf_record.py @@ -0,0 +1,310 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Convert raw KITTI detection dataset to TFRecord for object_detection. + +Converts KITTI detection dataset to TFRecords with a standard format allowing + to use this dataset to train object detectors. The raw dataset can be + downloaded from: + http://kitti.is.tue.mpg.de/kitti/data_object_image_2.zip. + http://kitti.is.tue.mpg.de/kitti/data_object_label_2.zip + Permission can be requested at the main website. + + KITTI detection dataset contains 7481 training images. Using this code with + the default settings will set aside the first 500 images as a validation set. + This can be altered using the flags, see details below. 
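That hold-out rule (the first validation_set_size images, selected by their numeric file name, become the validation split) is implemented inline in the converter below. A tiny standalone sketch of the same rule and the zero-padded KITTI naming it relies on, with an invented helper and sample file names:

    VALIDATION_SET_SIZE = 500

    def split_kitti_images(image_names, validation_set_size=VALIDATION_SET_SIZE):
        """Splits KITTI image names like '000123.png' into train/val lists."""
        train, val = [], []
        for name in sorted(image_names):
            img_num = int(name.split('.')[0])            # '000123.png' -> 123
            label_file = str(img_num).zfill(6) + '.txt'  # matching annotation file
            target = val if img_num < validation_set_size else train
            target.append((name, label_file))
        return train, val

    train, val = split_kitti_images(['000000.png', '000499.png', '000500.png'])
    assert len(val) == 2 and len(train) == 1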
+ +Example usage: + python object_detection/dataset_tools/create_kitti_tf_record.py \ + --data_dir=/home/user/kitti \ + --output_path=/home/user/kitti.record +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import hashlib +import io +import os + +import numpy as np +import PIL.Image as pil +import tensorflow.compat.v1 as tf + +from object_detection.utils import dataset_util +from object_detection.utils import label_map_util +from object_detection.utils.np_box_ops import iou + +tf.app.flags.DEFINE_string('data_dir', '', 'Location of root directory for the ' + 'data. Folder structure is assumed to be:' + '/training/label_2 (annotations) and' + '/data_object_image_2/training/image_2' + '(images).') +tf.app.flags.DEFINE_string('output_path', '', 'Path to which TFRecord files' + 'will be written. The TFRecord with the training set' + 'will be located at: _train.tfrecord.' + 'And the TFRecord with the validation set will be' + 'located at: _val.tfrecord') +tf.app.flags.DEFINE_string('classes_to_use', 'car,pedestrian,dontcare', + 'Comma separated list of class names that will be' + 'used. Adding the dontcare class will remove all' + 'bboxs in the dontcare regions.') +tf.app.flags.DEFINE_string('label_map_path', 'data/kitti_label_map.pbtxt', + 'Path to label map proto.') +tf.app.flags.DEFINE_integer('validation_set_size', '500', 'Number of images to' + 'be used as a validation set.') +FLAGS = tf.app.flags.FLAGS + + +def convert_kitti_to_tfrecords(data_dir, output_path, classes_to_use, + label_map_path, validation_set_size): + """Convert the KITTI detection dataset to TFRecords. + + Args: + data_dir: The full path to the unzipped folder containing the unzipped data + from data_object_image_2 and data_object_label_2.zip. + Folder structure is assumed to be: data_dir/training/label_2 (annotations) + and data_dir/data_object_image_2/training/image_2 (images). + output_path: The path to which TFRecord files will be written. The TFRecord + with the training set will be located at: _train.tfrecord + And the TFRecord with the validation set will be located at: + _val.tfrecord + classes_to_use: List of strings naming the classes for which data should be + converted. Use the same names as presented in the KIITI README file. + Adding dontcare class will remove all other bounding boxes that overlap + with areas marked as dontcare regions. + label_map_path: Path to label map proto + validation_set_size: How many images should be left as the validation set. + (Ffirst `validation_set_size` examples are selected to be in the + validation set). + """ + label_map_dict = label_map_util.get_label_map_dict(label_map_path) + train_count = 0 + val_count = 0 + + annotation_dir = os.path.join(data_dir, + 'training', + 'label_2') + + image_dir = os.path.join(data_dir, + 'data_object_image_2', + 'training', + 'image_2') + + train_writer = tf.python_io.TFRecordWriter('%s_train.tfrecord'% + output_path) + val_writer = tf.python_io.TFRecordWriter('%s_val.tfrecord'% + output_path) + + images = sorted(tf.gfile.ListDirectory(image_dir)) + for img_name in images: + img_num = int(img_name.split('.')[0]) + is_validation_img = img_num < validation_set_size + img_anno = read_annotation_file(os.path.join(annotation_dir, + str(img_num).zfill(6)+'.txt')) + + image_path = os.path.join(image_dir, img_name) + + # Filter all bounding boxes of this frame that are of a legal class, and + # don't overlap with a dontcare region. 
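# filter_annotations() below drops every box whose class is not in
# classes_to_use and, when 'dontcare' is requested, also removes any box that
# overlaps a dontcare region at all (max IoU > 0). A self-contained numpy
# sketch of that overlap test, standing in for np_box_ops.iou (toy boxes,
# [y_min, x_min, y_max, x_max] order as in the code below):
import numpy as np


def pairwise_iou(boxes1, boxes2):
  y_min1, x_min1, y_max1, x_max1 = np.split(boxes1, 4, axis=1)
  y_min2, x_min2, y_max2, x_max2 = np.split(boxes2, 4, axis=1)
  inter_h = np.maximum(
      0.0, np.minimum(y_max1, y_max2.T) - np.maximum(y_min1, y_min2.T))
  inter_w = np.maximum(
      0.0, np.minimum(x_max1, x_max2.T) - np.maximum(x_min1, x_min2.T))
  intersection = inter_h * inter_w
  area1 = (y_max1 - y_min1) * (x_max1 - x_min1)
  area2 = (y_max2 - y_min2) * (x_max2 - x_min2)
  return intersection / (area1 + area2.T - intersection)

boxes = np.array([[0., 0., 10., 10.], [20., 20., 30., 30.]])
dontcare = np.array([[5., 5., 15., 15.]])
keep = np.amax(pairwise_iou(boxes, dontcare), axis=1) == 0.0
# keep == [False, True]: the first box touches the dontcare region and is dropped.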
+ # TODO(talremez) filter out targets that are truncated or heavily occluded. + annotation_for_image = filter_annotations(img_anno, classes_to_use) + + example = prepare_example(image_path, annotation_for_image, label_map_dict) + if is_validation_img: + val_writer.write(example.SerializeToString()) + val_count += 1 + else: + train_writer.write(example.SerializeToString()) + train_count += 1 + + train_writer.close() + val_writer.close() + + +def prepare_example(image_path, annotations, label_map_dict): + """Converts a dictionary with annotations for an image to tf.Example proto. + + Args: + image_path: The complete path to image. + annotations: A dictionary representing the annotation of a single object + that appears in the image. + label_map_dict: A map from string label names to integer ids. + + Returns: + example: The converted tf.Example. + """ + with tf.gfile.GFile(image_path, 'rb') as fid: + encoded_png = fid.read() + encoded_png_io = io.BytesIO(encoded_png) + image = pil.open(encoded_png_io) + image = np.asarray(image) + + key = hashlib.sha256(encoded_png).hexdigest() + + width = int(image.shape[1]) + height = int(image.shape[0]) + + xmin_norm = annotations['2d_bbox_left'] / float(width) + ymin_norm = annotations['2d_bbox_top'] / float(height) + xmax_norm = annotations['2d_bbox_right'] / float(width) + ymax_norm = annotations['2d_bbox_bottom'] / float(height) + + difficult_obj = [0]*len(xmin_norm) + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature(image_path.encode('utf8')), + 'image/source_id': dataset_util.bytes_feature(image_path.encode('utf8')), + 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), + 'image/encoded': dataset_util.bytes_feature(encoded_png), + 'image/format': dataset_util.bytes_feature('png'.encode('utf8')), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin_norm), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax_norm), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin_norm), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax_norm), + 'image/object/class/text': dataset_util.bytes_list_feature( + [x.encode('utf8') for x in annotations['type']]), + 'image/object/class/label': dataset_util.int64_list_feature( + [label_map_dict[x] for x in annotations['type']]), + 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj), + 'image/object/truncated': dataset_util.float_list_feature( + annotations['truncated']), + 'image/object/alpha': dataset_util.float_list_feature( + annotations['alpha']), + 'image/object/3d_bbox/height': dataset_util.float_list_feature( + annotations['3d_bbox_height']), + 'image/object/3d_bbox/width': dataset_util.float_list_feature( + annotations['3d_bbox_width']), + 'image/object/3d_bbox/length': dataset_util.float_list_feature( + annotations['3d_bbox_length']), + 'image/object/3d_bbox/x': dataset_util.float_list_feature( + annotations['3d_bbox_x']), + 'image/object/3d_bbox/y': dataset_util.float_list_feature( + annotations['3d_bbox_y']), + 'image/object/3d_bbox/z': dataset_util.float_list_feature( + annotations['3d_bbox_z']), + 'image/object/3d_bbox/rot_y': dataset_util.float_list_feature( + annotations['3d_bbox_rot_y']), + })) + + return example + + +def filter_annotations(img_all_annotations, used_classes): + """Filters out annotations from the unused classes and dontcare regions. 
+ + Filters out the annotations that belong to classes we do now wish to use and + (optionally) also removes all boxes that overlap with dontcare regions. + + Args: + img_all_annotations: A list of annotation dictionaries. See documentation of + read_annotation_file for more details about the format of the annotations. + used_classes: A list of strings listing the classes we want to keep, if the + list contains "dontcare", all bounding boxes with overlapping with dont + care regions will also be filtered out. + + Returns: + img_filtered_annotations: A list of annotation dictionaries that have passed + the filtering. + """ + + img_filtered_annotations = {} + + # Filter the type of the objects. + relevant_annotation_indices = [ + i for i, x in enumerate(img_all_annotations['type']) if x in used_classes + ] + + for key in img_all_annotations.keys(): + img_filtered_annotations[key] = ( + img_all_annotations[key][relevant_annotation_indices]) + + if 'dontcare' in used_classes: + dont_care_indices = [i for i, + x in enumerate(img_filtered_annotations['type']) + if x == 'dontcare'] + + # bounding box format [y_min, x_min, y_max, x_max] + all_boxes = np.stack([img_filtered_annotations['2d_bbox_top'], + img_filtered_annotations['2d_bbox_left'], + img_filtered_annotations['2d_bbox_bottom'], + img_filtered_annotations['2d_bbox_right']], + axis=1) + + ious = iou(boxes1=all_boxes, + boxes2=all_boxes[dont_care_indices]) + + # Remove all bounding boxes that overlap with a dontcare region. + if ious.size > 0: + boxes_to_remove = np.amax(ious, axis=1) > 0.0 + for key in img_all_annotations.keys(): + img_filtered_annotations[key] = ( + img_filtered_annotations[key][np.logical_not(boxes_to_remove)]) + + return img_filtered_annotations + + +def read_annotation_file(filename): + """Reads a KITTI annotation file. + + Converts a KITTI annotation file into a dictionary containing all the + relevant information. + + Args: + filename: the path to the annotataion text file. + + Returns: + anno: A dictionary with the converted annotation information. See annotation + README file for details on the different fields. 
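# For reference, every line of a KITTI label_2 file carries 15 whitespace-
# separated fields in exactly the order consumed by the parser below: type,
# truncated, occluded, alpha, 2D bbox (left, top, right, bottom), 3D dimensions
# (height, width, length), 3D location (x, y, z) and rotation_y. A hypothetical
# example line and a quick sanity check:
fields = ('car 0.00 0 1.85 387.63 181.54 423.81 203.12 '
          '1.67 1.87 3.69 -16.53 2.39 58.49 1.57').split(' ')
assert len(fields) == 15 and fields[0] == 'car'
bbox_left, bbox_top, bbox_right, bbox_bottom = (float(v) for v in fields[4:8])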
+ """ + with open(filename) as f: + content = f.readlines() + content = [x.strip().split(' ') for x in content] + + anno = {} + anno['type'] = np.array([x[0].lower() for x in content]) + anno['truncated'] = np.array([float(x[1]) for x in content]) + anno['occluded'] = np.array([int(x[2]) for x in content]) + anno['alpha'] = np.array([float(x[3]) for x in content]) + + anno['2d_bbox_left'] = np.array([float(x[4]) for x in content]) + anno['2d_bbox_top'] = np.array([float(x[5]) for x in content]) + anno['2d_bbox_right'] = np.array([float(x[6]) for x in content]) + anno['2d_bbox_bottom'] = np.array([float(x[7]) for x in content]) + + anno['3d_bbox_height'] = np.array([float(x[8]) for x in content]) + anno['3d_bbox_width'] = np.array([float(x[9]) for x in content]) + anno['3d_bbox_length'] = np.array([float(x[10]) for x in content]) + anno['3d_bbox_x'] = np.array([float(x[11]) for x in content]) + anno['3d_bbox_y'] = np.array([float(x[12]) for x in content]) + anno['3d_bbox_z'] = np.array([float(x[13]) for x in content]) + anno['3d_bbox_rot_y'] = np.array([float(x[14]) for x in content]) + + return anno + + +def main(_): + convert_kitti_to_tfrecords( + data_dir=FLAGS.data_dir, + output_path=FLAGS.output_path, + classes_to_use=FLAGS.classes_to_use.split(','), + label_map_path=FLAGS.label_map_path, + validation_set_size=FLAGS.validation_set_size) + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_kitti_tf_record_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_kitti_tf_record_test.py new file mode 100644 index 0000000000000000000000000000000000000000..606c684ef90ee23fcb496b11322dc5b4bb9e0d57 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_kitti_tf_record_test.py @@ -0,0 +1,132 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Test for create_kitti_tf_record.py.""" + +import os + +import numpy as np +import PIL.Image +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import create_kitti_tf_record + + +class CreateKittiTFRecordTest(tf.test.TestCase): + + def _assertProtoEqual(self, proto_field, expectation): + """Helper function to assert if a proto field equals some value. + + Args: + proto_field: The protobuf field to compare. + expectation: The expected value of the protobuf field. 
+ """ + proto_list = [p for p in proto_field] + self.assertListEqual(proto_list, expectation) + + def test_dict_to_tf_example(self): + image_file_name = 'tmp_image.jpg' + image_data = np.random.rand(256, 256, 3) + save_path = os.path.join(self.get_temp_dir(), image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + annotations = {} + annotations['2d_bbox_left'] = np.array([64]) + annotations['2d_bbox_top'] = np.array([64]) + annotations['2d_bbox_right'] = np.array([192]) + annotations['2d_bbox_bottom'] = np.array([192]) + annotations['type'] = ['car'] + annotations['truncated'] = np.array([1]) + annotations['alpha'] = np.array([2]) + annotations['3d_bbox_height'] = np.array([10]) + annotations['3d_bbox_width'] = np.array([11]) + annotations['3d_bbox_length'] = np.array([12]) + annotations['3d_bbox_x'] = np.array([13]) + annotations['3d_bbox_y'] = np.array([14]) + annotations['3d_bbox_z'] = np.array([15]) + annotations['3d_bbox_rot_y'] = np.array([4]) + + label_map_dict = { + 'background': 0, + 'car': 1, + } + + example = create_kitti_tf_record.prepare_example( + save_path, + annotations, + label_map_dict) + + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(save_path)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(save_path)]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('png')]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('car')]) + self._assertProtoEqual( + example.features.feature['image/object/class/label'].int64_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/truncated'].float_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/alpha'].float_list.value, + [2]) + self._assertProtoEqual(example.features.feature[ + 'image/object/3d_bbox/height'].float_list.value, [10]) + self._assertProtoEqual( + example.features.feature['image/object/3d_bbox/width'].float_list.value, + [11]) + self._assertProtoEqual(example.features.feature[ + 'image/object/3d_bbox/length'].float_list.value, [12]) + self._assertProtoEqual( + example.features.feature['image/object/3d_bbox/x'].float_list.value, + [13]) + self._assertProtoEqual( + example.features.feature['image/object/3d_bbox/y'].float_list.value, + [14]) + self._assertProtoEqual( + example.features.feature['image/object/3d_bbox/z'].float_list.value, + [15]) + self._assertProtoEqual( + example.features.feature['image/object/3d_bbox/rot_y'].float_list.value, + [4]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_oid_tf_record.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_oid_tf_record.py 
new file mode 100644 index 0000000000000000000000000000000000000000..9b35765bacc2aaddb12698bcc6965bd92ee7a66f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_oid_tf_record.py @@ -0,0 +1,117 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Creates TFRecords of Open Images dataset for object detection. + +Example usage: + python object_detection/dataset_tools/create_oid_tf_record.py \ + --input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \ + --input_image_label_annotations_csv=/path/to/input/annotations-label.csv \ + --input_images_directory=/path/to/input/image_pixels_directory \ + --input_label_map=/path/to/input/labels_bbox_545.labelmap \ + --output_tf_record_path_prefix=/path/to/output/prefix.tfrecord + +CSVs with bounding box annotations and image metadata (including the image URLs) +can be downloaded from the Open Images GitHub repository: +https://github.com/openimages/dataset + +This script will include every image found in the input_images_directory in the +output TFRecord, even if the image has no corresponding bounding box annotations +in the input_annotations_csv. If input_image_label_annotations_csv is specified, +it will add image-level labels as well. Note that the information of whether a +label is positivelly or negativelly verified is NOT added to tfrecord. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import contextlib2 +import pandas as pd +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import oid_tfrecord_creation +from object_detection.dataset_tools import tf_record_creation_util +from object_detection.utils import label_map_util + +tf.flags.DEFINE_string('input_box_annotations_csv', None, + 'Path to CSV containing image bounding box annotations') +tf.flags.DEFINE_string('input_images_directory', None, + 'Directory containing the image pixels ' + 'downloaded from the OpenImages GitHub repository.') +tf.flags.DEFINE_string('input_image_label_annotations_csv', None, + 'Path to CSV containing image-level labels annotations') +tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto') +tf.flags.DEFINE_string( + 'output_tf_record_path_prefix', None, + 'Path to the output TFRecord. 
The shard index and the number of shards ' + 'will be appended for each output shard.') +tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards') + +FLAGS = tf.flags.FLAGS + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + + required_flags = [ + 'input_box_annotations_csv', 'input_images_directory', 'input_label_map', + 'output_tf_record_path_prefix' + ] + for flag_name in required_flags: + if not getattr(FLAGS, flag_name): + raise ValueError('Flag --{} is required'.format(flag_name)) + + label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map) + all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv) + if FLAGS.input_image_label_annotations_csv: + all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv) + all_label_annotations.rename( + columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True) + else: + all_label_annotations = None + all_images = tf.gfile.Glob( + os.path.join(FLAGS.input_images_directory, '*.jpg')) + all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images] + all_image_ids = pd.DataFrame({'ImageID': all_image_ids}) + all_annotations = pd.concat( + [all_box_annotations, all_image_ids, all_label_annotations]) + + tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids)) + + with contextlib2.ExitStack() as tf_record_close_stack: + output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( + tf_record_close_stack, FLAGS.output_tf_record_path_prefix, + FLAGS.num_shards) + + for counter, image_data in enumerate(all_annotations.groupby('ImageID')): + tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000, + counter) + + image_id, image_annotations = image_data + # In OID image file names are formed by appending ".jpg" to the image ID. + image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg') + with tf.gfile.Open(image_path) as image_file: + encoded_image = image_file.read() + + tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( + image_annotations, label_map, encoded_image) + if tf_example: + shard_idx = int(image_id, 16) % FLAGS.num_shards + output_tfrecords[shard_idx].write(tf_example.SerializeToString()) + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pascal_tf_record.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pascal_tf_record.py new file mode 100644 index 0000000000000000000000000000000000000000..8d79a3391c4eaadcb658406c7240d8efc0c0f02e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pascal_tf_record.py @@ -0,0 +1,185 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Convert raw PASCAL dataset to TFRecord for object_detection. 
+ +Example usage: + python object_detection/dataset_tools/create_pascal_tf_record.py \ + --data_dir=/home/user/VOCdevkit \ + --year=VOC2012 \ + --output_path=/home/user/pascal.record +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import hashlib +import io +import logging +import os + +from lxml import etree +import PIL.Image +import tensorflow.compat.v1 as tf + +from object_detection.utils import dataset_util +from object_detection.utils import label_map_util + + +flags = tf.app.flags +flags.DEFINE_string('data_dir', '', 'Root directory to raw PASCAL VOC dataset.') +flags.DEFINE_string('set', 'train', 'Convert training set, validation set or ' + 'merged set.') +flags.DEFINE_string('annotations_dir', 'Annotations', + '(Relative) path to annotations directory.') +flags.DEFINE_string('year', 'VOC2007', 'Desired challenge year.') +flags.DEFINE_string('output_path', '', 'Path to output TFRecord') +flags.DEFINE_string('label_map_path', 'data/pascal_label_map.pbtxt', + 'Path to label map proto') +flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore ' + 'difficult instances') +FLAGS = flags.FLAGS + +SETS = ['train', 'val', 'trainval', 'test'] +YEARS = ['VOC2007', 'VOC2012', 'merged'] + + +def dict_to_tf_example(data, + dataset_directory, + label_map_dict, + ignore_difficult_instances=False, + image_subdirectory='JPEGImages'): + """Convert XML derived dict to tf.Example proto. + + Notice that this function normalizes the bounding box coordinates provided + by the raw data. + + Args: + data: dict holding PASCAL XML fields for a single image (obtained by + running dataset_util.recursive_parse_xml_to_dict) + dataset_directory: Path to root directory holding PASCAL dataset + label_map_dict: A map from string label names to integers ids. + ignore_difficult_instances: Whether to skip difficult instances in the + dataset (default: False). + image_subdirectory: String specifying subdirectory within the + PASCAL dataset directory holding the actual image data. + + Returns: + example: The converted tf.Example. 
+ + Raises: + ValueError: if the image pointed to by data['filename'] is not a valid JPEG + """ + img_path = os.path.join(data['folder'], image_subdirectory, data['filename']) + full_path = os.path.join(dataset_directory, img_path) + with tf.gfile.GFile(full_path, 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = PIL.Image.open(encoded_jpg_io) + if image.format != 'JPEG': + raise ValueError('Image format not JPEG') + key = hashlib.sha256(encoded_jpg).hexdigest() + + width = int(data['size']['width']) + height = int(data['size']['height']) + + xmin = [] + ymin = [] + xmax = [] + ymax = [] + classes = [] + classes_text = [] + truncated = [] + poses = [] + difficult_obj = [] + if 'object' in data: + for obj in data['object']: + difficult = bool(int(obj['difficult'])) + if ignore_difficult_instances and difficult: + continue + + difficult_obj.append(int(difficult)) + + xmin.append(float(obj['bndbox']['xmin']) / width) + ymin.append(float(obj['bndbox']['ymin']) / height) + xmax.append(float(obj['bndbox']['xmax']) / width) + ymax.append(float(obj['bndbox']['ymax']) / height) + classes_text.append(obj['name'].encode('utf8')) + classes.append(label_map_dict[obj['name']]) + truncated.append(int(obj['truncated'])) + poses.append(obj['pose'].encode('utf8')) + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature( + data['filename'].encode('utf8')), + 'image/source_id': dataset_util.bytes_feature( + data['filename'].encode('utf8')), + 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), + 'image/encoded': dataset_util.bytes_feature(encoded_jpg), + 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax), + 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), + 'image/object/class/label': dataset_util.int64_list_feature(classes), + 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj), + 'image/object/truncated': dataset_util.int64_list_feature(truncated), + 'image/object/view': dataset_util.bytes_list_feature(poses), + })) + return example + + +def main(_): + if FLAGS.set not in SETS: + raise ValueError('set must be in : {}'.format(SETS)) + if FLAGS.year not in YEARS: + raise ValueError('year must be in : {}'.format(YEARS)) + + data_dir = FLAGS.data_dir + years = ['VOC2007', 'VOC2012'] + if FLAGS.year != 'merged': + years = [FLAGS.year] + + writer = tf.python_io.TFRecordWriter(FLAGS.output_path) + + label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path) + + for year in years: + logging.info('Reading from PASCAL %s dataset.', year) + examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main', + 'aeroplane_' + FLAGS.set + '.txt') + annotations_dir = os.path.join(data_dir, year, FLAGS.annotations_dir) + examples_list = dataset_util.read_examples_list(examples_path) + for idx, example in enumerate(examples_list): + if idx % 100 == 0: + logging.info('On image %d of %d', idx, len(examples_list)) + path = os.path.join(annotations_dir, example + '.xml') + with tf.gfile.GFile(path, 'r') as fid: + xml_str = fid.read() + xml = etree.fromstring(xml_str) + 
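# dataset_util.recursive_parse_xml_to_dict turns the VOC annotation XML into
# nested dicts keyed by tag, with every repeated <object> element collected
# into a list and all leaf values kept as strings; that is the `data` dict
# consumed by dict_to_tf_example above. It has roughly this shape (the literal
# below is a hypothetical example, not real data):
example_data = {
    'folder': 'VOC2007',
    'filename': '000001.jpg',
    'size': {'width': '353', 'height': '500', 'depth': '3'},
    'object': [{
        'name': 'dog',
        'pose': 'Left',
        'truncated': '1',
        'difficult': '0',
        'bndbox': {'xmin': '48', 'ymin': '240', 'xmax': '195', 'ymax': '371'},
    }],
}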
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation'] + + tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict, + FLAGS.ignore_difficult_instances) + writer.write(tf_example.SerializeToString()) + + writer.close() + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pascal_tf_record_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pascal_tf_record_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c751a1391c5a47bc676de5a4d701d52c75b4334d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pascal_tf_record_test.py @@ -0,0 +1,121 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Test for create_pascal_tf_record.py.""" + +import os + +import numpy as np +import PIL.Image +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import create_pascal_tf_record + + +class CreatePascalTFRecordTest(tf.test.TestCase): + + def _assertProtoEqual(self, proto_field, expectation): + """Helper function to assert if a proto field equals some value. + + Args: + proto_field: The protobuf field to compare. + expectation: The expected value of the protobuf field. 
+ """ + proto_list = [p for p in proto_field] + self.assertListEqual(proto_list, expectation) + + def test_dict_to_tf_example(self): + image_file_name = 'tmp_image.jpg' + image_data = np.random.rand(256, 256, 3) + save_path = os.path.join(self.get_temp_dir(), image_file_name) + image = PIL.Image.fromarray(image_data, 'RGB') + image.save(save_path) + + data = { + 'folder': '', + 'filename': image_file_name, + 'size': { + 'height': 256, + 'width': 256, + }, + 'object': [ + { + 'difficult': 1, + 'bndbox': { + 'xmin': 64, + 'ymin': 64, + 'xmax': 192, + 'ymax': 192, + }, + 'name': 'person', + 'truncated': 0, + 'pose': '', + }, + ], + } + + label_map_dict = { + 'background': 0, + 'person': 1, + 'notperson': 2, + } + + example = create_pascal_tf_record.dict_to_tf_example( + data, self.get_temp_dir(), label_map_dict, image_subdirectory='') + self._assertProtoEqual( + example.features.feature['image/height'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/width'].int64_list.value, [256]) + self._assertProtoEqual( + example.features.feature['image/filename'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/source_id'].bytes_list.value, + [six.b(image_file_name)]) + self._assertProtoEqual( + example.features.feature['image/format'].bytes_list.value, + [six.b('jpeg')]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymin'].float_list.value, + [0.25]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/xmax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/bbox/ymax'].float_list.value, + [0.75]) + self._assertProtoEqual( + example.features.feature['image/object/class/text'].bytes_list.value, + [six.b('person')]) + self._assertProtoEqual( + example.features.feature['image/object/class/label'].int64_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/difficult'].int64_list.value, + [1]) + self._assertProtoEqual( + example.features.feature['image/object/truncated'].int64_list.value, + [0]) + self._assertProtoEqual( + example.features.feature['image/object/view'].bytes_list.value, + [six.b('')]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pet_tf_record.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pet_tf_record.py new file mode 100644 index 0000000000000000000000000000000000000000..78524b5054229c101a5894290bf97dcc11c6d815 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pet_tf_record.py @@ -0,0 +1,318 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +r"""Convert the Oxford pet dataset to TFRecord for object_detection. + +See: O. M. Parkhi, A. Vedaldi, A. Zisserman, C. V. Jawahar + Cats and Dogs + IEEE Conference on Computer Vision and Pattern Recognition, 2012 + http://www.robots.ox.ac.uk/~vgg/data/pets/ + +Example usage: + python object_detection/dataset_tools/create_pet_tf_record.py \ + --data_dir=/home/user/pet \ + --output_dir=/home/user/pet/output +""" + +import hashlib +import io +import logging +import os +import random +import re + +import contextlib2 +from lxml import etree +import numpy as np +import PIL.Image +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import tf_record_creation_util +from object_detection.utils import dataset_util +from object_detection.utils import label_map_util + +flags = tf.app.flags +flags.DEFINE_string('data_dir', '', 'Root directory to raw pet dataset.') +flags.DEFINE_string('output_dir', '', 'Path to directory to output TFRecords.') +flags.DEFINE_string('label_map_path', 'data/pet_label_map.pbtxt', + 'Path to label map proto') +flags.DEFINE_boolean('faces_only', True, 'If True, generates bounding boxes ' + 'for pet faces. Otherwise generates bounding boxes (as ' + 'well as segmentations for full pet bodies). Note that ' + 'in the latter case, the resulting files are much larger.') +flags.DEFINE_string('mask_type', 'png', 'How to represent instance ' + 'segmentation masks. Options are "png" or "numerical".') +flags.DEFINE_integer('num_shards', 10, 'Number of TFRecord shards') + +FLAGS = flags.FLAGS + + +def get_class_name_from_filename(file_name): + """Gets the class name from a file. + + Args: + file_name: The file name to get the class name from. + ie. "american_pit_bull_terrier_105.jpg" + + Returns: + A string of the class name. + """ + match = re.match(r'([A-Za-z_]+)(_[0-9]+\.jpg)', file_name, re.I) + return match.groups()[0] + + +def dict_to_tf_example(data, + mask_path, + label_map_dict, + image_subdirectory, + ignore_difficult_instances=False, + faces_only=True, + mask_type='png'): + """Convert XML derived dict to tf.Example proto. + + Notice that this function normalizes the bounding box coordinates provided + by the raw data. + + Args: + data: dict holding PASCAL XML fields for a single image (obtained by + running dataset_util.recursive_parse_xml_to_dict) + mask_path: String path to PNG encoded mask. + label_map_dict: A map from string label names to integers ids. + image_subdirectory: String specifying subdirectory within the + Pascal dataset directory holding the actual image data. + ignore_difficult_instances: Whether to skip difficult instances in the + dataset (default: False). + faces_only: If True, generates bounding boxes for pet faces. Otherwise + generates bounding boxes (as well as segmentations for full pet bodies). + mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to + smaller file sizes. + + Returns: + example: The converted tf.Example. 
+ + Raises: + ValueError: if the image pointed to by data['filename'] is not a valid JPEG + """ + img_path = os.path.join(image_subdirectory, data['filename']) + with tf.gfile.GFile(img_path, 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = PIL.Image.open(encoded_jpg_io) + if image.format != 'JPEG': + raise ValueError('Image format not JPEG') + key = hashlib.sha256(encoded_jpg).hexdigest() + + with tf.gfile.GFile(mask_path, 'rb') as fid: + encoded_mask_png = fid.read() + encoded_png_io = io.BytesIO(encoded_mask_png) + mask = PIL.Image.open(encoded_png_io) + if mask.format != 'PNG': + raise ValueError('Mask format not PNG') + + mask_np = np.asarray(mask) + nonbackground_indices_x = np.any(mask_np != 2, axis=0) + nonbackground_indices_y = np.any(mask_np != 2, axis=1) + nonzero_x_indices = np.where(nonbackground_indices_x) + nonzero_y_indices = np.where(nonbackground_indices_y) + + width = int(data['size']['width']) + height = int(data['size']['height']) + + xmins = [] + ymins = [] + xmaxs = [] + ymaxs = [] + classes = [] + classes_text = [] + truncated = [] + poses = [] + difficult_obj = [] + masks = [] + if 'object' in data: + for obj in data['object']: + difficult = bool(int(obj['difficult'])) + if ignore_difficult_instances and difficult: + continue + difficult_obj.append(int(difficult)) + + if faces_only: + xmin = float(obj['bndbox']['xmin']) + xmax = float(obj['bndbox']['xmax']) + ymin = float(obj['bndbox']['ymin']) + ymax = float(obj['bndbox']['ymax']) + else: + xmin = float(np.min(nonzero_x_indices)) + xmax = float(np.max(nonzero_x_indices)) + ymin = float(np.min(nonzero_y_indices)) + ymax = float(np.max(nonzero_y_indices)) + + xmins.append(xmin / width) + ymins.append(ymin / height) + xmaxs.append(xmax / width) + ymaxs.append(ymax / height) + class_name = get_class_name_from_filename(data['filename']) + classes_text.append(class_name.encode('utf8')) + classes.append(label_map_dict[class_name]) + truncated.append(int(obj['truncated'])) + poses.append(obj['pose'].encode('utf8')) + if not faces_only: + mask_remapped = (mask_np != 2).astype(np.uint8) + masks.append(mask_remapped) + + feature_dict = { + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature( + data['filename'].encode('utf8')), + 'image/source_id': dataset_util.bytes_feature( + data['filename'].encode('utf8')), + 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), + 'image/encoded': dataset_util.bytes_feature(encoded_jpg), + 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), + 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), + 'image/object/class/label': dataset_util.int64_list_feature(classes), + 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj), + 'image/object/truncated': dataset_util.int64_list_feature(truncated), + 'image/object/view': dataset_util.bytes_list_feature(poses), + } + if not faces_only: + if mask_type == 'numerical': + mask_stack = np.stack(masks).astype(np.float32) + masks_flattened = np.reshape(mask_stack, [-1]) + feature_dict['image/object/mask'] = ( + dataset_util.float_list_feature(masks_flattened.tolist())) + elif 
mask_type == 'png': + encoded_mask_png_list = [] + for mask in masks: + img = PIL.Image.fromarray(mask) + output = io.BytesIO() + img.save(output, format='PNG') + encoded_mask_png_list.append(output.getvalue()) + feature_dict['image/object/mask'] = ( + dataset_util.bytes_list_feature(encoded_mask_png_list)) + + example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) + return example + + +def create_tf_record(output_filename, + num_shards, + label_map_dict, + annotations_dir, + image_dir, + examples, + faces_only=True, + mask_type='png'): + """Creates a TFRecord file from examples. + + Args: + output_filename: Path to where output file is saved. + num_shards: Number of shards for output file. + label_map_dict: The label map dictionary. + annotations_dir: Directory where annotation files are stored. + image_dir: Directory where image files are stored. + examples: Examples to parse and save to tf record. + faces_only: If True, generates bounding boxes for pet faces. Otherwise + generates bounding boxes (as well as segmentations for full pet bodies). + mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to + smaller file sizes. + """ + with contextlib2.ExitStack() as tf_record_close_stack: + output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( + tf_record_close_stack, output_filename, num_shards) + for idx, example in enumerate(examples): + if idx % 100 == 0: + logging.info('On image %d of %d', idx, len(examples)) + xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml') + mask_path = os.path.join(annotations_dir, 'trimaps', example + '.png') + + if not os.path.exists(xml_path): + logging.warning('Could not find %s, ignoring example.', xml_path) + continue + with tf.gfile.GFile(xml_path, 'r') as fid: + xml_str = fid.read() + xml = etree.fromstring(xml_str) + data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation'] + + try: + tf_example = dict_to_tf_example( + data, + mask_path, + label_map_dict, + image_dir, + faces_only=faces_only, + mask_type=mask_type) + if tf_example: + shard_idx = idx % num_shards + output_tfrecords[shard_idx].write(tf_example.SerializeToString()) + except ValueError: + logging.warning('Invalid example: %s, ignoring.', xml_path) + + +# TODO(derekjchow): Add test for pet/PASCAL main files. +def main(_): + data_dir = FLAGS.data_dir + label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path) + + logging.info('Reading from Pet dataset.') + image_dir = os.path.join(data_dir, 'images') + annotations_dir = os.path.join(data_dir, 'annotations') + examples_path = os.path.join(annotations_dir, 'trainval.txt') + examples_list = dataset_util.read_examples_list(examples_path) + + # Test images are not included in the downloaded data set, so we shall perform + # our own split. 
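# The sharded writers used by create_tf_record() above (and by the COCO and OID
# converters in this patch) are opened through contextlib2.ExitStack so every
# shard is closed on exit; the pet converter assigns examples round-robin
# (idx % num_shards), while the OID converter hashes the hex image id instead.
# A minimal self-contained sketch of the same pattern (write_sharded is a
# hypothetical name; the "-%05d-of-%05d" suffix matches the shard files the
# COCO test asserts on):
import contextlib2
import tensorflow.compat.v1 as tf


def write_sharded(examples, base_path, num_shards):
  with contextlib2.ExitStack() as stack:
    writers = [
        stack.enter_context(
            tf.python_io.TFRecordWriter(
                '%s-%05d-of-%05d' % (base_path, shard, num_shards)))
        for shard in range(num_shards)
    ]
    for idx, example in enumerate(examples):
      # Round-robin shard assignment, as in create_tf_record() above.
      writers[idx % num_shards].write(example.SerializeToString())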
+ random.seed(42) + random.shuffle(examples_list) + num_examples = len(examples_list) + num_train = int(0.7 * num_examples) + train_examples = examples_list[:num_train] + val_examples = examples_list[num_train:] + logging.info('%d training and %d validation examples.', + len(train_examples), len(val_examples)) + + train_output_path = os.path.join(FLAGS.output_dir, 'pet_faces_train.record') + val_output_path = os.path.join(FLAGS.output_dir, 'pet_faces_val.record') + if not FLAGS.faces_only: + train_output_path = os.path.join(FLAGS.output_dir, + 'pets_fullbody_with_masks_train.record') + val_output_path = os.path.join(FLAGS.output_dir, + 'pets_fullbody_with_masks_val.record') + create_tf_record( + train_output_path, + FLAGS.num_shards, + label_map_dict, + annotations_dir, + image_dir, + train_examples, + faces_only=FLAGS.faces_only, + mask_type=FLAGS.mask_type) + create_tf_record( + val_output_path, + FLAGS.num_shards, + label_map_dict, + annotations_dir, + image_dir, + val_examples, + faces_only=FLAGS.faces_only, + mask_type=FLAGS.mask_type) + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pycocotools_package.sh b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pycocotools_package.sh new file mode 100644 index 0000000000000000000000000000000000000000..88ea5114c237503ca63714b1276b89b3639b9926 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_pycocotools_package.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download pycocotools and make package for CMLE jobs. +# +# usage: +# bash object_detection/dataset_tools/create_pycocotools_package.sh \ +# /tmp/pycocotools +set -e + +if [ -z "$1" ]; then + echo "usage create_pycocotools_package.sh [output dir]" + exit +fi + +# Create the output directory. 
+OUTPUT_DIR="${1%/}" +SCRATCH_DIR="${OUTPUT_DIR}/raw" +mkdir -p "${OUTPUT_DIR}" +mkdir -p "${SCRATCH_DIR}" + +cd ${SCRATCH_DIR} +git clone https://github.com/cocodataset/cocoapi.git +cd cocoapi/PythonAPI && mv ../common ./ + +sed "s/\.\.\/common/common/g" setup.py > setup.py.updated +cp -f setup.py.updated setup.py +rm setup.py.updated + +sed "s/\.\.\/common/common/g" pycocotools/_mask.pyx > _mask.pyx.updated +cp -f _mask.pyx.updated pycocotools/_mask.pyx +rm _mask.pyx.updated + +sed "s/import matplotlib\.pyplot as plt/import matplotlib;matplotlib\.use\(\'Agg\'\);import matplotlib\.pyplot as plt/g" pycocotools/coco.py > coco.py.updated +cp -f coco.py.updated pycocotools/coco.py +rm coco.py.updated + +cd "${OUTPUT_DIR}" +tar -czf pycocotools-2.0.tar.gz -C "${SCRATCH_DIR}/cocoapi/" PythonAPI/ +rm -rf ${SCRATCH_DIR} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_tf_record.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_tf_record.py new file mode 100644 index 0000000000000000000000000000000000000000..0f0fd4b8f8bd1b2a948d85f5b6c7f56e47c74d54 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/create_tf_record.py @@ -0,0 +1,106 @@ +""" +Usage: + # From tensorflow/models/ + # Create train data: + python generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=train.record + # Create test data: + python generate_tfrecord.py --csv_input=data/test_labels.csv --output_path=test.record + +python3 research/object_detection/dataset_tools/create_tf_record.py --csv_input=data/test_labels.csv --output_path=test.record --image_dir=images/test +""" +from __future__ import division +from __future__ import print_function +from __future__ import absolute_import + +import os +import io +import pandas as pd +import tensorflow as tf + +from PIL import Image +import sys +sys.path.append('../') +from object_detection.utils import dataset_util +from collections import namedtuple, OrderedDict + +flags = tf.app.flags +flags.DEFINE_string('csv_input', '', 'data/train_labels.csv') +flags.DEFINE_string('output_path', '', 'data/train.record') +flags.DEFINE_string('image_dir', '', 'images/train') +FLAGS = flags.FLAGS + + +# TO-DO replace this with label map +def class_text_to_int(row_label): + if row_label == 'myrobot': + return 1 + if row_label == 'corobot': + return 2 + else: + return 0 + + +def split(df, group): + data = namedtuple('data', ['filename', 'object']) + gb = df.groupby(group) + return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)] + + +def create_tf_example(group, path): + with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = Image.open(encoded_jpg_io) + width, height = image.size + + filename = group.filename.encode('utf8') + image_format = b'jpg' + xmins = [] + xmaxs = [] + ymins = [] + ymaxs = [] + classes_text = [] + classes = [] + + for index, row in group.object.iterrows(): + xmins.append(row['xmin'] / width) + xmaxs.append(row['xmax'] / width) + ymins.append(row['ymin'] / height) + ymaxs.append(row['ymax'] / height) + classes_text.append(row['class'].encode('utf8')) + classes.append(class_text_to_int(row['class'])) + + tf_example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 
'image/filename': dataset_util.bytes_feature(filename), + 'image/source_id': dataset_util.bytes_feature(filename), + 'image/encoded': dataset_util.bytes_feature(encoded_jpg), + 'image/format': dataset_util.bytes_feature(image_format), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), + 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), + 'image/object/class/label': dataset_util.int64_list_feature(classes), + })) + return tf_example + + +def main(_): + writer = tf.python_io.TFRecordWriter(FLAGS.output_path) + path = os.path.join(FLAGS.image_dir) + examples = pd.read_csv(FLAGS.csv_input) + grouped = split(examples, 'filename') + for group in grouped: + tf_example = create_tf_example(group, path) + writer.write(tf_example.SerializeToString()) + + writer.close() + output_path = os.path.join(os.getcwd(), FLAGS.output_path) + print('Successfully created the TFRecords: {}'.format(output_path)) + + +if __name__ == '__main__': + tf.app.run() + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/densepose/UV_symmetry_transforms.mat b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/densepose/UV_symmetry_transforms.mat new file mode 100644 index 0000000000000000000000000000000000000000..2836cac4d6b37a16fbff8ac6efda1d7ecb88a711 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/densepose/UV_symmetry_transforms.mat differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/download_and_preprocess_ava.sh b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/download_and_preprocess_ava.sh new file mode 100755 index 0000000000000000000000000000000000000000..723f6a7fcf5421e4bbbd015b8579b14cf3b0d61f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/download_and_preprocess_ava.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# This script downloads the videos for the AVA dataset. There are no arguments. +# Copy this script into the desired parent directory of the ava_vids_raw/ +# directory created in this script to store the raw videos. + +mkdir ava_vids_raw +cd ava_vids_raw + +curl -O s3.amazonaws.com/ava-dataset/annotations/ava_file_names_trainval_v2.1.txt + +echo "Downloading all videos." + +cat "ava_file_names_trainval_v2.1.txt" | while read line +do + curl -O s3.amazonaws.com/ava-dataset/trainval/$line + echo "Downloaded " $line +done + +rm "ava_file_names_trainval_v2.1.txt" +cd .. + +# Trimming causes issues with frame seeking in the python script, so it is best left out. +# If included, need to modify the python script to subtract 900 seconds wheen seeking. + +# echo "Trimming all videos." 
+ +# mkdir ava_vids_trimmed +# for filename in ava_vids_raw/*; do +# ffmpeg -ss 900 -to 1800 -i $filename -c copy ava_vids_trimmed/${filename##*/} +# done diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/download_and_preprocess_mscoco.sh b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/download_and_preprocess_mscoco.sh new file mode 100644 index 0000000000000000000000000000000000000000..843ba86938d35eed18dd6f7968ea87c90551fc13 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/download_and_preprocess_mscoco.sh @@ -0,0 +1,106 @@ +#!/bin/bash +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download and preprocess the MSCOCO data set for detection. +# +# The outputs of this script are TFRecord files containing serialized +# tf.Example protocol buffers. See create_coco_tf_record.py for details of how +# the tf.Example protocol buffers are constructed and see +# http://cocodataset.org/#overview for an overview of the dataset. +# +# usage: +# bash object_detection/dataset_tools/download_and_preprocess_mscoco.sh \ +# /tmp/mscoco +set -e + +if [ -z "$1" ]; then + echo "usage download_and_preprocess_mscoco.sh [data dir]" + exit +fi + +if [ "$(uname)" == "Darwin" ]; then + UNZIP="tar -xf" +else + UNZIP="unzip -nq" +fi + +# Create the output directories. +OUTPUT_DIR="${1%/}" +SCRATCH_DIR="${OUTPUT_DIR}/raw-data" +mkdir -p "${OUTPUT_DIR}" +mkdir -p "${SCRATCH_DIR}" +CURRENT_DIR=$(pwd) + +# Helper function to download and unpack a .zip file. +function download_and_unzip() { + local BASE_URL=${1} + local FILENAME=${2} + + if [ ! -f ${FILENAME} ]; then + echo "Downloading ${FILENAME} to $(pwd)" + wget -nd -c "${BASE_URL}/${FILENAME}" + else + echo "Skipping download of ${FILENAME}" + fi + echo "Unzipping ${FILENAME}" + ${UNZIP} ${FILENAME} +} + +cd ${SCRATCH_DIR} + +# Download the images. +BASE_IMAGE_URL="http://images.cocodataset.org/zips" + +TRAIN_IMAGE_FILE="train2017.zip" +download_and_unzip ${BASE_IMAGE_URL} ${TRAIN_IMAGE_FILE} +TRAIN_IMAGE_DIR="${SCRATCH_DIR}/train2017" + +VAL_IMAGE_FILE="val2017.zip" +download_and_unzip ${BASE_IMAGE_URL} ${VAL_IMAGE_FILE} +VAL_IMAGE_DIR="${SCRATCH_DIR}/val2017" + +TEST_IMAGE_FILE="test2017.zip" +download_and_unzip ${BASE_IMAGE_URL} ${TEST_IMAGE_FILE} +TEST_IMAGE_DIR="${SCRATCH_DIR}/test2017" + +# Download the annotations. +BASE_INSTANCES_URL="http://images.cocodataset.org/annotations" +INSTANCES_FILE="annotations_trainval2017.zip" +download_and_unzip ${BASE_INSTANCES_URL} ${INSTANCES_FILE} + +TRAIN_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/instances_train2017.json" +VAL_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/instances_val2017.json" + +# Download the test image info. 
+BASE_IMAGE_INFO_URL="http://images.cocodataset.org/annotations" +IMAGE_INFO_FILE="image_info_test2017.zip" +download_and_unzip ${BASE_IMAGE_INFO_URL} ${IMAGE_INFO_FILE} + +TESTDEV_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/image_info_test-dev2017.json" + +# Build TFRecords of the image data. +cd "${CURRENT_DIR}" +python object_detection/dataset_tools/create_coco_tf_record.py \ + --logtostderr \ + --include_masks \ + --train_image_dir="${TRAIN_IMAGE_DIR}" \ + --val_image_dir="${VAL_IMAGE_DIR}" \ + --test_image_dir="${TEST_IMAGE_DIR}" \ + --train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \ + --val_annotations_file="${VAL_ANNOTATIONS_FILE}" \ + --testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \ + --output_dir="${OUTPUT_DIR}" + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_hierarchical_labels_expansion.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_hierarchical_labels_expansion.py new file mode 100644 index 0000000000000000000000000000000000000000..b3fcf1431e63326874a25bae1cf9682181e78beb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_hierarchical_labels_expansion.py @@ -0,0 +1,233 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""An executable to expand image-level labels, boxes and segments. + +The expansion is performed using class hierarchy, provided in JSON file. + +The expected file formats are the following: +- for box and segment files: CSV file is expected to have LabelName field +- for image-level labels: CSV file is expected to have LabelName and Confidence +fields + +Note, that LabelName is the only field used for expansion. + +Example usage: +python models/research/object_detection/dataset_tools/\ +oid_hierarchical_labels_expansion.py \ +--json_hierarchy_file= \ +--input_annotations= \ +--output_annotations= \ +--annotation_type=<1 (for boxes and segments) or 2 (for image-level labels)> +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import json +from absl import app +from absl import flags +import six + +flags.DEFINE_string( + 'json_hierarchy_file', None, + 'Path to the file containing label hierarchy in JSON format.') +flags.DEFINE_string( + 'input_annotations', None, 'Path to Open Images annotations file' + '(either bounding boxes, segments or image-level labels).') +flags.DEFINE_string('output_annotations', None, 'Path to the output file.') +flags.DEFINE_integer( + 'annotation_type', None, + 'Type of the input annotations: 1 - boxes or segments,' + '2 - image-level labels.' +) + +FLAGS = flags.FLAGS + + +def _update_dict(initial_dict, update): + """Updates dictionary with update content. + + Args: + initial_dict: initial dictionary. 
+ update: updated dictionary. + """ + + for key, value_list in update.items(): + if key in initial_dict: + initial_dict[key].update(value_list) + else: + initial_dict[key] = set(value_list) + + +def _build_plain_hierarchy(hierarchy, skip_root=False): + """Expands tree hierarchy representation to parent-child dictionary. + + Args: + hierarchy: labels hierarchy as JSON file. + skip_root: if true skips root from the processing (done for the case when all + classes under hierarchy are collected under virtual node). + + Returns: + keyed_parent - dictionary of parent - all its children nodes. + keyed_child - dictionary of children - all its parent nodes + children - all children of the current node. + """ + all_children = set([]) + all_keyed_parent = {} + all_keyed_child = {} + if 'Subcategory' in hierarchy: + for node in hierarchy['Subcategory']: + keyed_parent, keyed_child, children = _build_plain_hierarchy(node) + # Update is not done through dict.update() since some children have multi- + # ple parents in the hiearchy. + _update_dict(all_keyed_parent, keyed_parent) + _update_dict(all_keyed_child, keyed_child) + all_children.update(children) + + if not skip_root: + all_keyed_parent[hierarchy['LabelName']] = copy.deepcopy(all_children) + all_children.add(hierarchy['LabelName']) + for child, _ in all_keyed_child.items(): + all_keyed_child[child].add(hierarchy['LabelName']) + all_keyed_child[hierarchy['LabelName']] = set([]) + + return all_keyed_parent, all_keyed_child, all_children + + +class OIDHierarchicalLabelsExpansion(object): + """ Main class to perform labels hierachical expansion.""" + + def __init__(self, hierarchy): + """Constructor. + + Args: + hierarchy: labels hierarchy as JSON object. + """ + + self._hierarchy_keyed_parent, self._hierarchy_keyed_child, _ = ( + _build_plain_hierarchy(hierarchy, skip_root=True)) + + def expand_boxes_or_segments_from_csv(self, csv_row, + labelname_column_index=1): + """Expands a row containing bounding boxes/segments from CSV file. + + Args: + csv_row: a single row of Open Images released groundtruth file. + labelname_column_index: 0-based index of LabelName column in CSV file. + + Returns: + a list of strings (including the initial row) corresponding to the ground + truth expanded to multiple annotation for evaluation with Open Images + Challenge 2018/2019 metrics. + """ + # Row header is expected to be the following for boxes: + # ImageID,LabelName,Confidence,XMin,XMax,YMin,YMax,IsGroupOf + # Row header is expected to be the following for segments: + # ImageID,LabelName,ImageWidth,ImageHeight,XMin,XMax,YMin,YMax, + # IsGroupOf,Mask + split_csv_row = six.ensure_str(csv_row).split(',') + result = [csv_row] + assert split_csv_row[ + labelname_column_index] in self._hierarchy_keyed_child + parent_nodes = self._hierarchy_keyed_child[ + split_csv_row[labelname_column_index]] + for parent_node in parent_nodes: + split_csv_row[labelname_column_index] = parent_node + result.append(','.join(split_csv_row)) + return result + + def expand_labels_from_csv(self, + csv_row, + labelname_column_index=1, + confidence_column_index=2): + """Expands a row containing labels from CSV file. + + Args: + csv_row: a single row of Open Images released groundtruth file. + labelname_column_index: 0-based index of LabelName column in CSV file. + confidence_column_index: 0-based index of Confidence column in CSV file. 
+ + Returns: + a list of strings (including the initial row) corresponding to the ground + truth expanded to multiple annotation for evaluation with Open Images + Challenge 2018/2019 metrics. + """ + # Row header is expected to be exactly: + # ImageID,Source,LabelName,Confidence + split_csv_row = six.ensure_str(csv_row).split(',') + result = [csv_row] + if int(split_csv_row[confidence_column_index]) == 1: + assert split_csv_row[ + labelname_column_index] in self._hierarchy_keyed_child + parent_nodes = self._hierarchy_keyed_child[ + split_csv_row[labelname_column_index]] + for parent_node in parent_nodes: + split_csv_row[labelname_column_index] = parent_node + result.append(','.join(split_csv_row)) + else: + assert split_csv_row[ + labelname_column_index] in self._hierarchy_keyed_parent + child_nodes = self._hierarchy_keyed_parent[ + split_csv_row[labelname_column_index]] + for child_node in child_nodes: + split_csv_row[labelname_column_index] = child_node + result.append(','.join(split_csv_row)) + return result + + +def main(unused_args): + + del unused_args + + with open(FLAGS.json_hierarchy_file) as f: + hierarchy = json.load(f) + expansion_generator = OIDHierarchicalLabelsExpansion(hierarchy) + labels_file = False + if FLAGS.annotation_type == 2: + labels_file = True + elif FLAGS.annotation_type != 1: + print('--annotation_type expected value is 1 or 2.') + return -1 + confidence_column_index = -1 + labelname_column_index = -1 + with open(FLAGS.input_annotations, 'r') as source: + with open(FLAGS.output_annotations, 'w') as target: + header = source.readline() + target.writelines([header]) + column_names = header.strip().split(',') + labelname_column_index = column_names.index('LabelName') + if labels_file: + confidence_column_index = column_names.index('Confidence') + for line in source: + if labels_file: + expanded_lines = expansion_generator.expand_labels_from_csv( + line, labelname_column_index, confidence_column_index) + else: + expanded_lines = ( + expansion_generator.expand_boxes_or_segments_from_csv( + line, labelname_column_index)) + target.writelines(expanded_lines) + + +if __name__ == '__main__': + flags.mark_flag_as_required('json_hierarchy_file') + flags.mark_flag_as_required('input_annotations') + flags.mark_flag_as_required('output_annotations') + flags.mark_flag_as_required('annotation_type') + + app.run(main) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_hierarchical_labels_expansion_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_hierarchical_labels_expansion_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ca010c5bed3ee0a92f352596d18b3a515b654282 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_hierarchical_labels_expansion_test.py @@ -0,0 +1,116 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
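For readers who want to drive the expansion module above from Python rather than through its CLI flags, here is a minimal sketch; the toy hierarchy and CSV row are invented for illustration and are not real Open Images identifiers.

```python
# Minimal sketch: programmatic use of OIDHierarchicalLabelsExpansion.
# The hierarchy and CSV row below are toy values, not real Open Images data.
from object_detection.dataset_tools import oid_hierarchical_labels_expansion

hierarchy = {
    'LabelName': 'root',
    'Subcategory': [
        {'LabelName': 'vehicle', 'Subcategory': [{'LabelName': 'car'}]},
    ],
}
expander = (
    oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion(hierarchy))

# Image-level row format: ImageID,Source,LabelName,Confidence.
# A positive 'car' label is propagated up to its parent 'vehicle'.
expanded = expander.expand_labels_from_csv(
    'img1,verification,car,1', labelname_column_index=2,
    confidence_column_index=3)
print(expanded)
# ['img1,verification,car,1', 'img1,verification,vehicle,1']
```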
+# ============================================================================== +"""Tests for the OpenImages label expansion (OIDHierarchicalLabelsExpansion).""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import oid_hierarchical_labels_expansion + + +def create_test_data(): + hierarchy = { + 'LabelName': + 'a', + 'Subcategory': [{ + 'LabelName': 'b' + }, { + 'LabelName': + 'c', + 'Subcategory': [{ + 'LabelName': 'd' + }, { + 'LabelName': 'e' + }, { + 'LabelName': 'f', + 'Subcategory': [{ + 'LabelName': 'd' + },] + }] + }, { + 'LabelName': 'f', + 'Subcategory': [{ + 'LabelName': 'd' + },] + }] + } + bbox_rows = [ + '123,xclick,b,1,0.1,0.2,0.1,0.2,1,1,0,0,0', + '123,xclick,d,1,0.2,0.3,0.1,0.2,1,1,0,0,0' + ] + label_rows = [ + '123,verification,b,0', '123,verification,c,0', '124,verification,d,1' + ] + segm_rows = [ + '123,cc,b,100,100,0.1,0.2,0.1,0.2,0,MASK', + '123,cc,d,100,100,0.2,0.3,0.1,0.2,0,MASK', + ] + return hierarchy, bbox_rows, segm_rows, label_rows + + +class HierarchicalLabelsExpansionTest(tf.test.TestCase): + + def test_bbox_expansion(self): + hierarchy, bbox_rows, _, _ = create_test_data() + expansion_generator = ( + oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion( + hierarchy)) + all_result_rows = [] + for row in bbox_rows: + all_result_rows.extend( + expansion_generator.expand_boxes_or_segments_from_csv(row, 2)) + self.assertItemsEqual([ + '123,xclick,b,1,0.1,0.2,0.1,0.2,1,1,0,0,0', + '123,xclick,d,1,0.2,0.3,0.1,0.2,1,1,0,0,0', + '123,xclick,f,1,0.2,0.3,0.1,0.2,1,1,0,0,0', + '123,xclick,c,1,0.2,0.3,0.1,0.2,1,1,0,0,0' + ], all_result_rows) + + def test_segm_expansion(self): + hierarchy, _, segm_rows, _ = create_test_data() + expansion_generator = ( + oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion( + hierarchy)) + all_result_rows = [] + for row in segm_rows: + all_result_rows.extend( + expansion_generator.expand_boxes_or_segments_from_csv(row, 2)) + self.assertItemsEqual([ + '123,cc,b,100,100,0.1,0.2,0.1,0.2,0,MASK', + '123,cc,d,100,100,0.2,0.3,0.1,0.2,0,MASK', + '123,cc,f,100,100,0.2,0.3,0.1,0.2,0,MASK', + '123,cc,c,100,100,0.2,0.3,0.1,0.2,0,MASK' + ], all_result_rows) + + def test_labels_expansion(self): + hierarchy, _, _, label_rows = create_test_data() + expansion_generator = ( + oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion( + hierarchy)) + all_result_rows = [] + for row in label_rows: + all_result_rows.extend( + expansion_generator.expand_labels_from_csv(row, 2, 3)) + self.assertItemsEqual([ + '123,verification,b,0', '123,verification,c,0', '123,verification,d,0', + '123,verification,f,0', '123,verification,e,0', '124,verification,d,1', + '124,verification,f,1', '124,verification,c,1' + ], all_result_rows) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_tfrecord_creation.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_tfrecord_creation.py new file mode 100644 index 0000000000000000000000000000000000000000..0cddbbb9cd3ac5df3b8c1bde3e535b8f46fc9988 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_tfrecord_creation.py @@ -0,0 +1,112 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Utilities for creating TFRecords of TF examples for the Open Images dataset. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import six +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields +from object_detection.utils import dataset_util + + +def tf_example_from_annotations_data_frame(annotations_data_frame, label_map, + encoded_image): + """Populates a TF Example message with image annotations from a data frame. + + Args: + annotations_data_frame: Data frame containing the annotations for a single + image. + label_map: String to integer label map. + encoded_image: The encoded image string + + Returns: + The populated TF Example, if the label of at least one object is present in + label_map. Otherwise, returns None. + """ + + filtered_data_frame = annotations_data_frame[ + annotations_data_frame.LabelName.isin(label_map)] + filtered_data_frame_boxes = filtered_data_frame[ + ~filtered_data_frame.YMin.isnull()] + filtered_data_frame_labels = filtered_data_frame[ + filtered_data_frame.YMin.isnull()] + image_id = annotations_data_frame.ImageID.iloc[0] + + feature_map = { + standard_fields.TfExampleFields.object_bbox_ymin: + dataset_util.float_list_feature( + filtered_data_frame_boxes.YMin.to_numpy()), + standard_fields.TfExampleFields.object_bbox_xmin: + dataset_util.float_list_feature( + filtered_data_frame_boxes.XMin.to_numpy()), + standard_fields.TfExampleFields.object_bbox_ymax: + dataset_util.float_list_feature( + filtered_data_frame_boxes.YMax.to_numpy()), + standard_fields.TfExampleFields.object_bbox_xmax: + dataset_util.float_list_feature( + filtered_data_frame_boxes.XMax.to_numpy()), + standard_fields.TfExampleFields.object_class_text: + dataset_util.bytes_list_feature([ + six.ensure_binary(label_text) + for label_text in filtered_data_frame_boxes.LabelName.to_numpy() + ]), + standard_fields.TfExampleFields.object_class_label: + dataset_util.int64_list_feature( + filtered_data_frame_boxes.LabelName.map( + lambda x: label_map[x]).to_numpy()), + standard_fields.TfExampleFields.filename: + dataset_util.bytes_feature( + six.ensure_binary('{}.jpg'.format(image_id))), + standard_fields.TfExampleFields.source_id: + dataset_util.bytes_feature(six.ensure_binary(image_id)), + standard_fields.TfExampleFields.image_encoded: + dataset_util.bytes_feature(six.ensure_binary(encoded_image)), + } + + if 'IsGroupOf' in filtered_data_frame.columns: + feature_map[standard_fields.TfExampleFields. + object_group_of] = dataset_util.int64_list_feature( + filtered_data_frame_boxes.IsGroupOf.to_numpy().astype(int)) + if 'IsOccluded' in filtered_data_frame.columns: + feature_map[standard_fields.TfExampleFields. 
+ object_occluded] = dataset_util.int64_list_feature( + filtered_data_frame_boxes.IsOccluded.to_numpy().astype( + int)) + if 'IsTruncated' in filtered_data_frame.columns: + feature_map[standard_fields.TfExampleFields. + object_truncated] = dataset_util.int64_list_feature( + filtered_data_frame_boxes.IsTruncated.to_numpy().astype( + int)) + if 'IsDepiction' in filtered_data_frame.columns: + feature_map[standard_fields.TfExampleFields. + object_depiction] = dataset_util.int64_list_feature( + filtered_data_frame_boxes.IsDepiction.to_numpy().astype( + int)) + + if 'ConfidenceImageLabel' in filtered_data_frame_labels.columns: + feature_map[standard_fields.TfExampleFields. + image_class_label] = dataset_util.int64_list_feature( + filtered_data_frame_labels.LabelName.map( + lambda x: label_map[x]).to_numpy()) + feature_map[standard_fields.TfExampleFields + .image_class_text] = dataset_util.bytes_list_feature([ + six.ensure_binary(label_text) for label_text in + filtered_data_frame_labels.LabelName.to_numpy() + ]), + return tf.train.Example(features=tf.train.Features(feature=feature_map)) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_tfrecord_creation_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_tfrecord_creation_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b1e945f46d6159104735ea809c17ebe9b3cf73d1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/oid_tfrecord_creation_test.py @@ -0,0 +1,200 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
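As a complement to the unit test that follows, a short sketch of how this helper might feed a TFRecord writer; the DataFrame contents, label map, and file paths below are placeholders, not values from this repository.

```python
# Illustrative sketch: building one TF Example from a pandas DataFrame and
# writing it to a TFRecord. All values below are placeholders.
import pandas as pd
import tensorflow.compat.v1 as tf

from object_detection.dataset_tools import oid_tfrecord_creation

label_map = {'/m/01g317': 1}  # LabelName -> integer class id (placeholder)
annotations = pd.DataFrame({
    'ImageID': ['img1', 'img1'],
    'LabelName': ['/m/01g317', '/m/01g317'],
    'YMin': [0.1, 0.4], 'XMin': [0.2, 0.5],
    'YMax': [0.3, 0.6], 'XMax': [0.4, 0.7],
    'IsGroupOf': [0, 0],
})

with open('img1.jpg', 'rb') as f:  # placeholder image path
  encoded_jpeg = f.read()

tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
    annotations, label_map, encoded_jpeg)
with tf.python_io.TFRecordWriter('oid_subset.tfrecord') as writer:
  writer.write(tf_example.SerializeToString())
```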
+# ============================================================================== +"""Tests for oid_tfrecord_creation.py.""" + +import pandas as pd +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import oid_tfrecord_creation + + +def create_test_data(): + data = { + 'ImageID': ['i1', 'i1', 'i1', 'i1', 'i1', 'i2', 'i2'], + 'LabelName': ['a', 'a', 'b', 'b', 'c', 'b', 'c'], + 'YMin': [0.3, 0.6, 0.8, 0.1, None, 0.0, 0.0], + 'XMin': [0.1, 0.3, 0.7, 0.0, None, 0.1, 0.1], + 'XMax': [0.2, 0.3, 0.8, 0.5, None, 0.9, 0.9], + 'YMax': [0.3, 0.6, 1, 0.8, None, 0.8, 0.8], + 'IsOccluded': [0, 1, 1, 0, None, 0, 0], + 'IsTruncated': [0, 0, 0, 1, None, 0, 0], + 'IsGroupOf': [0, 0, 0, 0, None, 0, 1], + 'IsDepiction': [1, 0, 0, 0, None, 0, 0], + 'ConfidenceImageLabel': [None, None, None, None, 0, None, None], + } + df = pd.DataFrame(data=data) + label_map = {'a': 0, 'b': 1, 'c': 2} + return label_map, df + + +class TfExampleFromAnnotationsDataFrameTests(tf.test.TestCase): + + def test_simple(self): + label_map, df = create_test_data() + + tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( + df[df.ImageID == 'i1'], label_map, 'encoded_image_test') + self.assertProtoEquals(six.ensure_str(""" + features { + feature { + key: "image/encoded" + value { bytes_list { value: "encoded_image_test" } } } + feature { + key: "image/filename" + value { bytes_list { value: "i1.jpg" } } } + feature { + key: "image/object/bbox/ymin" + value { float_list { value: [0.3, 0.6, 0.8, 0.1] } } } + feature { + key: "image/object/bbox/xmin" + value { float_list { value: [0.1, 0.3, 0.7, 0.0] } } } + feature { + key: "image/object/bbox/ymax" + value { float_list { value: [0.3, 0.6, 1.0, 0.8] } } } + feature { + key: "image/object/bbox/xmax" + value { float_list { value: [0.2, 0.3, 0.8, 0.5] } } } + feature { + key: "image/object/class/label" + value { int64_list { value: [0, 0, 1, 1] } } } + feature { + key: "image/object/class/text" + value { bytes_list { value: ["a", "a", "b", "b"] } } } + feature { + key: "image/source_id" + value { bytes_list { value: "i1" } } } + feature { + key: "image/object/depiction" + value { int64_list { value: [1, 0, 0, 0] } } } + feature { + key: "image/object/group_of" + value { int64_list { value: [0, 0, 0, 0] } } } + feature { + key: "image/object/occluded" + value { int64_list { value: [0, 1, 1, 0] } } } + feature { + key: "image/object/truncated" + value { int64_list { value: [0, 0, 0, 1] } } } + feature { + key: "image/class/label" + value { int64_list { value: [2] } } } + feature { + key: "image/class/text" + value { bytes_list { value: ["c"] } } } } + """), tf_example) + + def test_no_attributes(self): + label_map, df = create_test_data() + + del df['IsDepiction'] + del df['IsGroupOf'] + del df['IsOccluded'] + del df['IsTruncated'] + del df['ConfidenceImageLabel'] + + tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( + df[df.ImageID == 'i2'], label_map, 'encoded_image_test') + self.assertProtoEquals(six.ensure_str(""" + features { + feature { + key: "image/encoded" + value { bytes_list { value: "encoded_image_test" } } } + feature { + key: "image/filename" + value { bytes_list { value: "i2.jpg" } } } + feature { + key: "image/object/bbox/ymin" + value { float_list { value: [0.0, 0.0] } } } + feature { + key: "image/object/bbox/xmin" + value { float_list { value: [0.1, 0.1] } } } + feature { + key: "image/object/bbox/ymax" + value { float_list { value: [0.8, 0.8] } } } + feature { + key: "image/object/bbox/xmax" + 
value { float_list { value: [0.9, 0.9] } } } + feature { + key: "image/object/class/label" + value { int64_list { value: [1, 2] } } } + feature { + key: "image/object/class/text" + value { bytes_list { value: ["b", "c"] } } } + feature { + key: "image/source_id" + value { bytes_list { value: "i2" } } } } + """), tf_example) + + def test_label_filtering(self): + label_map, df = create_test_data() + + label_map = {'a': 0} + + tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( + df[df.ImageID == 'i1'], label_map, 'encoded_image_test') + self.assertProtoEquals( + six.ensure_str(""" + features { + feature { + key: "image/encoded" + value { bytes_list { value: "encoded_image_test" } } } + feature { + key: "image/filename" + value { bytes_list { value: "i1.jpg" } } } + feature { + key: "image/object/bbox/ymin" + value { float_list { value: [0.3, 0.6] } } } + feature { + key: "image/object/bbox/xmin" + value { float_list { value: [0.1, 0.3] } } } + feature { + key: "image/object/bbox/ymax" + value { float_list { value: [0.3, 0.6] } } } + feature { + key: "image/object/bbox/xmax" + value { float_list { value: [0.2, 0.3] } } } + feature { + key: "image/object/class/label" + value { int64_list { value: [0, 0] } } } + feature { + key: "image/object/class/text" + value { bytes_list { value: ["a", "a"] } } } + feature { + key: "image/source_id" + value { bytes_list { value: "i1" } } } + feature { + key: "image/object/depiction" + value { int64_list { value: [1, 0] } } } + feature { + key: "image/object/group_of" + value { int64_list { value: [0, 0] } } } + feature { + key: "image/object/occluded" + value { int64_list { value: [0, 1] } } } + feature { + key: "image/object/truncated" + value { int64_list { value: [0, 0] } } } + feature { + key: "image/class/label" + value { int64_list { } } } + feature { + key: "image/class/text" + value { bytes_list { } } } } + """), tf_example) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/seq_example_util.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/seq_example_util.py new file mode 100644 index 0000000000000000000000000000000000000000..408082033489be74bba639bdcabe151e290b28db --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/seq_example_util.py @@ -0,0 +1,282 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common utility for object detection tf.train.SequenceExamples.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow.compat.v1 as tf + + +def context_float_feature(ndarray): + """Converts a numpy float array to a context float feature. + + Args: + ndarray: A numpy float array. 
+ + Returns: + A context float feature. + """ + feature = tf.train.Feature() + for val in ndarray: + feature.float_list.value.append(val) + return feature + + +def context_int64_feature(ndarray): + """Converts a numpy array to a context int64 feature. + + Args: + ndarray: A numpy int64 array. + + Returns: + A context int64 feature. + """ + feature = tf.train.Feature() + for val in ndarray: + feature.int64_list.value.append(val) + return feature + + +def context_bytes_feature(ndarray): + """Converts a numpy bytes array to a context bytes feature. + + Args: + ndarray: A numpy bytes array. + + Returns: + A context bytes feature. + """ + feature = tf.train.Feature() + for val in ndarray: + if isinstance(val, np.ndarray): + val = val.tolist() + feature.bytes_list.value.append(tf.compat.as_bytes(val)) + return feature + + +def sequence_float_feature(ndarray): + """Converts a numpy float array to a sequence float feature. + + Args: + ndarray: A numpy float array. + + Returns: + A sequence float feature. + """ + feature_list = tf.train.FeatureList() + for row in ndarray: + feature = feature_list.feature.add() + if row.size: + feature.float_list.value[:] = row + return feature_list + + +def sequence_int64_feature(ndarray): + """Converts a numpy int64 array to a sequence int64 feature. + + Args: + ndarray: A numpy int64 array. + + Returns: + A sequence int64 feature. + """ + feature_list = tf.train.FeatureList() + for row in ndarray: + feature = feature_list.feature.add() + if row.size: + feature.int64_list.value[:] = row + return feature_list + + +def sequence_bytes_feature(ndarray): + """Converts a bytes float array to a sequence bytes feature. + + Args: + ndarray: A numpy bytes array. + + Returns: + A sequence bytes feature. + """ + feature_list = tf.train.FeatureList() + for row in ndarray: + if isinstance(row, np.ndarray): + row = row.tolist() + feature = feature_list.feature.add() + if row: + row = [tf.compat.as_bytes(val) for val in row] + feature.bytes_list.value[:] = row + return feature_list + + +def sequence_strings_feature(strings): + new_str_arr = [] + for single_str in strings: + new_str_arr.append(tf.train.Feature( + bytes_list=tf.train.BytesList( + value=[single_str.encode('utf8')]))) + return tf.train.FeatureList(feature=new_str_arr) + + +def boxes_to_box_components(bboxes): + """Converts a list of numpy arrays (boxes) to box components. + + Args: + bboxes: A numpy array of bounding boxes. + + Returns: + Bounding box component lists. + """ + ymin_list = [] + xmin_list = [] + ymax_list = [] + xmax_list = [] + for bbox in bboxes: + if bbox != []: # pylint: disable=g-explicit-bool-comparison + bbox = np.array(bbox).astype(np.float32) + ymin, xmin, ymax, xmax = np.split(bbox, 4, axis=1) + else: + ymin, xmin, ymax, xmax = [], [], [], [] + ymin_list.append(np.reshape(ymin, [-1])) + xmin_list.append(np.reshape(xmin, [-1])) + ymax_list.append(np.reshape(ymax, [-1])) + xmax_list.append(np.reshape(xmax, [-1])) + return ymin_list, xmin_list, ymax_list, xmax_list + + +def make_sequence_example(dataset_name, + video_id, + encoded_images, + image_height, + image_width, + image_format=None, + image_source_ids=None, + timestamps=None, + is_annotated=None, + bboxes=None, + label_strings=None, + detection_bboxes=None, + detection_classes=None, + detection_scores=None, + use_strs_for_source_id=False): + """Constructs tf.SequenceExamples. + + Args: + dataset_name: String with dataset name. + video_id: String with video id. 
+ encoded_images: A [num_frames] list (or numpy array) of encoded image + frames. + image_height: Height of the images. + image_width: Width of the images. + image_format: Format of encoded images. + image_source_ids: (Optional) A [num_frames] list of unique string ids for + each image. + timestamps: (Optional) A [num_frames] list (or numpy array) array with image + timestamps. + is_annotated: (Optional) A [num_frames] list (or numpy array) array + in which each element indicates whether the frame has been annotated + (1) or not (0). + bboxes: (Optional) A list (with num_frames elements) of [num_boxes_i, 4] + numpy float32 arrays holding boxes for each frame. + label_strings: (Optional) A list (with num_frames_elements) of [num_boxes_i] + numpy string arrays holding object string labels for each frame. + detection_bboxes: (Optional) A list (with num_frames elements) of + [num_boxes_i, 4] numpy float32 arrays holding prediction boxes for each + frame. + detection_classes: (Optional) A list (with num_frames_elements) of + [num_boxes_i] numpy int64 arrays holding predicted classes for each frame. + detection_scores: (Optional) A list (with num_frames_elements) of + [num_boxes_i] numpy float32 arrays holding predicted object scores for + each frame. + use_strs_for_source_id: (Optional) Whether to write the source IDs as + strings rather than byte lists of characters. + + Returns: + A tf.train.SequenceExample. + """ + num_frames = len(encoded_images) + image_encoded = np.expand_dims(encoded_images, axis=-1) + if timestamps is None: + timestamps = np.arange(num_frames) + image_timestamps = np.expand_dims(timestamps, axis=-1) + + # Context fields. + context_dict = { + 'example/dataset_name': context_bytes_feature([dataset_name]), + 'clip/start/timestamp': context_int64_feature([image_timestamps[0][0]]), + 'clip/end/timestamp': context_int64_feature([image_timestamps[-1][0]]), + 'clip/frames': context_int64_feature([num_frames]), + 'image/channels': context_int64_feature([3]), + 'image/height': context_int64_feature([image_height]), + 'image/width': context_int64_feature([image_width]), + 'clip/media_id': context_bytes_feature([video_id]) + } + + # Sequence fields. + feature_list = { + 'image/encoded': sequence_bytes_feature(image_encoded), + 'image/timestamp': sequence_int64_feature(image_timestamps), + } + + # Add optional fields. 
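+  # Each optional block below is written only when its argument is provided:
+  # image format, per-frame source ids, groundtruth boxes and label strings
+  # (with region/is_annotated), and predicted boxes, classes and scores.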
+ if image_format is not None: + context_dict['image/format'] = context_bytes_feature([image_format]) + if image_source_ids is not None: + if use_strs_for_source_id: + feature_list['image/source_id'] = sequence_strings_feature( + image_source_ids) + else: + feature_list['image/source_id'] = sequence_bytes_feature(image_source_ids) + if bboxes is not None: + bbox_ymin, bbox_xmin, bbox_ymax, bbox_xmax = boxes_to_box_components(bboxes) + feature_list['region/bbox/xmin'] = sequence_float_feature(bbox_xmin) + feature_list['region/bbox/xmax'] = sequence_float_feature(bbox_xmax) + feature_list['region/bbox/ymin'] = sequence_float_feature(bbox_ymin) + feature_list['region/bbox/ymax'] = sequence_float_feature(bbox_ymax) + if is_annotated is None: + is_annotated = np.ones(num_frames, dtype=np.int64) + is_annotated = np.expand_dims(is_annotated, axis=-1) + feature_list['region/is_annotated'] = sequence_int64_feature(is_annotated) + + if label_strings is not None: + feature_list['region/label/string'] = sequence_bytes_feature( + label_strings) + + if detection_bboxes is not None: + det_bbox_ymin, det_bbox_xmin, det_bbox_ymax, det_bbox_xmax = ( + boxes_to_box_components(detection_bboxes)) + feature_list['predicted/region/bbox/xmin'] = sequence_float_feature( + det_bbox_xmin) + feature_list['predicted/region/bbox/xmax'] = sequence_float_feature( + det_bbox_xmax) + feature_list['predicted/region/bbox/ymin'] = sequence_float_feature( + det_bbox_ymin) + feature_list['predicted/region/bbox/ymax'] = sequence_float_feature( + det_bbox_ymax) + if detection_classes is not None: + feature_list['predicted/region/label/index'] = sequence_int64_feature( + detection_classes) + if detection_scores is not None: + feature_list['predicted/region/label/confidence'] = sequence_float_feature( + detection_scores) + + context = tf.train.Features(feature=context_dict) + feature_lists = tf.train.FeatureLists(feature_list=feature_list) + + sequence_example = tf.train.SequenceExample( + context=context, + feature_lists=feature_lists) + return sequence_example diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/seq_example_util_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/seq_example_util_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5b6c037f8ace714705be757436642424fdeace27 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/seq_example_util_test.py @@ -0,0 +1,366 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
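Ahead of the detailed tests below, a minimal sketch of packing an unlabeled clip with make_sequence_example; the frame paths, clip metadata, and output path are placeholders.

```python
# Illustrative sketch: packing a short unlabeled clip into a SequenceExample.
# Frame paths, image sizes, and ids are placeholders.
import tensorflow.compat.v1 as tf

from object_detection.dataset_tools import seq_example_util

frame_paths = ['frame_000.jpg', 'frame_001.jpg', 'frame_002.jpg']
encoded_frames = []
for path in frame_paths:
  with open(path, 'rb') as f:
    encoded_frames.append(f.read())

seq_example = seq_example_util.make_sequence_example(
    dataset_name='my_clips',
    video_id='clip_000',
    encoded_images=encoded_frames,
    image_height=480,
    image_width=640,
    image_format='JPEG',
    image_source_ids=[str(i) for i in range(len(encoded_frames))])

with tf.python_io.TFRecordWriter('clip_000.tfrecord') as writer:
  writer.write(seq_example.SerializeToString())
```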
+# ============================================================================== +"""Tests for object_detection.utils.seq_example_util.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import seq_example_util +from object_detection.utils import tf_version + + +class SeqExampleUtilTest(tf.test.TestCase): + + def materialize_tensors(self, list_of_tensors): + if tf_version.is_tf2(): + return [tensor.numpy() for tensor in list_of_tensors] + else: + with self.cached_session() as sess: + return sess.run(list_of_tensors) + + def test_make_unlabeled_example(self): + num_frames = 5 + image_height = 100 + image_width = 200 + dataset_name = b'unlabeled_dataset' + video_id = b'video_000' + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + image_source_ids = [str(idx) for idx in range(num_frames)] + images_list = tf.unstack(images, axis=0) + encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list] + encoded_images = self.materialize_tensors(encoded_images_list) + seq_example = seq_example_util.make_sequence_example( + dataset_name=dataset_name, + video_id=video_id, + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + image_format='JPEG', + image_source_ids=image_source_ids) + + context_feature_dict = seq_example.context.feature + self.assertEqual( + dataset_name, + context_feature_dict['example/dataset_name'].bytes_list.value[0]) + self.assertEqual( + 0, + context_feature_dict['clip/start/timestamp'].int64_list.value[0]) + self.assertEqual( + num_frames - 1, + context_feature_dict['clip/end/timestamp'].int64_list.value[0]) + self.assertEqual( + num_frames, + context_feature_dict['clip/frames'].int64_list.value[0]) + self.assertEqual( + 3, + context_feature_dict['image/channels'].int64_list.value[0]) + self.assertEqual( + b'JPEG', + context_feature_dict['image/format'].bytes_list.value[0]) + self.assertEqual( + image_height, + context_feature_dict['image/height'].int64_list.value[0]) + self.assertEqual( + image_width, + context_feature_dict['image/width'].int64_list.value[0]) + self.assertEqual( + video_id, + context_feature_dict['clip/media_id'].bytes_list.value[0]) + + seq_feature_dict = seq_example.feature_lists.feature_list + self.assertLen( + seq_feature_dict['image/encoded'].feature[:], + num_frames) + timestamps = [ + feature.int64_list.value[0] for feature + in seq_feature_dict['image/timestamp'].feature] + self.assertAllEqual(list(range(num_frames)), timestamps) + source_ids = [ + feature.bytes_list.value[0] for feature + in seq_feature_dict['image/source_id'].feature] + self.assertAllEqual( + [six.ensure_binary(str(idx)) for idx in range(num_frames)], + source_ids) + + def test_make_labeled_example(self): + num_frames = 3 + image_height = 100 + image_width = 200 + dataset_name = b'unlabeled_dataset' + video_id = b'video_000' + labels = [b'dog', b'cat', b'wolf'] + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list] + encoded_images = self.materialize_tensors(encoded_images_list) + timestamps = [100000, 110000, 120000] + is_annotated = [1, 0, 1] + bboxes = [ + np.array([[0., 0., 0., 0.], + 
[0., 0., 1., 1.]], dtype=np.float32), + np.zeros([0, 4], dtype=np.float32), + np.array([], dtype=np.float32) + ] + label_strings = [ + np.array(labels), + np.array([]), + np.array([]) + ] + + seq_example = seq_example_util.make_sequence_example( + dataset_name=dataset_name, + video_id=video_id, + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + timestamps=timestamps, + is_annotated=is_annotated, + bboxes=bboxes, + label_strings=label_strings) + + context_feature_dict = seq_example.context.feature + self.assertEqual( + dataset_name, + context_feature_dict['example/dataset_name'].bytes_list.value[0]) + self.assertEqual( + timestamps[0], + context_feature_dict['clip/start/timestamp'].int64_list.value[0]) + self.assertEqual( + timestamps[-1], + context_feature_dict['clip/end/timestamp'].int64_list.value[0]) + self.assertEqual( + num_frames, + context_feature_dict['clip/frames'].int64_list.value[0]) + + seq_feature_dict = seq_example.feature_lists.feature_list + self.assertLen( + seq_feature_dict['image/encoded'].feature[:], + num_frames) + actual_timestamps = [ + feature.int64_list.value[0] for feature + in seq_feature_dict['image/timestamp'].feature] + self.assertAllEqual(timestamps, actual_timestamps) + # Frame 0. + self.assertAllEqual( + is_annotated[0], + seq_feature_dict['region/is_annotated'].feature[0].int64_list.value[0]) + self.assertAllClose( + [0., 0.], + seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0., 0.], + seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0., 1.], + seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:]) + self.assertAllClose( + [0., 1.], + seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:]) + self.assertAllEqual( + labels, + seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:]) + + # Frame 1. 
+ self.assertAllEqual( + is_annotated[1], + seq_feature_dict['region/is_annotated'].feature[1].int64_list.value[0]) + self.assertAllClose( + [], + seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:]) + self.assertAllEqual( + [], + seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:]) + + def test_make_labeled_example_with_predictions(self): + num_frames = 2 + image_height = 100 + image_width = 200 + dataset_name = b'unlabeled_dataset' + video_id = b'video_000' + images = tf.cast(tf.random.uniform( + [num_frames, image_height, image_width, 3], + maxval=256, + dtype=tf.int32), dtype=tf.uint8) + images_list = tf.unstack(images, axis=0) + encoded_images_list = [tf.io.encode_jpeg(image) for image in images_list] + encoded_images = self.materialize_tensors(encoded_images_list) + bboxes = [ + np.array([[0., 0., 0.75, 0.75], + [0., 0., 1., 1.]], dtype=np.float32), + np.array([[0., 0.25, 0.5, 0.75]], dtype=np.float32) + ] + label_strings = [ + np.array(['cat', 'frog']), + np.array(['cat']) + ] + detection_bboxes = [ + np.array([[0., 0., 0.75, 0.75]], dtype=np.float32), + np.zeros([0, 4], dtype=np.float32) + ] + detection_classes = [ + np.array([5], dtype=np.int64), + np.array([], dtype=np.int64) + ] + detection_scores = [ + np.array([0.9], dtype=np.float32), + np.array([], dtype=np.float32) + ] + + seq_example = seq_example_util.make_sequence_example( + dataset_name=dataset_name, + video_id=video_id, + encoded_images=encoded_images, + image_height=image_height, + image_width=image_width, + bboxes=bboxes, + label_strings=label_strings, + detection_bboxes=detection_bboxes, + detection_classes=detection_classes, + detection_scores=detection_scores) + + context_feature_dict = seq_example.context.feature + self.assertEqual( + dataset_name, + context_feature_dict['example/dataset_name'].bytes_list.value[0]) + self.assertEqual( + 0, + context_feature_dict['clip/start/timestamp'].int64_list.value[0]) + self.assertEqual( + 1, + context_feature_dict['clip/end/timestamp'].int64_list.value[0]) + self.assertEqual( + num_frames, + context_feature_dict['clip/frames'].int64_list.value[0]) + + seq_feature_dict = seq_example.feature_lists.feature_list + self.assertLen( + seq_feature_dict['image/encoded'].feature[:], + num_frames) + actual_timestamps = [ + feature.int64_list.value[0] for feature + in seq_feature_dict['image/timestamp'].feature] + self.assertAllEqual([0, 1], actual_timestamps) + # Frame 0. 
+ self.assertAllEqual( + 1, + seq_feature_dict['region/is_annotated'].feature[0].int64_list.value[0]) + self.assertAllClose( + [0., 0.], + seq_feature_dict['region/bbox/ymin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0., 0.], + seq_feature_dict['region/bbox/xmin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.75, 1.], + seq_feature_dict['region/bbox/ymax'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.75, 1.], + seq_feature_dict['region/bbox/xmax'].feature[0].float_list.value[:]) + self.assertAllEqual( + [b'cat', b'frog'], + seq_feature_dict['region/label/string'].feature[0].bytes_list.value[:]) + self.assertAllClose( + [0.], + seq_feature_dict[ + 'predicted/region/bbox/ymin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.], + seq_feature_dict[ + 'predicted/region/bbox/xmin'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.75], + seq_feature_dict[ + 'predicted/region/bbox/ymax'].feature[0].float_list.value[:]) + self.assertAllClose( + [0.75], + seq_feature_dict[ + 'predicted/region/bbox/xmax'].feature[0].float_list.value[:]) + self.assertAllEqual( + [5], + seq_feature_dict[ + 'predicted/region/label/index'].feature[0].int64_list.value[:]) + self.assertAllClose( + [0.9], + seq_feature_dict[ + 'predicted/region/label/confidence'].feature[0].float_list.value[:]) + + # Frame 1. + self.assertAllEqual( + 1, + seq_feature_dict['region/is_annotated'].feature[1].int64_list.value[0]) + self.assertAllClose( + [0.0], + seq_feature_dict['region/bbox/ymin'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.25], + seq_feature_dict['region/bbox/xmin'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.5], + seq_feature_dict['region/bbox/ymax'].feature[1].float_list.value[:]) + self.assertAllClose( + [0.75], + seq_feature_dict['region/bbox/xmax'].feature[1].float_list.value[:]) + self.assertAllEqual( + [b'cat'], + seq_feature_dict['region/label/string'].feature[1].bytes_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict[ + 'predicted/region/bbox/ymin'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict[ + 'predicted/region/bbox/xmin'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict[ + 'predicted/region/bbox/ymax'].feature[1].float_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict[ + 'predicted/region/bbox/xmax'].feature[1].float_list.value[:]) + self.assertAllEqual( + [], + seq_feature_dict[ + 'predicted/region/label/index'].feature[1].int64_list.value[:]) + self.assertAllClose( + [], + seq_feature_dict[ + 'predicted/region/label/confidence'].feature[1].float_list.value[:]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/tf_record_creation_util.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/tf_record_creation_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e54bcbcecdf95a9a4c524425ac612fcbe6eaeef3 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/tf_record_creation_util.py @@ -0,0 +1,48 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Utilities for creating TFRecords of TF examples for the Open Images dataset. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf + + +def open_sharded_output_tfrecords(exit_stack, base_path, num_shards): + """Opens all TFRecord shards for writing and adds them to an exit stack. + + Args: + exit_stack: A context2.ExitStack used to automatically closed the TFRecords + opened in this function. + base_path: The base path for all shards + num_shards: The number of shards + + Returns: + The list of opened TFRecords. Position k in the list corresponds to shard k. + """ + tf_record_output_filenames = [ + '{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards) + for idx in range(num_shards) + ] + + tfrecords = [ + exit_stack.enter_context(tf.python_io.TFRecordWriter(file_name)) + for file_name in tf_record_output_filenames + ] + + return tfrecords diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/tf_record_creation_util_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/tf_record_creation_util_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5722c86472e617f5e2e2aba916ad9e90c418948b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dataset_tools/tf_record_creation_util_test.py @@ -0,0 +1,49 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
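A small sketch of the sharding pattern this helper supports (it expects a contextlib2.ExitStack, which closes all shard writers on exit); the output path and the placeholder examples are invented for illustration.

```python
# Illustrative sketch: round-robin writing of examples into sharded TFRecords.
import contextlib2
import tensorflow.compat.v1 as tf

from object_detection.dataset_tools import tf_record_creation_util

examples = [tf.train.Example() for _ in range(10)]  # stand-in examples
num_shards = 4

with contextlib2.ExitStack() as close_stack:
  writers = tf_record_creation_util.open_sharded_output_tfrecords(
      close_stack, '/tmp/oid_train.record', num_shards)
  for index, example in enumerate(examples):
    writers[index % num_shards].write(example.SerializeToString())
```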
+# ============================================================================== +"""Tests for tf_record_creation_util.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import contextlib2 +import six +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.dataset_tools import tf_record_creation_util + + +class OpenOutputTfrecordsTests(tf.test.TestCase): + + def test_sharded_tfrecord_writes(self): + with contextlib2.ExitStack() as tf_record_close_stack: + output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( + tf_record_close_stack, + os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), 10) + for idx in range(10): + output_tfrecords[idx].write(six.ensure_binary('test_{}'.format(idx))) + + for idx in range(10): + tf_record_path = '{}-{:05d}-of-00010'.format( + os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), idx) + records = list(tf.python_io.tf_record_iterator(tf_record_path)) + self.assertAllEqual(records, ['test_{}'.format(idx).encode('utf-8')]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/android/Dockerfile b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/android/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..470f669dccd057dcdbae0e929258fc0eb5f96703 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/android/Dockerfile @@ -0,0 +1,140 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# #========================================================================== + +# Pull TF nightly-devel docker image +FROM tensorflow/tensorflow:nightly-devel + +# Get the tensorflow models research directory, and move it into tensorflow +# source folder to match recommendation of installation +RUN git clone --depth 1 https://github.com/tensorflow/models.git && \ + mv models /tensorflow/models + + +# Install gcloud and gsutil commands +# https://cloud.google.com/sdk/docs/quickstart-debian-ubuntu +RUN apt-get -y update && apt-get install -y gpg-agent && \ + export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \ + echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ + apt-get update -y && apt-get install google-cloud-sdk -y + + +# Install the Tensorflow Object Detection API from here +# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md + +# Install object detection api dependencies - use non-interactive mode to set +# default tzdata config during installation. 
+RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get install -y protobuf-compiler python-pil python-lxml python-tk && \ + pip install Cython && \ + pip install contextlib2 && \ + pip install jupyter && \ + pip install matplotlib + +# Install pycocoapi +RUN git clone --depth 1 https://github.com/cocodataset/cocoapi.git && \ + cd cocoapi/PythonAPI && \ + make -j8 && \ + cp -r pycocotools /tensorflow/models/research && \ + cd ../../ && \ + rm -rf cocoapi + +# Get protoc 3.0.0, rather than the old version already in the container +RUN curl -OL "https://github.com/google/protobuf/releases/download/v3.0.0/protoc-3.0.0-linux-x86_64.zip" && \ + unzip protoc-3.0.0-linux-x86_64.zip -d proto3 && \ + mv proto3/bin/* /usr/local/bin && \ + mv proto3/include/* /usr/local/include && \ + rm -rf proto3 protoc-3.0.0-linux-x86_64.zip + +# Run protoc on the object detection repo +RUN cd /tensorflow/models/research && \ + protoc object_detection/protos/*.proto --python_out=. + +# Set the PYTHONPATH to finish installing the API +ENV PYTHONPATH $PYTHONPATH:/tensorflow/models/research:/tensorflow/models/research/slim + + +# Install wget (to make life easier below) and editors (to allow people to edit +# the files inside the container) +RUN apt-get install -y wget vim emacs nano + + +# Grab various data files which are used throughout the demo: dataset, +# pretrained model, and pretrained TensorFlow Lite model. Install these all in +# the same directories as recommended by the blog post. + +# Pets example dataset +RUN mkdir -p /tmp/pet_faces_tfrecord/ && \ + cd /tmp/pet_faces_tfrecord && \ + curl "http://download.tensorflow.org/models/object_detection/pet_faces_tfrecord.tar.gz" | tar xzf - + +# Pretrained model +# This one doesn't need its own directory, since it comes in a folder. +RUN cd /tmp && \ + curl -O "http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync_2018_07_03.tar.gz" && \ + tar xzf ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync_2018_07_03.tar.gz && \ + rm ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync_2018_07_03.tar.gz + +# Trained TensorFlow Lite model. This should get replaced by one generated from +# export_tflite_ssd_graph.py when that command is called. 
+RUN cd /tmp && \ + curl -L -o tflite.zip \ + https://storage.googleapis.com/download.tensorflow.org/models/tflite/frozengraphs_ssd_mobilenet_v1_0.75_quant_pets_2018_06_29.zip && \ + unzip tflite.zip -d tflite && \ + rm tflite.zip + + +# Install Android development tools +# Inspired by the following sources: +# https://github.com/bitrise-docker/android/blob/master/Dockerfile +# https://github.com/reddit/docker-android-build/blob/master/Dockerfile + +# Set environment variables +ENV ANDROID_HOME /opt/android-sdk-linux +ENV ANDROID_NDK_HOME /opt/android-ndk-r14b +ENV PATH ${PATH}:${ANDROID_HOME}/tools:${ANDROID_HOME}/tools/bin:${ANDROID_HOME}/platform-tools + +# Install SDK tools +RUN cd /opt && \ + curl -OL https://dl.google.com/android/repository/sdk-tools-linux-4333796.zip && \ + unzip sdk-tools-linux-4333796.zip -d ${ANDROID_HOME} && \ + rm sdk-tools-linux-4333796.zip + +# Accept licenses before installing components, no need to echo y for each component +# License is valid for all the standard components in versions installed from this file +# Non-standard components: MIPS system images, preview versions, GDK (Google Glass) and Android Google TV require separate licenses, not accepted there +RUN yes | sdkmanager --licenses + +# Install platform tools, SDK platform, and other build tools +RUN yes | sdkmanager \ + "tools" \ + "platform-tools" \ + "platforms;android-27" \ + "platforms;android-23" \ + "build-tools;27.0.3" \ + "build-tools;23.0.3" + +# Install Android NDK (r14b) +RUN cd /opt && \ + curl -L -o android-ndk.zip http://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \ + unzip -q android-ndk.zip && \ + rm -f android-ndk.zip + +# Configure the build to use the things we just downloaded +RUN cd /tensorflow && \ + printf '\n\n\nn\ny\nn\nn\nn\ny\nn\nn\nn\nn\nn\nn\n\ny\n%s\n\n\n' ${ANDROID_HOME}|./configure + + +WORKDIR /tensorflow diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/android/README.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/android/README.md new file mode 100644 index 0000000000000000000000000000000000000000..69016cbb019fca4556b825262bc647c9ea6533fc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/android/README.md @@ -0,0 +1,69 @@ +# Dockerfile for the TPU and TensorFlow Lite Object Detection tutorial + +This Docker image automates the setup involved with training +object detection models on Google Cloud and building the Android TensorFlow Lite +demo app. We recommend using this container if you decide to work through our +tutorial on ["Training and serving a real-time mobile object detector in +30 minutes with Cloud TPUs"](https://medium.com/tensorflow/training-and-serving-a-realtime-mobile-object-detector-in-30-minutes-with-cloud-tpus-b78971cf1193), though of course it may be useful even if you would +like to use the Object Detection API outside the context of the tutorial. + +A couple words of warning: + +1. Docker containers do not have persistent storage. This means that any changes + you make to files inside the container will not persist if you restart + the container. When running through the tutorial, + **do not close the container**. +2. To be able to deploy the [Android app]( + https://github.com/tensorflow/examples/tree/master/lite/examples/object_detection/android) + (which you will build at the end of the tutorial), + you will need to kill any instances of `adb` running on the host machine. 
You + can accomplish this by closing all instances of Android Studio, and then + running `adb kill-server`. + +You can install Docker by following the [instructions here]( +https://docs.docker.com/install/). + +## Running The Container + +From this directory, build the Dockerfile as follows (this takes a while): + +``` +docker build --tag detect-tf . +``` + +Run the container: + +``` +docker run --rm -it --privileged -p 6006:6006 detect-tf +``` + +When running the container, you will find yourself inside the `/tensorflow` +directory, which is the path to the TensorFlow [source +tree](https://github.com/tensorflow/tensorflow). + +## Text Editing + +The tutorial also +requires you to occasionally edit files inside the source tree. +This Docker images comes with `vim`, `nano`, and `emacs` preinstalled for your +convenience. + +## What's In This Container + +This container is derived from the nightly build of TensorFlow, and contains the +sources for TensorFlow at `/tensorflow`, as well as the +[TensorFlow Models](https://github.com/tensorflow/models) which are available at +`/tensorflow/models` (and contain the Object Detection API as a subdirectory +at `/tensorflow/models/research/object_detection`). +The Oxford-IIIT Pets dataset, the COCO pre-trained SSD + MobileNet (v1) +checkpoint, and example +trained model are all available in `/tmp` in their respective folders. + +This container also has the `gsutil` and `gcloud` utilities, the `bazel` build +tool, and all dependencies necessary to use the Object Detection API, and +compile and install the TensorFlow Lite Android demo app. + +At various points throughout the tutorial, you may see references to the +*research directory*. This refers to the `research` folder within the +models repository, located at +`/tensorflow/models/research`. diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf1/Dockerfile b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf1/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..9d77523096ab12bbda292595c9ada1cd42c97500 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf1/Dockerfile @@ -0,0 +1,41 @@ +FROM tensorflow/tensorflow:1.15.2-gpu-py3 + +ARG DEBIAN_FRONTEND=noninteractive + +# Install apt dependencies +RUN apt-get update && apt-get install -y \ + git \ + gpg-agent \ + python3-cairocffi \ + protobuf-compiler \ + python3-pil \ + python3-lxml \ + python3-tk \ + wget + +# Install gcloud and gsutil commands +# https://cloud.google.com/sdk/docs/quickstart-debian-ubuntu +RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \ + echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ + apt-get update -y && apt-get install google-cloud-sdk -y + +# Add new user to avoid running as root +RUN useradd -ms /bin/bash tensorflow +USER tensorflow +WORKDIR /home/tensorflow + +# Copy this version of of the model garden into the image +COPY --chown=tensorflow . /home/tensorflow/models + +# Compile protobuf configs +RUN (cd /home/tensorflow/models/research/ && protoc object_detection/protos/*.proto --python_out=.) 
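+# The protoc step above generates the object_detection/protos/*_pb2.py
+# modules that the package installed below imports at runtime.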
+WORKDIR /home/tensorflow/models/research/ + +RUN cp object_detection/packages/tf1/setup.py ./ +ENV PATH="/home/tensorflow/.local/bin:${PATH}" + +RUN python -m pip install --user -U pip +RUN python -m pip install --user . + +ENV TF_CPP_MIN_LOG_LEVEL 3 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf1/README.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf1/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9e4503ca0fa54edb745194f382b025f976f08d6f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf1/README.md @@ -0,0 +1,11 @@ +# TensorFlow Object Detection on Docker + +These instructions are experimental. + +## Building and running: + +```bash +# From the root of the git repository +docker build -f research/object_detection/dockerfiles/tf1/Dockerfile -t od . +docker run -it od +``` diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf2/Dockerfile b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf2/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..c4dfc6b23070bfffea7a07f5e23fe0bac0901c99 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf2/Dockerfile @@ -0,0 +1,41 @@ +FROM tensorflow/tensorflow:2.2.0-gpu + +ARG DEBIAN_FRONTEND=noninteractive + +# Install apt dependencies +RUN apt-get update && apt-get install -y \ + git \ + gpg-agent \ + python3-cairocffi \ + protobuf-compiler \ + python3-pil \ + python3-lxml \ + python3-tk \ + wget + +# Install gcloud and gsutil commands +# https://cloud.google.com/sdk/docs/quickstart-debian-ubuntu +RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \ + echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ + apt-get update -y && apt-get install google-cloud-sdk -y + +# Add new user to avoid running as root +RUN useradd -ms /bin/bash tensorflow +USER tensorflow +WORKDIR /home/tensorflow + +# Copy this version of of the model garden into the image +COPY --chown=tensorflow . /home/tensorflow/models + +# Compile protobuf configs +RUN (cd /home/tensorflow/models/research/ && protoc object_detection/protos/*.proto --python_out=.) +WORKDIR /home/tensorflow/models/research/ + +RUN cp object_detection/packages/tf2/setup.py ./ +ENV PATH="/home/tensorflow/.local/bin:${PATH}" + +RUN python -m pip install -U pip +RUN python -m pip install . + +ENV TF_CPP_MIN_LOG_LEVEL 3 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf2/README.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..14b5184c55d801400f523c80d6d21b9c38960a26 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/dockerfiles/tf2/README.md @@ -0,0 +1,11 @@ +# TensorFlow Object Detection on Docker + +These instructions are experimental. + +## Building and running: + +```bash +# From the root of the git repository +docker build -f research/object_detection/dockerfiles/tf2/Dockerfile -t od . 
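+# Optional, not in the original instructions: the image is built from
+# tensorflow/tensorflow:2.2.0-gpu, so to expose the host GPU you would
+# typically add `--gpus all` to the run command, which assumes Docker 19.03+
+# and the NVIDIA Container Toolkit on the host:
+# docker run --gpus all -it od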
+docker run -it od +``` diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/eval_util.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/eval_util.py new file mode 100644 index 0000000000000000000000000000000000000000..0a44be4b95d232345333612569c71c93a062a37c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/eval_util.py @@ -0,0 +1,1206 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common utility functions for evaluation.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os +import re +import time + +import numpy as np +from six.moves import range +import tensorflow.compat.v1 as tf + +import tf_slim as slim + +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import keypoint_ops +from object_detection.core import standard_fields as fields +from object_detection.metrics import coco_evaluation +from object_detection.metrics import lvis_evaluation +from object_detection.protos import eval_pb2 +from object_detection.utils import label_map_util +from object_detection.utils import object_detection_evaluation +from object_detection.utils import ops +from object_detection.utils import shape_utils +from object_detection.utils import visualization_utils as vis_utils + +EVAL_KEYPOINT_METRIC = 'coco_keypoint_metrics' + +# A dictionary of metric names to classes that implement the metric. The classes +# in the dictionary must implement +# utils.object_detection_evaluation.DetectionEvaluator interface. 
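+# For illustration (not part of the upstream file): get_evaluators() below looks
+# up each name in eval_config.metrics_set in this dictionary and instantiates the
+# matching class with the category list plus any per-metric options, e.g.
+# 'coco_detection_metrics' -> coco_evaluation.CocoDetectionEvaluator(categories).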
+EVAL_METRICS_CLASS_DICT = { + 'coco_detection_metrics': + coco_evaluation.CocoDetectionEvaluator, + 'coco_keypoint_metrics': + coco_evaluation.CocoKeypointEvaluator, + 'coco_mask_metrics': + coco_evaluation.CocoMaskEvaluator, + 'coco_panoptic_metrics': + coco_evaluation.CocoPanopticSegmentationEvaluator, + 'lvis_mask_metrics': + lvis_evaluation.LVISMaskEvaluator, + 'oid_challenge_detection_metrics': + object_detection_evaluation.OpenImagesDetectionChallengeEvaluator, + 'oid_challenge_segmentation_metrics': + object_detection_evaluation + .OpenImagesInstanceSegmentationChallengeEvaluator, + 'pascal_voc_detection_metrics': + object_detection_evaluation.PascalDetectionEvaluator, + 'weighted_pascal_voc_detection_metrics': + object_detection_evaluation.WeightedPascalDetectionEvaluator, + 'precision_at_recall_detection_metrics': + object_detection_evaluation.PrecisionAtRecallDetectionEvaluator, + 'pascal_voc_instance_segmentation_metrics': + object_detection_evaluation.PascalInstanceSegmentationEvaluator, + 'weighted_pascal_voc_instance_segmentation_metrics': + object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator, + 'oid_V2_detection_metrics': + object_detection_evaluation.OpenImagesDetectionEvaluator, +} + +EVAL_DEFAULT_METRIC = 'coco_detection_metrics' + + +def write_metrics(metrics, global_step, summary_dir): + """Write metrics to a summary directory. + + Args: + metrics: A dictionary containing metric names and values. + global_step: Global step at which the metrics are computed. + summary_dir: Directory to write tensorflow summaries to. + """ + tf.logging.info('Writing metrics to tf summary.') + summary_writer = tf.summary.FileWriterCache.get(summary_dir) + for key in sorted(metrics): + summary = tf.Summary(value=[ + tf.Summary.Value(tag=key, simple_value=metrics[key]), + ]) + summary_writer.add_summary(summary, global_step) + tf.logging.info('%s: %f', key, metrics[key]) + tf.logging.info('Metrics written to tf summary.') + + +# TODO(rathodv): Add tests. +def visualize_detection_results(result_dict, + tag, + global_step, + categories, + summary_dir='', + export_dir='', + agnostic_mode=False, + show_groundtruth=False, + groundtruth_box_visualization_color='black', + min_score_thresh=.5, + max_num_predictions=20, + skip_scores=False, + skip_labels=False, + keep_image_id_for_visualization_export=False): + """Visualizes detection results and writes visualizations to image summaries. + + This function visualizes an image with its detected bounding boxes and writes + to image summaries which can be viewed on tensorboard. It optionally also + writes images to a directory. In the case of missing entry in the label map, + unknown class name in the visualization is shown as "N/A". + + Args: + result_dict: a dictionary holding groundtruth and detection + data corresponding to each image being evaluated. The following keys + are required: + 'original_image': a numpy array representing the image with shape + [1, height, width, 3] or [1, height, width, 1] + 'detection_boxes': a numpy array of shape [N, 4] + 'detection_scores': a numpy array of shape [N] + 'detection_classes': a numpy array of shape [N] + The following keys are optional: + 'groundtruth_boxes': a numpy array of shape [N, 4] + 'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2] + Detections are assumed to be provided in decreasing order of score and for + display, and we assume that scores are probabilities between 0 and 1. + tag: tensorboard tag (string) to associate with image. 
+ global_step: global step at which the visualization are generated. + categories: a list of dictionaries representing all possible categories. + Each dict in this list has the following keys: + 'id': (required) an integer id uniquely identifying this category + 'name': (required) string representing category name + e.g., 'cat', 'dog', 'pizza' + 'supercategory': (optional) string representing the supercategory + e.g., 'animal', 'vehicle', 'food', etc + summary_dir: the output directory to which the image summaries are written. + export_dir: the output directory to which images are written. If this is + empty (default), then images are not exported. + agnostic_mode: boolean (default: False) controlling whether to evaluate in + class-agnostic mode or not. + show_groundtruth: boolean (default: False) controlling whether to show + groundtruth boxes in addition to detected boxes + groundtruth_box_visualization_color: box color for visualizing groundtruth + boxes + min_score_thresh: minimum score threshold for a box to be visualized + max_num_predictions: maximum number of detections to visualize + skip_scores: whether to skip score when drawing a single detection + skip_labels: whether to skip label when drawing a single detection + keep_image_id_for_visualization_export: whether to keep image identifier in + filename when exported to export_dir + Raises: + ValueError: if result_dict does not contain the expected keys (i.e., + 'original_image', 'detection_boxes', 'detection_scores', + 'detection_classes') + """ + detection_fields = fields.DetectionResultFields + input_fields = fields.InputDataFields + if not set([ + input_fields.original_image, + detection_fields.detection_boxes, + detection_fields.detection_scores, + detection_fields.detection_classes, + ]).issubset(set(result_dict.keys())): + raise ValueError('result_dict does not contain all expected keys.') + if show_groundtruth and input_fields.groundtruth_boxes not in result_dict: + raise ValueError('If show_groundtruth is enabled, result_dict must contain ' + 'groundtruth_boxes.') + tf.logging.info('Creating detection visualizations.') + category_index = label_map_util.create_category_index(categories) + + image = np.squeeze(result_dict[input_fields.original_image], axis=0) + if image.shape[2] == 1: # If one channel image, repeat in RGB. 
+ image = np.tile(image, [1, 1, 3]) + detection_boxes = result_dict[detection_fields.detection_boxes] + detection_scores = result_dict[detection_fields.detection_scores] + detection_classes = np.int32((result_dict[ + detection_fields.detection_classes])) + detection_keypoints = result_dict.get(detection_fields.detection_keypoints) + detection_masks = result_dict.get(detection_fields.detection_masks) + detection_boundaries = result_dict.get(detection_fields.detection_boundaries) + + # Plot groundtruth underneath detections + if show_groundtruth: + groundtruth_boxes = result_dict[input_fields.groundtruth_boxes] + groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints) + vis_utils.visualize_boxes_and_labels_on_image_array( + image=image, + boxes=groundtruth_boxes, + classes=None, + scores=None, + category_index=category_index, + keypoints=groundtruth_keypoints, + use_normalized_coordinates=False, + max_boxes_to_draw=None, + groundtruth_box_visualization_color=groundtruth_box_visualization_color) + vis_utils.visualize_boxes_and_labels_on_image_array( + image, + detection_boxes, + detection_classes, + detection_scores, + category_index, + instance_masks=detection_masks, + instance_boundaries=detection_boundaries, + keypoints=detection_keypoints, + use_normalized_coordinates=False, + max_boxes_to_draw=max_num_predictions, + min_score_thresh=min_score_thresh, + agnostic_mode=agnostic_mode, + skip_scores=skip_scores, + skip_labels=skip_labels) + + if export_dir: + if keep_image_id_for_visualization_export and result_dict[fields. + InputDataFields() + .key]: + export_path = os.path.join(export_dir, 'export-{}-{}.png'.format( + tag, result_dict[fields.InputDataFields().key])) + else: + export_path = os.path.join(export_dir, 'export-{}.png'.format(tag)) + vis_utils.save_image_array_as_png(image, export_path) + + summary = tf.Summary(value=[ + tf.Summary.Value( + tag=tag, + image=tf.Summary.Image( + encoded_image_string=vis_utils.encode_image_array_as_png_str( + image))) + ]) + summary_writer = tf.summary.FileWriterCache.get(summary_dir) + summary_writer.add_summary(summary, global_step) + + tf.logging.info('Detection visualizations written to summary with tag %s.', + tag) + + +def _run_checkpoint_once(tensor_dict, + evaluators=None, + batch_processor=None, + checkpoint_dirs=None, + variables_to_restore=None, + restore_fn=None, + num_batches=1, + master='', + save_graph=False, + save_graph_dir='', + losses_dict=None, + eval_export_path=None, + process_metrics_fn=None): + """Evaluates metrics defined in evaluators and returns summaries. + + This function loads the latest checkpoint in checkpoint_dirs and evaluates + all metrics defined in evaluators. The metrics are processed in batch by the + batch_processor. + + Args: + tensor_dict: a dictionary holding tensors representing a batch of detections + and corresponding groundtruth annotations. + evaluators: a list of object of type DetectionEvaluator to be used for + evaluation. Note that the metric names produced by different evaluators + must be unique. + batch_processor: a function taking four arguments: + 1. tensor_dict: the same tensor_dict that is passed in as the first + argument to this function. + 2. sess: a tensorflow session + 3. batch_index: an integer representing the index of the batch amongst + all batches + By default, batch_processor is None, which defaults to running: + return sess.run(tensor_dict) + To skip an image, it suffices to return an empty dictionary in place of + result_dict. 
+ checkpoint_dirs: list of directories to load into an EnsembleModel. If it + has only one directory, EnsembleModel will not be used -- + a DetectionModel + will be instantiated directly. Not used if restore_fn is set. + variables_to_restore: None, or a dictionary mapping variable names found in + a checkpoint to model variables. The dictionary would normally be + generated by creating a tf.train.ExponentialMovingAverage object and + calling its variables_to_restore() method. Not used if restore_fn is set. + restore_fn: None, or a function that takes a tf.Session object and correctly + restores all necessary variables from the correct checkpoint file. If + None, attempts to restore from the first directory in checkpoint_dirs. + num_batches: the number of batches to use for evaluation. + master: the location of the Tensorflow session. + save_graph: whether or not the Tensorflow graph is stored as a pbtxt file. + save_graph_dir: where to store the Tensorflow graph on disk. If save_graph + is True this must be non-empty. + losses_dict: optional dictionary of scalar detection losses. + eval_export_path: Path for saving a json file that contains the detection + results in json format. + process_metrics_fn: a callback called with evaluation results after each + evaluation is done. It could be used e.g. to back up checkpoints with + best evaluation scores, or to call an external system to update evaluation + results in order to drive best hyper-parameter search. Parameters are: + int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics, + str checkpoint_file path. + + Returns: + global_step: the count of global steps. + all_evaluator_metrics: A dictionary containing metric names and values. + + Raises: + ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least + one element. + ValueError: if save_graph is True and save_graph_dir is not defined. 
+ """ + if save_graph and not save_graph_dir: + raise ValueError('`save_graph_dir` must be defined.') + sess = tf.Session(master, graph=tf.get_default_graph()) + sess.run(tf.global_variables_initializer()) + sess.run(tf.local_variables_initializer()) + sess.run(tf.tables_initializer()) + checkpoint_file = None + if restore_fn: + restore_fn(sess) + else: + if not checkpoint_dirs: + raise ValueError('`checkpoint_dirs` must have at least one entry.') + checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0]) + saver = tf.train.Saver(variables_to_restore) + saver.restore(sess, checkpoint_file) + + if save_graph: + tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt') + + counters = {'skipped': 0, 'success': 0} + aggregate_result_losses_dict = collections.defaultdict(list) + with slim.queues.QueueRunners(sess): + try: + for batch in range(int(num_batches)): + if (batch + 1) % 100 == 0: + tf.logging.info('Running eval ops batch %d/%d', batch + 1, + num_batches) + if not batch_processor: + try: + if not losses_dict: + losses_dict = {} + result_dict, result_losses_dict = sess.run([tensor_dict, + losses_dict]) + counters['success'] += 1 + except tf.errors.InvalidArgumentError: + tf.logging.info('Skipping image') + counters['skipped'] += 1 + result_dict = {} + else: + result_dict, result_losses_dict = batch_processor( + tensor_dict, sess, batch, counters, losses_dict=losses_dict) + if not result_dict: + continue + for key, value in iter(result_losses_dict.items()): + aggregate_result_losses_dict[key].append(value) + for evaluator in evaluators: + # TODO(b/65130867): Use image_id tensor once we fix the input data + # decoders to return correct image_id. + # TODO(akuznetsa): result_dict contains batches of images, while + # add_single_ground_truth_image_info expects a single image. Fix + if (isinstance(result_dict, dict) and + fields.InputDataFields.key in result_dict and + result_dict[fields.InputDataFields.key]): + image_id = result_dict[fields.InputDataFields.key] + else: + image_id = batch + evaluator.add_single_ground_truth_image_info( + image_id=image_id, groundtruth_dict=result_dict) + evaluator.add_single_detected_image_info( + image_id=image_id, detections_dict=result_dict) + tf.logging.info('Running eval batches done.') + except tf.errors.OutOfRangeError: + tf.logging.info('Done evaluating -- epoch limit reached') + finally: + # When done, ask the threads to stop. 
+ tf.logging.info('# success: %d', counters['success']) + tf.logging.info('# skipped: %d', counters['skipped']) + all_evaluator_metrics = {} + if eval_export_path and eval_export_path is not None: + for evaluator in evaluators: + if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or + isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)): + tf.logging.info('Started dumping to json file.') + evaluator.dump_detections_to_json_file( + json_output_path=eval_export_path) + tf.logging.info('Finished dumping to json file.') + for evaluator in evaluators: + metrics = evaluator.evaluate() + evaluator.clear() + if any(key in all_evaluator_metrics for key in metrics): + raise ValueError('Metric names between evaluators must not collide.') + all_evaluator_metrics.update(metrics) + global_step = tf.train.global_step(sess, tf.train.get_global_step()) + + for key, value in iter(aggregate_result_losses_dict.items()): + all_evaluator_metrics['Losses/' + key] = np.mean(value) + if process_metrics_fn and checkpoint_file: + m = re.search(r'model.ckpt-(\d+)$', checkpoint_file) + if not m: + tf.logging.error('Failed to parse checkpoint number from: %s', + checkpoint_file) + else: + checkpoint_number = int(m.group(1)) + process_metrics_fn(checkpoint_number, all_evaluator_metrics, + checkpoint_file) + sess.close() + return (global_step, all_evaluator_metrics) + + +# TODO(rathodv): Add tests. +def repeated_checkpoint_run(tensor_dict, + summary_dir, + evaluators, + batch_processor=None, + checkpoint_dirs=None, + variables_to_restore=None, + restore_fn=None, + num_batches=1, + eval_interval_secs=120, + max_number_of_evaluations=None, + max_evaluation_global_step=None, + master='', + save_graph=False, + save_graph_dir='', + losses_dict=None, + eval_export_path=None, + process_metrics_fn=None): + """Periodically evaluates desired tensors using checkpoint_dirs or restore_fn. + + This function repeatedly loads a checkpoint and evaluates a desired + set of tensors (provided by tensor_dict) and hands the resulting numpy + arrays to a function result_processor which can be used to further + process/save/visualize the results. + + Args: + tensor_dict: a dictionary holding tensors representing a batch of detections + and corresponding groundtruth annotations. + summary_dir: a directory to write metrics summaries. + evaluators: a list of object of type DetectionEvaluator to be used for + evaluation. Note that the metric names produced by different evaluators + must be unique. + batch_processor: a function taking three arguments: + 1. tensor_dict: the same tensor_dict that is passed in as the first + argument to this function. + 2. sess: a tensorflow session + 3. batch_index: an integer representing the index of the batch amongst + all batches + By default, batch_processor is None, which defaults to running: + return sess.run(tensor_dict) + checkpoint_dirs: list of directories to load into a DetectionModel or an + EnsembleModel if restore_fn isn't set. Also used to determine when to run + next evaluation. Must have at least one element. + variables_to_restore: None, or a dictionary mapping variable names found in + a checkpoint to model variables. The dictionary would normally be + generated by creating a tf.train.ExponentialMovingAverage object and + calling its variables_to_restore() method. Not used if restore_fn is set. + restore_fn: a function that takes a tf.Session object and correctly restores + all necessary variables from the correct checkpoint file. 
+ num_batches: the number of batches to use for evaluation. + eval_interval_secs: the number of seconds between each evaluation run. + max_number_of_evaluations: the max number of iterations of the evaluation. + If the value is left as None the evaluation continues indefinitely. + max_evaluation_global_step: global step when evaluation stops. + master: the location of the Tensorflow session. + save_graph: whether or not the Tensorflow graph is saved as a pbtxt file. + save_graph_dir: where to save on disk the Tensorflow graph. If store_graph + is True this must be non-empty. + losses_dict: optional dictionary of scalar detection losses. + eval_export_path: Path for saving a json file that contains the detection + results in json format. + process_metrics_fn: a callback called with evaluation results after each + evaluation is done. It could be used e.g. to back up checkpoints with + best evaluation scores, or to call an external system to update evaluation + results in order to drive best hyper-parameter search. Parameters are: + int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics, + str checkpoint_file path. + + Returns: + metrics: A dictionary containing metric names and values in the latest + evaluation. + + Raises: + ValueError: if max_num_of_evaluations is not None or a positive number. + ValueError: if checkpoint_dirs doesn't have at least one element. + """ + if max_number_of_evaluations and max_number_of_evaluations <= 0: + raise ValueError( + '`max_number_of_evaluations` must be either None or a positive number.') + if max_evaluation_global_step and max_evaluation_global_step <= 0: + raise ValueError( + '`max_evaluation_global_step` must be either None or positive.') + + if not checkpoint_dirs: + raise ValueError('`checkpoint_dirs` must have at least one entry.') + + last_evaluated_model_path = None + number_of_evaluations = 0 + while True: + start = time.time() + tf.logging.info('Starting evaluation at ' + time.strftime( + '%Y-%m-%d-%H:%M:%S', time.gmtime())) + model_path = tf.train.latest_checkpoint(checkpoint_dirs[0]) + if not model_path: + tf.logging.info('No model found in %s. Will try again in %d seconds', + checkpoint_dirs[0], eval_interval_secs) + elif model_path == last_evaluated_model_path: + tf.logging.info('Found already evaluated checkpoint. Will try again in ' + '%d seconds', eval_interval_secs) + else: + last_evaluated_model_path = model_path + global_step, metrics = _run_checkpoint_once( + tensor_dict, + evaluators, + batch_processor, + checkpoint_dirs, + variables_to_restore, + restore_fn, + num_batches, + master, + save_graph, + save_graph_dir, + losses_dict=losses_dict, + eval_export_path=eval_export_path, + process_metrics_fn=process_metrics_fn) + write_metrics(metrics, global_step, summary_dir) + if (max_evaluation_global_step and + global_step >= max_evaluation_global_step): + tf.logging.info('Finished evaluation!') + break + number_of_evaluations += 1 + + if (max_number_of_evaluations and + number_of_evaluations >= max_number_of_evaluations): + tf.logging.info('Finished evaluation!') + break + time_to_next_eval = start + eval_interval_secs - time.time() + if time_to_next_eval > 0: + time.sleep(time_to_next_eval) + + return metrics + + +def _scale_box_to_absolute(args): + boxes, image_shape = args + return box_list_ops.to_absolute_coordinates( + box_list.BoxList(boxes), image_shape[0], image_shape[1]).get() + + +def _resize_detection_masks(arg_tuple): + """Resizes detection masks. 
+ + Args: + arg_tuple: A (detection_boxes, detection_masks, image_shape, pad_shape) + tuple where + detection_boxes is a tf.float32 tensor of size [num_masks, 4] containing + the box corners. Row i contains [ymin, xmin, ymax, xmax] of the box + corresponding to mask i. Note that the box corners are in + normalized coordinates. + detection_masks is a tensor of size + [num_masks, mask_height, mask_width]. + image_shape is a tensor of shape [2] + pad_shape is a tensor of shape [2] --- this is assumed to be greater + than or equal to image_shape along both dimensions and represents a + shape to-be-padded-to. + + Returns: + """ + detection_boxes, detection_masks, image_shape, pad_shape = arg_tuple + detection_masks_reframed = ops.reframe_box_masks_to_image_masks( + detection_masks, detection_boxes, image_shape[0], image_shape[1]) + paddings = tf.concat( + [tf.zeros([3, 1], dtype=tf.int32), + tf.expand_dims( + tf.concat([tf.zeros([1], dtype=tf.int32), + pad_shape-image_shape], axis=0), + 1)], axis=1) + detection_masks_reframed = tf.pad(detection_masks_reframed, paddings) + + # If the masks are currently float, binarize them. Otherwise keep them as + # integers, since they have already been thresholded. + if detection_masks_reframed.dtype == tf.float32: + detection_masks_reframed = tf.greater(detection_masks_reframed, 0.5) + return tf.cast(detection_masks_reframed, tf.uint8) + + +def resize_detection_masks(detection_boxes, detection_masks, + original_image_spatial_shapes): + """Resizes per-box detection masks to be relative to the entire image. + + Note that this function only works when the spatial size of all images in + the batch is the same. If not, this function should be used with batch_size=1. + + Args: + detection_boxes: A [batch_size, num_instances, 4] float tensor containing + bounding boxes. + detection_masks: A [batch_size, num_instances, height, width] float tensor + containing binary instance masks per box. + original_image_spatial_shapes: a [batch_size, 3] shaped int tensor + holding the spatial dimensions of each image in the batch. 
+ Returns: + masks: Masks resized to the spatial extents given by + (original_image_spatial_shapes[0, 0], original_image_spatial_shapes[0, 1]) + """ + # modify original image spatial shapes to be max along each dim + # in evaluator, should have access to original_image_spatial_shape field + # in add_Eval_Dict + max_spatial_shape = tf.reduce_max( + original_image_spatial_shapes, axis=0, keep_dims=True) + tiled_max_spatial_shape = tf.tile( + max_spatial_shape, + multiples=[tf.shape(original_image_spatial_shapes)[0], 1]) + return shape_utils.static_or_dynamic_map_fn( + _resize_detection_masks, + elems=[detection_boxes, + detection_masks, + original_image_spatial_shapes, + tiled_max_spatial_shape], + dtype=tf.uint8) + + +def _resize_groundtruth_masks(args): + """Resizes groundtruth masks to the original image size.""" + mask, true_image_shape, original_image_shape, pad_shape = args + true_height = true_image_shape[0] + true_width = true_image_shape[1] + mask = mask[:, :true_height, :true_width] + mask = tf.expand_dims(mask, 3) + mask = tf.image.resize_images( + mask, + original_image_shape, + method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, + align_corners=True) + + paddings = tf.concat( + [tf.zeros([3, 1], dtype=tf.int32), + tf.expand_dims( + tf.concat([tf.zeros([1], dtype=tf.int32), + pad_shape-original_image_shape], axis=0), + 1)], axis=1) + mask = tf.pad(tf.squeeze(mask, 3), paddings) + return tf.cast(mask, tf.uint8) + + +def _resize_surface_coordinate_masks(args): + detection_boxes, surface_coords, image_shape = args + surface_coords_v, surface_coords_u = tf.unstack(surface_coords, axis=-1) + surface_coords_v_reframed = ops.reframe_box_masks_to_image_masks( + surface_coords_v, detection_boxes, image_shape[0], image_shape[1]) + surface_coords_u_reframed = ops.reframe_box_masks_to_image_masks( + surface_coords_u, detection_boxes, image_shape[0], image_shape[1]) + return tf.stack([surface_coords_v_reframed, surface_coords_u_reframed], + axis=-1) + + +def _scale_keypoint_to_absolute(args): + keypoints, image_shape = args + return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1]) + + +def result_dict_for_single_example(image, + key, + detections, + groundtruth=None, + class_agnostic=False, + scale_to_absolute=False): + """Merges all detection and groundtruth information for a single example. + + Note that evaluation tools require classes that are 1-indexed, and so this + function performs the offset. If `class_agnostic` is True, all output classes + have label 1. + + Args: + image: A single 4D uint8 image tensor of shape [1, H, W, C]. + key: A single string tensor identifying the image. + detections: A dictionary of detections, returned from + DetectionModel.postprocess(). + groundtruth: (Optional) Dictionary of groundtruth items, with fields: + 'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in + normalized coordinates. + 'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes. + 'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional) + 'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional) + 'groundtruth_difficult': [num_boxes] int64 tensor. (Optional) + 'groundtruth_group_of': [num_boxes] int64 tensor. (Optional) + 'groundtruth_instance_masks': 3D int64 tensor of instance masks + (Optional). + 'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor with + keypoints (Optional). + class_agnostic: Boolean indicating whether the detections are class-agnostic + (i.e. binary). Default False. 
+ scale_to_absolute: Boolean indicating whether boxes and keypoints should be + scaled to absolute coordinates. Note that for IoU based evaluations, it + does not matter whether boxes are expressed in absolute or relative + coordinates. Default False. + + Returns: + A dictionary with: + 'original_image': A [1, H, W, C] uint8 image tensor. + 'key': A string tensor with image identifier. + 'detection_boxes': [max_detections, 4] float32 tensor of boxes, in + normalized or absolute coordinates, depending on the value of + `scale_to_absolute`. + 'detection_scores': [max_detections] float32 tensor of scores. + 'detection_classes': [max_detections] int64 tensor of 1-indexed classes. + 'detection_masks': [max_detections, H, W] float32 tensor of binarized + masks, reframed to full image masks. + 'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in + normalized or absolute coordinates, depending on the value of + `scale_to_absolute`. (Optional) + 'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes. + (Optional) + 'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional) + 'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional) + 'groundtruth_difficult': [num_boxes] int64 tensor. (Optional) + 'groundtruth_group_of': [num_boxes] int64 tensor. (Optional) + 'groundtruth_instance_masks': 3D int64 tensor of instance masks + (Optional). + 'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor with + keypoints (Optional). + """ + + if groundtruth: + max_gt_boxes = tf.shape( + groundtruth[fields.InputDataFields.groundtruth_boxes])[0] + for gt_key in groundtruth: + # expand groundtruth dict along the batch dimension. + groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0) + + for detection_key in detections: + detections[detection_key] = tf.expand_dims( + detections[detection_key][0], axis=0) + + batched_output_dict = result_dict_for_batched_example( + image, + tf.expand_dims(key, 0), + detections, + groundtruth, + class_agnostic, + scale_to_absolute, + max_gt_boxes=max_gt_boxes) + + exclude_keys = [ + fields.InputDataFields.original_image, + fields.DetectionResultFields.num_detections, + fields.InputDataFields.num_groundtruth_boxes + ] + + output_dict = { + fields.InputDataFields.original_image: + batched_output_dict[fields.InputDataFields.original_image] + } + + for key in batched_output_dict: + # remove the batch dimension. + if key not in exclude_keys: + output_dict[key] = tf.squeeze(batched_output_dict[key], 0) + return output_dict + + +def result_dict_for_batched_example(images, + keys, + detections, + groundtruth=None, + class_agnostic=False, + scale_to_absolute=False, + original_image_spatial_shapes=None, + true_image_shapes=None, + max_gt_boxes=None): + """Merges all detection and groundtruth information for a single example. + + Note that evaluation tools require classes that are 1-indexed, and so this + function performs the offset. If `class_agnostic` is True, all output classes + have label 1. + The groundtruth coordinates of boxes/keypoints in 'groundtruth' dictionary are + normalized relative to the (potentially padded) input image, while the + coordinates in 'detection' dictionary are normalized relative to the true + image shape. + + Args: + images: A single 4D uint8 image tensor of shape [batch_size, H, W, C]. + keys: A [batch_size] string/int tensor with image identifier. + detections: A dictionary of detections, returned from + DetectionModel.postprocess(). 
+ groundtruth: (Optional) Dictionary of groundtruth items, with fields: + 'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor + of boxes, in normalized coordinates. + 'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of + 1-indexed classes. + 'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of + bbox area. (Optional) + 'groundtruth_is_crowd':[batch_size, max_number_of_boxes] int64 + tensor. (Optional) + 'groundtruth_difficult': [batch_size, max_number_of_boxes] int64 + tensor. (Optional) + 'groundtruth_group_of': [batch_size, max_number_of_boxes] int64 + tensor. (Optional) + 'groundtruth_instance_masks': 4D int64 tensor of instance + masks (Optional). + 'groundtruth_keypoints': [batch_size, max_number_of_boxes, num_keypoints, + 2] float32 tensor with keypoints (Optional). + 'groundtruth_keypoint_visibilities': [batch_size, max_number_of_boxes, + num_keypoints] bool tensor with keypoint visibilities (Optional). + 'groundtruth_labeled_classes': [batch_size, num_classes] int64 + tensor of 1-indexed classes. (Optional) + 'groundtruth_dp_num_points': [batch_size, max_number_of_boxes] int32 + tensor. (Optional) + 'groundtruth_dp_part_ids': [batch_size, max_number_of_boxes, + max_sampled_points] int32 tensor. (Optional) + 'groundtruth_dp_surface_coords_list': [batch_size, max_number_of_boxes, + max_sampled_points, 4] float32 tensor. (Optional) + class_agnostic: Boolean indicating whether the detections are class-agnostic + (i.e. binary). Default False. + scale_to_absolute: Boolean indicating whether boxes and keypoints should be + scaled to absolute coordinates. Note that for IoU based evaluations, it + does not matter whether boxes are expressed in absolute or relative + coordinates. Default False. + original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2] + used to resize the image. When set to None, the image size is retained. + true_image_shapes: A 2D int32 tensor of shape [batch_size, 3] + containing the size of the unpadded original_image. + max_gt_boxes: [batch_size] tensor representing the maximum number of + groundtruth boxes to pad. + + Returns: + A dictionary with: + 'original_image': A [batch_size, H, W, C] uint8 image tensor. + 'original_image_spatial_shape': A [batch_size, 2] tensor containing the + original image sizes. + 'true_image_shape': A [batch_size, 3] tensor containing the size of + the unpadded original_image. + 'key': A [batch_size] string tensor with image identifier. + 'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes, + in normalized or absolute coordinates, depending on the value of + `scale_to_absolute`. + 'detection_scores': [batch_size, max_detections] float32 tensor of scores. + 'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed + classes. + 'detection_masks': [batch_size, max_detections, H, W] uint8 tensor of + instance masks, reframed to full image masks. Note that these may be + binarized (e.g. {0, 1}), or may contain 1-indexed part labels. (Optional) + 'detection_keypoints': [batch_size, max_detections, num_keypoints, 2] + float32 tensor containing keypoint coordinates. (Optional) + 'detection_keypoint_scores': [batch_size, max_detections, num_keypoints] + float32 tensor containing keypoint scores. (Optional) + 'detection_surface_coords': [batch_size, max_detection, H, W, 2] float32 + tensor with normalized surface coordinates (e.g. DensePose UV + coordinates). 
(Optional) + 'num_detections': [batch_size] int64 tensor containing number of valid + detections. + 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in + normalized or absolute coordinates, depending on the value of + `scale_to_absolute`. (Optional) + 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed + classes. (Optional) + 'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox + area. (Optional) + 'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional) + 'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional) + 'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional) + 'groundtruth_instance_masks': 4D int64 tensor of instance masks + (Optional). + 'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32 + tensor with keypoints (Optional). + 'groundtruth_keypoint_visibilities': [batch_size, num_boxes, num_keypoints] + bool tensor with keypoint visibilities (Optional). + 'groundtruth_labeled_classes': [batch_size, num_classes] int64 tensor + of 1-indexed classes. (Optional) + 'num_groundtruth_boxes': [batch_size] tensor containing the maximum number + of groundtruth boxes per image. + + Raises: + ValueError: if original_image_spatial_shape is not 2D int32 tensor of shape + [2]. + ValueError: if true_image_shapes is not 2D int32 tensor of shape + [3]. + """ + label_id_offset = 1 # Applying label id offset (b/63711816) + + input_data_fields = fields.InputDataFields + if original_image_spatial_shapes is None: + original_image_spatial_shapes = tf.tile( + tf.expand_dims(tf.shape(images)[1:3], axis=0), + multiples=[tf.shape(images)[0], 1]) + else: + if (len(original_image_spatial_shapes.shape) != 2 and + original_image_spatial_shapes.shape[1] != 2): + raise ValueError( + '`original_image_spatial_shape` should be a 2D tensor of shape ' + '[batch_size, 2].') + + if true_image_shapes is None: + true_image_shapes = tf.tile( + tf.expand_dims(tf.shape(images)[1:4], axis=0), + multiples=[tf.shape(images)[0], 1]) + else: + if (len(true_image_shapes.shape) != 2 + and true_image_shapes.shape[1] != 3): + raise ValueError('`true_image_shapes` should be a 2D tensor of ' + 'shape [batch_size, 3].') + + output_dict = { + input_data_fields.original_image: + images, + input_data_fields.key: + keys, + input_data_fields.original_image_spatial_shape: ( + original_image_spatial_shapes), + input_data_fields.true_image_shape: + true_image_shapes + } + + detection_fields = fields.DetectionResultFields + detection_boxes = detections[detection_fields.detection_boxes] + detection_scores = detections[detection_fields.detection_scores] + num_detections = tf.cast(detections[detection_fields.num_detections], + dtype=tf.int32) + + if class_agnostic: + detection_classes = tf.ones_like(detection_scores, dtype=tf.int64) + else: + detection_classes = ( + tf.to_int64(detections[detection_fields.detection_classes]) + + label_id_offset) + + if scale_to_absolute: + output_dict[detection_fields.detection_boxes] = ( + shape_utils.static_or_dynamic_map_fn( + _scale_box_to_absolute, + elems=[detection_boxes, original_image_spatial_shapes], + dtype=tf.float32)) + else: + output_dict[detection_fields.detection_boxes] = detection_boxes + output_dict[detection_fields.detection_classes] = detection_classes + output_dict[detection_fields.detection_scores] = detection_scores + output_dict[detection_fields.num_detections] = num_detections + + if detection_fields.detection_masks in detections: + detection_masks = 
detections[detection_fields.detection_masks] + output_dict[detection_fields.detection_masks] = resize_detection_masks( + detection_boxes, detection_masks, original_image_spatial_shapes) + + if detection_fields.detection_surface_coords in detections: + detection_surface_coords = detections[ + detection_fields.detection_surface_coords] + output_dict[detection_fields.detection_surface_coords] = ( + shape_utils.static_or_dynamic_map_fn( + _resize_surface_coordinate_masks, + elems=[detection_boxes, detection_surface_coords, + original_image_spatial_shapes], + dtype=tf.float32)) + + if detection_fields.detection_keypoints in detections: + detection_keypoints = detections[detection_fields.detection_keypoints] + output_dict[detection_fields.detection_keypoints] = detection_keypoints + if scale_to_absolute: + output_dict[detection_fields.detection_keypoints] = ( + shape_utils.static_or_dynamic_map_fn( + _scale_keypoint_to_absolute, + elems=[detection_keypoints, original_image_spatial_shapes], + dtype=tf.float32)) + if detection_fields.detection_keypoint_scores in detections: + output_dict[detection_fields.detection_keypoint_scores] = detections[ + detection_fields.detection_keypoint_scores] + else: + output_dict[detection_fields.detection_keypoint_scores] = tf.ones_like( + detections[detection_fields.detection_keypoints][:, :, :, 0]) + + if groundtruth: + if max_gt_boxes is None: + if input_data_fields.num_groundtruth_boxes in groundtruth: + max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes] + else: + raise ValueError( + 'max_gt_boxes must be provided when processing batched examples.') + + if input_data_fields.groundtruth_instance_masks in groundtruth: + masks = groundtruth[input_data_fields.groundtruth_instance_masks] + max_spatial_shape = tf.reduce_max( + original_image_spatial_shapes, axis=0, keep_dims=True) + tiled_max_spatial_shape = tf.tile( + max_spatial_shape, + multiples=[tf.shape(original_image_spatial_shapes)[0], 1]) + groundtruth[input_data_fields.groundtruth_instance_masks] = ( + shape_utils.static_or_dynamic_map_fn( + _resize_groundtruth_masks, + elems=[masks, true_image_shapes, + original_image_spatial_shapes, + tiled_max_spatial_shape], + dtype=tf.uint8)) + + output_dict.update(groundtruth) + + image_shape = tf.cast(tf.shape(images), tf.float32) + image_height, image_width = image_shape[1], image_shape[2] + + def _scale_box_to_normalized_true_image(args): + """Scale the box coordinates to be relative to the true image shape.""" + boxes, true_image_shape = args + true_image_shape = tf.cast(true_image_shape, tf.float32) + true_height, true_width = true_image_shape[0], true_image_shape[1] + normalized_window = tf.stack([0.0, 0.0, true_height / image_height, + true_width / image_width]) + return box_list_ops.change_coordinate_frame( + box_list.BoxList(boxes), normalized_window).get() + + groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes] + groundtruth_boxes = shape_utils.static_or_dynamic_map_fn( + _scale_box_to_normalized_true_image, + elems=[groundtruth_boxes, true_image_shapes], dtype=tf.float32) + output_dict[input_data_fields.groundtruth_boxes] = groundtruth_boxes + + if input_data_fields.groundtruth_keypoints in groundtruth: + # If groundtruth_keypoints is in the groundtruth dictionary. Update the + # coordinates to conform with the true image shape. 
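+      # As noted in the docstring above, groundtruth coordinates are normalized
+      # relative to the (potentially padded) input image, so, like the boxes,
+      # the keypoints are re-framed to the normalized true-image window here.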
+ def _scale_keypoints_to_normalized_true_image(args): + """Scale the box coordinates to be relative to the true image shape.""" + keypoints, true_image_shape = args + true_image_shape = tf.cast(true_image_shape, tf.float32) + true_height, true_width = true_image_shape[0], true_image_shape[1] + normalized_window = tf.stack( + [0.0, 0.0, true_height / image_height, true_width / image_width]) + return keypoint_ops.change_coordinate_frame(keypoints, + normalized_window) + + groundtruth_keypoints = groundtruth[ + input_data_fields.groundtruth_keypoints] + groundtruth_keypoints = shape_utils.static_or_dynamic_map_fn( + _scale_keypoints_to_normalized_true_image, + elems=[groundtruth_keypoints, true_image_shapes], + dtype=tf.float32) + output_dict[ + input_data_fields.groundtruth_keypoints] = groundtruth_keypoints + + if scale_to_absolute: + groundtruth_boxes = output_dict[input_data_fields.groundtruth_boxes] + output_dict[input_data_fields.groundtruth_boxes] = ( + shape_utils.static_or_dynamic_map_fn( + _scale_box_to_absolute, + elems=[groundtruth_boxes, original_image_spatial_shapes], + dtype=tf.float32)) + if input_data_fields.groundtruth_keypoints in groundtruth: + groundtruth_keypoints = output_dict[ + input_data_fields.groundtruth_keypoints] + output_dict[input_data_fields.groundtruth_keypoints] = ( + shape_utils.static_or_dynamic_map_fn( + _scale_keypoint_to_absolute, + elems=[groundtruth_keypoints, original_image_spatial_shapes], + dtype=tf.float32)) + + # For class-agnostic models, groundtruth classes all become 1. + if class_agnostic: + groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes] + groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64) + output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes + + output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes + + return output_dict + + +def get_evaluators(eval_config, categories, evaluator_options=None): + """Returns the evaluator class according to eval_config, valid for categories. + + Args: + eval_config: An `eval_pb2.EvalConfig`. + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + 'keypoints': (optional) dict mapping this category's keypoints to unique + ids. + evaluator_options: A dictionary of metric names (see + EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization + keyword arguments. For example: + evalator_options = { + 'coco_detection_metrics': {'include_metrics_per_category': True} + } + + Returns: + An list of instances of DetectionEvaluator. + + Raises: + ValueError: if metric is not in the metric class dictionary. 
+ """ + evaluator_options = evaluator_options or {} + eval_metric_fn_keys = eval_config.metrics_set + if not eval_metric_fn_keys: + eval_metric_fn_keys = [EVAL_DEFAULT_METRIC] + evaluators_list = [] + for eval_metric_fn_key in eval_metric_fn_keys: + if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT: + raise ValueError('Metric not found: {}'.format(eval_metric_fn_key)) + kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_key + in evaluator_options else {}) + evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key]( + categories, + **kwargs_dict)) + + if isinstance(eval_config, eval_pb2.EvalConfig): + parameterized_metrics = eval_config.parameterized_metric + for parameterized_metric in parameterized_metrics: + assert parameterized_metric.HasField('parameterized_metric') + if parameterized_metric.WhichOneof( + 'parameterized_metric') == EVAL_KEYPOINT_METRIC: + keypoint_metrics = parameterized_metric.coco_keypoint_metrics + # Create category to keypoints mapping dict. + category_keypoints = {} + class_label = keypoint_metrics.class_label + category = None + for cat in categories: + if cat['name'] == class_label: + category = cat + break + if not category: + continue + keypoints_for_this_class = category['keypoints'] + category_keypoints = [{ + 'id': keypoints_for_this_class[kp_name], 'name': kp_name + } for kp_name in keypoints_for_this_class] + # Create keypoint evaluator for this category. + evaluators_list.append(EVAL_METRICS_CLASS_DICT[EVAL_KEYPOINT_METRIC]( + category['id'], category_keypoints, class_label, + keypoint_metrics.keypoint_label_to_sigmas)) + return evaluators_list + + +def get_eval_metric_ops_for_evaluators(eval_config, + categories, + eval_dict): + """Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`. + + Args: + eval_config: An `eval_pb2.EvalConfig`. + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + eval_dict: An evaluation dictionary, returned from + result_dict_for_single_example(). + + Returns: + A dictionary of metric names to tuple of value_op and update_op that can be + used as eval metric ops in tf.EstimatorSpec. + """ + eval_metric_ops = {} + evaluator_options = evaluator_options_from_eval_config(eval_config) + evaluators_list = get_evaluators(eval_config, categories, evaluator_options) + for evaluator in evaluators_list: + eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops( + eval_dict)) + return eval_metric_ops + + +def evaluator_options_from_eval_config(eval_config): + """Produces a dictionary of evaluation options for each eval metric. + + Args: + eval_config: An `eval_pb2.EvalConfig`. + + Returns: + evaluator_options: A dictionary of metric names (see + EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization + keyword arguments. 
For example: + evalator_options = { + 'coco_detection_metrics': {'include_metrics_per_category': True} + } + """ + eval_metric_fn_keys = eval_config.metrics_set + evaluator_options = {} + for eval_metric_fn_key in eval_metric_fn_keys: + if eval_metric_fn_key in ( + 'coco_detection_metrics', 'coco_mask_metrics', 'lvis_mask_metrics'): + evaluator_options[eval_metric_fn_key] = { + 'include_metrics_per_category': ( + eval_config.include_metrics_per_category) + } + # For coco detection eval, if the eval_config proto contains the + # "skip_predictions_for_unlabeled_class" field, include this field in + # evaluator_options. + if eval_metric_fn_key == 'coco_detection_metrics' and hasattr( + eval_config, 'skip_predictions_for_unlabeled_class'): + evaluator_options[eval_metric_fn_key].update({ + 'skip_predictions_for_unlabeled_class': + (eval_config.skip_predictions_for_unlabeled_class) + }) + for super_category in eval_config.super_categories: + if 'super_categories' not in evaluator_options[eval_metric_fn_key]: + evaluator_options[eval_metric_fn_key]['super_categories'] = {} + key = super_category + value = eval_config.super_categories[key].split(',') + evaluator_options[eval_metric_fn_key]['super_categories'][key] = value + + elif eval_metric_fn_key == 'precision_at_recall_detection_metrics': + evaluator_options[eval_metric_fn_key] = { + 'recall_lower_bound': (eval_config.recall_lower_bound), + 'recall_upper_bound': (eval_config.recall_upper_bound) + } + return evaluator_options + + +def has_densepose(eval_dict): + return (fields.DetectionResultFields.detection_masks in eval_dict and + fields.DetectionResultFields.detection_surface_coords in eval_dict) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/eval_util_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/eval_util_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a39a5ff16749fdfbb091448c444c02de5d524b36 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/eval_util_test.py @@ -0,0 +1,461 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for eval_util.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +from absl.testing import parameterized + +import numpy as np +import six +from six.moves import range +import tensorflow.compat.v1 as tf +from google.protobuf import text_format + +from object_detection import eval_util +from object_detection.core import standard_fields as fields +from object_detection.metrics import coco_evaluation +from object_detection.protos import eval_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +class EvalUtilTest(test_case.TestCase, parameterized.TestCase): + + def _get_categories_list(self): + return [{'id': 1, 'name': 'person'}, + {'id': 2, 'name': 'dog'}, + {'id': 3, 'name': 'cat'}] + + def _get_categories_list_with_keypoints(self): + return [{ + 'id': 1, + 'name': 'person', + 'keypoints': { + 'left_eye': 0, + 'right_eye': 3 + } + }, { + 'id': 2, + 'name': 'dog', + 'keypoints': { + 'tail_start': 1, + 'mouth': 2 + } + }, { + 'id': 3, + 'name': 'cat' + }] + + def _make_evaluation_dict(self, + resized_groundtruth_masks=False, + batch_size=1, + max_gt_boxes=None, + scale_to_absolute=False): + input_data_fields = fields.InputDataFields + detection_fields = fields.DetectionResultFields + + image = tf.zeros(shape=[batch_size, 20, 20, 3], dtype=tf.uint8) + if batch_size == 1: + key = tf.constant('image1') + else: + key = tf.constant([str(i) for i in range(batch_size)]) + detection_boxes = tf.tile(tf.constant([[[0., 0., 1., 1.]]]), + multiples=[batch_size, 1, 1]) + detection_scores = tf.tile(tf.constant([[0.8]]), multiples=[batch_size, 1]) + detection_classes = tf.tile(tf.constant([[0]]), multiples=[batch_size, 1]) + detection_masks = tf.tile(tf.ones(shape=[1, 1, 20, 20], dtype=tf.float32), + multiples=[batch_size, 1, 1, 1]) + num_detections = tf.ones([batch_size]) + groundtruth_boxes = tf.constant([[0., 0., 1., 1.]]) + groundtruth_classes = tf.constant([1]) + groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8) + original_image_spatial_shapes = tf.constant([[20, 20]], dtype=tf.int32) + + groundtruth_keypoints = tf.constant([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]) + if resized_groundtruth_masks: + groundtruth_instance_masks = tf.ones(shape=[1, 10, 10], dtype=tf.uint8) + + if batch_size > 1: + groundtruth_boxes = tf.tile(tf.expand_dims(groundtruth_boxes, 0), + multiples=[batch_size, 1, 1]) + groundtruth_classes = tf.tile(tf.expand_dims(groundtruth_classes, 0), + multiples=[batch_size, 1]) + groundtruth_instance_masks = tf.tile( + tf.expand_dims(groundtruth_instance_masks, 0), + multiples=[batch_size, 1, 1, 1]) + groundtruth_keypoints = tf.tile( + tf.expand_dims(groundtruth_keypoints, 0), + multiples=[batch_size, 1, 1]) + original_image_spatial_shapes = tf.tile(original_image_spatial_shapes, + multiples=[batch_size, 1]) + + detections = { + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + detection_fields.num_detections: num_detections + } + groundtruth = { + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_keypoints: groundtruth_keypoints, + input_data_fields.groundtruth_instance_masks: + 
groundtruth_instance_masks, + input_data_fields.original_image_spatial_shape: + original_image_spatial_shapes + } + if batch_size > 1: + return eval_util.result_dict_for_batched_example( + image, key, detections, groundtruth, + scale_to_absolute=scale_to_absolute, + max_gt_boxes=max_gt_boxes) + else: + return eval_util.result_dict_for_single_example( + image, key, detections, groundtruth, + scale_to_absolute=scale_to_absolute) + + @parameterized.parameters( + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} + ) + @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') + def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1, + max_gt_boxes=None, + scale_to_absolute=False): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend(['coco_detection_metrics']) + categories = self._get_categories_list() + eval_dict = self._make_evaluation_dict(batch_size=batch_size, + max_gt_boxes=max_gt_boxes, + scale_to_absolute=scale_to_absolute) + metric_ops = eval_util.get_eval_metric_ops_for_evaluators( + eval_config, categories, eval_dict) + _, update_op = metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + metrics = {} + for key, (value_op, _) in six.iteritems(metric_ops): + metrics[key] = value_op + sess.run(update_op) + metrics = sess.run(metrics) + self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) + self.assertNotIn('DetectionMasks_Precision/mAP', metrics) + + @parameterized.parameters( + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} + ) + @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') + def test_get_eval_metric_ops_for_coco_detections_and_masks( + self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend( + ['coco_detection_metrics', 'coco_mask_metrics']) + categories = self._get_categories_list() + eval_dict = self._make_evaluation_dict(batch_size=batch_size, + max_gt_boxes=max_gt_boxes, + scale_to_absolute=scale_to_absolute) + metric_ops = eval_util.get_eval_metric_ops_for_evaluators( + eval_config, categories, eval_dict) + _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP'] + _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP'] + + with self.test_session() as sess: + metrics = {} + for key, (value_op, _) in six.iteritems(metric_ops): + metrics[key] = value_op + sess.run(update_op_boxes) + sess.run(update_op_masks) + metrics = sess.run(metrics) + self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) + self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) + + @parameterized.parameters( + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, + {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, + {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} + ) + @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') + def test_get_eval_metric_ops_for_coco_detections_and_resized_masks( + self, 
batch_size=1, max_gt_boxes=None, scale_to_absolute=False): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend( + ['coco_detection_metrics', 'coco_mask_metrics']) + categories = self._get_categories_list() + eval_dict = self._make_evaluation_dict(batch_size=batch_size, + max_gt_boxes=max_gt_boxes, + scale_to_absolute=scale_to_absolute, + resized_groundtruth_masks=True) + metric_ops = eval_util.get_eval_metric_ops_for_evaluators( + eval_config, categories, eval_dict) + _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP'] + _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP'] + + with self.test_session() as sess: + metrics = {} + for key, (value_op, _) in six.iteritems(metric_ops): + metrics[key] = value_op + sess.run(update_op_boxes) + sess.run(update_op_masks) + metrics = sess.run(metrics) + self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) + self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) + + @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') + def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend(['unsupported_metric']) + categories = self._get_categories_list() + eval_dict = self._make_evaluation_dict() + with self.assertRaises(ValueError): + eval_util.get_eval_metric_ops_for_evaluators( + eval_config, categories, eval_dict) + + def test_get_eval_metric_ops_for_evaluators(self): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend([ + 'coco_detection_metrics', 'coco_mask_metrics', + 'precision_at_recall_detection_metrics' + ]) + eval_config.include_metrics_per_category = True + eval_config.recall_lower_bound = 0.2 + eval_config.recall_upper_bound = 0.6 + + evaluator_options = eval_util.evaluator_options_from_eval_config( + eval_config) + self.assertTrue(evaluator_options['coco_detection_metrics'] + ['include_metrics_per_category']) + self.assertFalse(evaluator_options['coco_detection_metrics'] + ['skip_predictions_for_unlabeled_class']) + self.assertTrue( + evaluator_options['coco_mask_metrics']['include_metrics_per_category']) + self.assertAlmostEqual( + evaluator_options['precision_at_recall_detection_metrics'] + ['recall_lower_bound'], eval_config.recall_lower_bound) + self.assertAlmostEqual( + evaluator_options['precision_at_recall_detection_metrics'] + ['recall_upper_bound'], eval_config.recall_upper_bound) + + def test_get_evaluator_with_evaluator_options(self): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend( + ['coco_detection_metrics', 'precision_at_recall_detection_metrics']) + eval_config.include_metrics_per_category = True + eval_config.skip_predictions_for_unlabeled_class = True + eval_config.recall_lower_bound = 0.2 + eval_config.recall_upper_bound = 0.6 + categories = self._get_categories_list() + + evaluator_options = eval_util.evaluator_options_from_eval_config( + eval_config) + evaluator = eval_util.get_evaluators(eval_config, categories, + evaluator_options) + + self.assertTrue(evaluator[0]._include_metrics_per_category) + self.assertTrue(evaluator[0]._skip_predictions_for_unlabeled_class) + self.assertAlmostEqual(evaluator[1]._recall_lower_bound, + eval_config.recall_lower_bound) + self.assertAlmostEqual(evaluator[1]._recall_upper_bound, + eval_config.recall_upper_bound) + + def test_get_evaluator_with_no_evaluator_options(self): + eval_config = eval_pb2.EvalConfig() + eval_config.metrics_set.extend( + ['coco_detection_metrics', 
'precision_at_recall_detection_metrics']) + eval_config.include_metrics_per_category = True + eval_config.recall_lower_bound = 0.2 + eval_config.recall_upper_bound = 0.6 + categories = self._get_categories_list() + + evaluator = eval_util.get_evaluators( + eval_config, categories, evaluator_options=None) + + # Even though we are setting eval_config.include_metrics_per_category = True + # and bounds on recall, these options are never passed into the + # DetectionEvaluator constructor (via `evaluator_options`). + self.assertFalse(evaluator[0]._include_metrics_per_category) + self.assertAlmostEqual(evaluator[1]._recall_lower_bound, 0.0) + self.assertAlmostEqual(evaluator[1]._recall_upper_bound, 1.0) + + def test_get_evaluator_with_keypoint_metrics(self): + eval_config = eval_pb2.EvalConfig() + person_keypoints_metric = eval_config.parameterized_metric.add() + person_keypoints_metric.coco_keypoint_metrics.class_label = 'person' + person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ + 'left_eye'] = 0.1 + person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ + 'right_eye'] = 0.2 + dog_keypoints_metric = eval_config.parameterized_metric.add() + dog_keypoints_metric.coco_keypoint_metrics.class_label = 'dog' + dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ + 'tail_start'] = 0.3 + dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ + 'mouth'] = 0.4 + categories = self._get_categories_list_with_keypoints() + + evaluator = eval_util.get_evaluators( + eval_config, categories, evaluator_options=None) + + # Verify keypoint evaluator class variables. + self.assertLen(evaluator, 3) + self.assertFalse(evaluator[0]._include_metrics_per_category) + self.assertEqual(evaluator[1]._category_name, 'person') + self.assertEqual(evaluator[2]._category_name, 'dog') + self.assertAllEqual(evaluator[1]._keypoint_ids, [0, 3]) + self.assertAllEqual(evaluator[2]._keypoint_ids, [1, 2]) + self.assertAllClose([0.1, 0.2], evaluator[1]._oks_sigmas) + self.assertAllClose([0.3, 0.4], evaluator[2]._oks_sigmas) + + def test_get_evaluator_with_unmatched_label(self): + eval_config = eval_pb2.EvalConfig() + person_keypoints_metric = eval_config.parameterized_metric.add() + person_keypoints_metric.coco_keypoint_metrics.class_label = 'unmatched' + person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ + 'kpt'] = 0.1 + categories = self._get_categories_list_with_keypoints() + + evaluator = eval_util.get_evaluators( + eval_config, categories, evaluator_options=None) + self.assertLen(evaluator, 1) + self.assertNotIsInstance( + evaluator[0], coco_evaluation.CocoKeypointEvaluator) + + def test_padded_image_result_dict(self): + + input_data_fields = fields.InputDataFields + detection_fields = fields.DetectionResultFields + key = tf.constant([str(i) for i in range(2)]) + + detection_boxes = np.array([[[0., 0., 1., 1.]], [[0.0, 0.0, 0.5, 0.5]]], + dtype=np.float32) + detection_keypoints = np.array([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]], + dtype=np.float32) + def graph_fn(): + detections = { + detection_fields.detection_boxes: + tf.constant(detection_boxes), + detection_fields.detection_scores: + tf.constant([[1.], [1.]]), + detection_fields.detection_classes: + tf.constant([[1], [2]]), + detection_fields.num_detections: + tf.constant([1, 1]), + detection_fields.detection_keypoints: + tf.tile( + tf.reshape( + tf.constant(detection_keypoints), shape=[1, 1, 3, 2]), + multiples=[2, 1, 1, 1]) + } + + gt_boxes = detection_boxes + groundtruth = { + 
input_data_fields.groundtruth_boxes: + tf.constant(gt_boxes), + input_data_fields.groundtruth_classes: + tf.constant([[1.], [1.]]), + input_data_fields.groundtruth_keypoints: + tf.tile( + tf.reshape( + tf.constant(detection_keypoints), shape=[1, 1, 3, 2]), + multiples=[2, 1, 1, 1]) + } + + image = tf.zeros((2, 100, 100, 3), dtype=tf.float32) + + true_image_shapes = tf.constant([[100, 100, 3], [50, 100, 3]]) + original_image_spatial_shapes = tf.constant([[200, 200], [150, 300]]) + + result = eval_util.result_dict_for_batched_example( + image, key, detections, groundtruth, + scale_to_absolute=True, + true_image_shapes=true_image_shapes, + original_image_spatial_shapes=original_image_spatial_shapes, + max_gt_boxes=tf.constant(1)) + return (result[input_data_fields.groundtruth_boxes], + result[input_data_fields.groundtruth_keypoints], + result[detection_fields.detection_boxes], + result[detection_fields.detection_keypoints]) + (gt_boxes, gt_keypoints, detection_boxes, + detection_keypoints) = self.execute_cpu(graph_fn, []) + self.assertAllEqual( + [[[0., 0., 200., 200.]], [[0.0, 0.0, 150., 150.]]], + gt_boxes) + self.assertAllClose([[[[0., 0.], [100., 100.], [200., 200.]]], + [[[0., 0.], [150., 150.], [300., 300.]]]], + gt_keypoints) + + # Predictions from the model are not scaled. + self.assertAllEqual( + [[[0., 0., 200., 200.]], [[0.0, 0.0, 75., 150.]]], + detection_boxes) + self.assertAllClose([[[[0., 0.], [100., 100.], [200., 200.]]], + [[[0., 0.], [75., 150.], [150., 300.]]]], + detection_keypoints) + + def test_evaluator_options_from_eval_config_no_super_categories(self): + eval_config_text_proto = """ + metrics_set: "coco_detection_metrics" + metrics_set: "coco_mask_metrics" + include_metrics_per_category: true + use_moving_averages: false + batch_size: 1; + """ + eval_config = eval_pb2.EvalConfig() + text_format.Merge(eval_config_text_proto, eval_config) + evaluator_options = eval_util.evaluator_options_from_eval_config( + eval_config) + self.assertNotIn('super_categories', evaluator_options['coco_mask_metrics']) + + def test_evaluator_options_from_eval_config_with_super_categories(self): + eval_config_text_proto = """ + metrics_set: "coco_detection_metrics" + metrics_set: "coco_mask_metrics" + include_metrics_per_category: true + use_moving_averages: false + batch_size: 1; + super_categories { + key: "supercat1" + value: "a,b,c" + } + super_categories { + key: "supercat2" + value: "d,e,f" + } + """ + eval_config = eval_pb2.EvalConfig() + text_format.Merge(eval_config_text_proto, eval_config) + evaluator_options = eval_util.evaluator_options_from_eval_config( + eval_config) + self.assertIn('super_categories', evaluator_options['coco_mask_metrics']) + super_categories = evaluator_options[ + 'coco_mask_metrics']['super_categories'] + self.assertIn('supercat1', super_categories) + self.assertIn('supercat2', super_categories) + self.assertAllEqual(super_categories['supercat1'], ['a', 'b', 'c']) + self.assertAllEqual(super_categories['supercat2'], ['d', 'e', 'f']) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_inference_graph.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_inference_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..5a0ee0dde056afacca9a876c7456cb82a82f3192 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_inference_graph.py @@ -0,0 +1,206 @@ +# Lint as: python2, python3 +# Copyright 
2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Tool to export an object detection model for inference. + +Prepares an object detection tensorflow graph for inference using model +configuration and a trained checkpoint. Outputs inference +graph, associated checkpoint files, a frozen inference graph and a +SavedModel (https://tensorflow.github.io/serving/serving_basic.html). + +The inference graph contains one of three input nodes depending on the user +specified option. + * `image_tensor`: Accepts a uint8 4-D tensor of shape [None, None, None, 3] + * `encoded_image_string_tensor`: Accepts a 1-D string tensor of shape [None] + containing encoded PNG or JPEG images. Image resolutions are expected to be + the same if more than 1 image is provided. + * `tf_example`: Accepts a 1-D string tensor of shape [None] containing + serialized TFExample protos. Image resolutions are expected to be the same + if more than 1 image is provided. + +and the following output nodes returned by the model.postprocess(..): + * `num_detections`: Outputs float32 tensors of the form [batch] + that specifies the number of valid boxes per image in the batch. + * `detection_boxes`: Outputs float32 tensors of the form + [batch, num_boxes, 4] containing detected boxes. + * `detection_scores`: Outputs float32 tensors of the form + [batch, num_boxes] containing class scores for the detections. + * `detection_classes`: Outputs float32 tensors of the form + [batch, num_boxes] containing classes for the detections. + * `raw_detection_boxes`: Outputs float32 tensors of the form + [batch, raw_num_boxes, 4] containing detection boxes without + post-processing. + * `raw_detection_scores`: Outputs float32 tensors of the form + [batch, raw_num_boxes, num_classes_with_background] containing class score + logits for raw detection boxes. + * `detection_masks`: (Optional) Outputs float32 tensors of the form + [batch, num_boxes, mask_height, mask_width] containing predicted instance + masks for each box if its present in the dictionary of postprocessed + tensors returned by the model. + * detection_multiclass_scores: (Optional) Outputs float32 tensor of shape + [batch, num_boxes, num_classes_with_background] for containing class + score distribution for detected boxes including background if any. + * detection_features: (Optional) float32 tensor of shape + [batch, num_boxes, roi_height, roi_width, depth] + containing classifier features + +Notes: + * This tool uses `use_moving_averages` from eval_config to decide which + weights to freeze. 
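# A minimal sketch of consuming the exported frozen_inference_graph.pb, assuming the
# default `image_tensor` input and the output nodes listed above; the graph path and
# the 300x300 dummy image are placeholders. (The Example Usage below shows the export
# command that produces this file.)
import numpy as np
import tensorflow.compat.v1 as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('path/to/exported_model_directory/frozen_inference_graph.pb',
                    'rb') as f:
  graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
  tf.import_graph_def(graph_def, name='')
  with tf.Session(graph=graph) as sess:
    dummy_image = np.zeros((1, 300, 300, 3), dtype=np.uint8)  # batch of one image
    boxes, scores, classes, num = sess.run(
        ['detection_boxes:0', 'detection_scores:0',
         'detection_classes:0', 'num_detections:0'],
        feed_dict={'image_tensor:0': dummy_image})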
+ +Example Usage: +-------------- +python export_inference_graph.py \ + --input_type image_tensor \ + --pipeline_config_path path/to/ssd_inception_v2.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + +The expected output would be in the directory +path/to/exported_model_directory (which is created if it does not exist) +with contents: + - inference_graph.pbtxt + - model.ckpt.data-00000-of-00001 + - model.ckpt.info + - model.ckpt.meta + - frozen_inference_graph.pb + + saved_model (a directory) + +Config overrides (see the `config_override` flag) are text protobufs +(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override +certain fields in the provided pipeline_config_path. These are useful for +making small changes to the inference graph that differ from the training or +eval config. + +Example Usage (in which we change the second stage post-processing score +threshold to be 0.5): + +python export_inference_graph.py \ + --input_type image_tensor \ + --pipeline_config_path path/to/ssd_inception_v2.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory \ + --config_override " \ + model{ \ + faster_rcnn { \ + second_stage_post_processing { \ + batch_non_max_suppression { \ + score_threshold: 0.5 \ + } \ + } \ + } \ + }" +""" +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from object_detection import exporter +from object_detection.protos import pipeline_pb2 + +flags = tf.app.flags + +flags.DEFINE_string('input_type', 'image_tensor', 'Type of input node. Can be ' + 'one of [`image_tensor`, `encoded_image_string_tensor`, ' + '`tf_example`]') +flags.DEFINE_string('input_shape', None, + 'If input_type is `image_tensor`, this can explicitly set ' + 'the shape of this input tensor to a fixed size. The ' + 'dimensions are to be provided as a comma-separated list ' + 'of integers. A value of -1 can be used for unknown ' + 'dimensions. If not specified, for an `image_tensor, the ' + 'default shape will be partially specified as ' + '`[None, None, None, 3]`.') +flags.DEFINE_string('pipeline_config_path', None, + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file.') +flags.DEFINE_string('trained_checkpoint_prefix', None, + 'Path to trained checkpoint, typically of the form ' + 'path/to/model.ckpt') +flags.DEFINE_string('output_directory', None, 'Path to write outputs.') +flags.DEFINE_string('config_override', '', + 'pipeline_pb2.TrainEvalPipelineConfig ' + 'text proto to override pipeline_config_path.') +flags.DEFINE_boolean('write_inference_graph', False, + 'If true, writes inference graph to disk.') +flags.DEFINE_string('additional_output_tensor_names', None, + 'Additional Tensors to output, to be specified as a comma ' + 'separated list of tensor names.') +flags.DEFINE_boolean('use_side_inputs', False, + 'If True, uses side inputs as well as image inputs.') +flags.DEFINE_string('side_input_shapes', None, + 'If use_side_inputs is True, this explicitly sets ' + 'the shape of the side input tensors to a fixed size. The ' + 'dimensions are to be provided as a comma-separated list ' + 'of integers. A value of -1 can be used for unknown ' + 'dimensions. A `/` denotes a break, starting the shape of ' + 'the next side input tensor. This flag is required if ' + 'using side inputs.') +flags.DEFINE_string('side_input_types', None, + 'If use_side_inputs is True, this explicitly sets ' + 'the type of the side input tensors. 
The ' + 'dimensions are to be provided as a comma-separated list ' + 'of types, each of `string`, `integer`, or `float`. ' + 'This flag is required if using side inputs.') +flags.DEFINE_string('side_input_names', None, + 'If use_side_inputs is True, this explicitly sets ' + 'the names of the side input tensors required by the model ' + 'assuming the names will be a comma-separated list of ' + 'strings. This flag is required if using side inputs.') +tf.app.flags.mark_flag_as_required('pipeline_config_path') +tf.app.flags.mark_flag_as_required('trained_checkpoint_prefix') +tf.app.flags.mark_flag_as_required('output_directory') +FLAGS = flags.FLAGS + + +def main(_): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: + text_format.Merge(f.read(), pipeline_config) + text_format.Merge(FLAGS.config_override, pipeline_config) + if FLAGS.input_shape: + input_shape = [ + int(dim) if dim != '-1' else None + for dim in FLAGS.input_shape.split(',') + ] + else: + input_shape = None + if FLAGS.use_side_inputs: + side_input_shapes, side_input_names, side_input_types = ( + exporter.parse_side_inputs( + FLAGS.side_input_shapes, + FLAGS.side_input_names, + FLAGS.side_input_types)) + else: + side_input_shapes = None + side_input_names = None + side_input_types = None + if FLAGS.additional_output_tensor_names: + additional_output_tensor_names = list( + FLAGS.additional_output_tensor_names.split(',')) + else: + additional_output_tensor_names = None + exporter.export_inference_graph( + FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_prefix, + FLAGS.output_directory, input_shape=input_shape, + write_inference_graph=FLAGS.write_inference_graph, + additional_output_tensor_names=additional_output_tensor_names, + use_side_inputs=FLAGS.use_side_inputs, + side_input_shapes=side_input_shapes, + side_input_names=side_input_names, + side_input_types=side_input_types) + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_graph_lib_tf2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_graph_lib_tf2.py new file mode 100644 index 0000000000000000000000000000000000000000..60789537b0006ff48aed31b4183654fa3d8d32a4 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_graph_lib_tf2.py @@ -0,0 +1,254 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Library to export TFLite-compatible SavedModel from TF2 detection models.""" +import os +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf + +from object_detection.builders import model_builder +from object_detection.builders import post_processing_builder +from object_detection.core import box_list + +_DEFAULT_NUM_CHANNELS = 3 +_DEFAULT_NUM_COORD_BOX = 4 +_MAX_CLASSES_PER_DETECTION = 1 +_DETECTION_POSTPROCESS_FUNC = 'TFLite_Detection_PostProcess' + + +def get_const_center_size_encoded_anchors(anchors): + """Exports center-size encoded anchors as a constant tensor. + + Args: + anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor + boxes + + Returns: + encoded_anchors: a float32 constant tensor of shape [num_anchors, 4] + containing the anchor boxes. + """ + anchor_boxlist = box_list.BoxList(anchors) + y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes() + num_anchors = y.get_shape().as_list() + + with tf1.Session() as sess: + y_out, x_out, h_out, w_out = sess.run([y, x, h, w]) + encoded_anchors = tf1.constant( + np.transpose(np.stack((y_out, x_out, h_out, w_out))), + dtype=tf1.float32, + shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX], + name='anchors') + return num_anchors[0], encoded_anchors + + +class SSDModule(tf.Module): + """Inference Module for TFLite-friendly SSD models.""" + + def __init__(self, pipeline_config, detection_model, max_detections, + use_regular_nms): + """Initialization. + + Args: + pipeline_config: The original pipeline_pb2.TrainEvalPipelineConfig + detection_model: The detection model to use for inference. + max_detections: Max detections desired from the TFLite model. + use_regular_nms: If True, TFLite model uses the (slower) multi-class NMS. + """ + self._process_config(pipeline_config) + self._pipeline_config = pipeline_config + self._model = detection_model + self._max_detections = max_detections + self._use_regular_nms = use_regular_nms + + def _process_config(self, pipeline_config): + self._num_classes = pipeline_config.model.ssd.num_classes + self._nms_score_threshold = pipeline_config.model.ssd.post_processing.batch_non_max_suppression.score_threshold + self._nms_iou_threshold = pipeline_config.model.ssd.post_processing.batch_non_max_suppression.iou_threshold + self._scale_values = {} + self._scale_values[ + 'y_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale + self._scale_values[ + 'x_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale + self._scale_values[ + 'h_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale + self._scale_values[ + 'w_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale + + image_resizer_config = pipeline_config.model.ssd.image_resizer + image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof') + self._num_channels = _DEFAULT_NUM_CHANNELS + + if image_resizer == 'fixed_shape_resizer': + self._height = image_resizer_config.fixed_shape_resizer.height + self._width = image_resizer_config.fixed_shape_resizer.width + if image_resizer_config.fixed_shape_resizer.convert_to_grayscale: + self._num_channels = 1 + else: + raise ValueError( + 'Only fixed_shape_resizer' + 'is supported with tflite. 
Found {}'.format( + image_resizer_config.WhichOneof('image_resizer_oneof'))) + + def input_shape(self): + """Returns shape of TFLite model input.""" + return [1, self._height, self._width, self._num_channels] + + def postprocess_implements_signature(self): + """Returns tf.implements signature for MLIR legalization of TFLite NMS.""" + implements_signature = [ + 'name: "%s"' % _DETECTION_POSTPROCESS_FUNC, + 'attr { key: "max_detections" value { i: %d } }' % self._max_detections, + 'attr { key: "max_classes_per_detection" value { i: %d } }' % + _MAX_CLASSES_PER_DETECTION, + 'attr { key: "use_regular_nms" value { b: %s } }' % + str(self._use_regular_nms).lower(), + 'attr { key: "nms_score_threshold" value { f: %f } }' % + self._nms_score_threshold, + 'attr { key: "nms_iou_threshold" value { f: %f } }' % + self._nms_iou_threshold, + 'attr { key: "y_scale" value { f: %f } }' % + self._scale_values['y_scale'], + 'attr { key: "x_scale" value { f: %f } }' % + self._scale_values['x_scale'], + 'attr { key: "h_scale" value { f: %f } }' % + self._scale_values['h_scale'], + 'attr { key: "w_scale" value { f: %f } }' % + self._scale_values['w_scale'], + 'attr { key: "num_classes" value { i: %d } }' % self._num_classes + ] + implements_signature = ' '.join(implements_signature) + return implements_signature + + def _get_postprocess_fn(self, num_anchors, num_classes): + # There is no TF equivalent for TFLite's custom post-processing op. + # So we add an 'empty' composite function here, that is legalized to the + # custom op with MLIR. + @tf.function( + experimental_implements=self.postprocess_implements_signature()) + # pylint: disable=g-unused-argument,unused-argument + def dummy_post_processing(box_encodings, class_predictions, anchors): + boxes = tf.constant(0.0, dtype=tf.float32, name='boxes') + scores = tf.constant(0.0, dtype=tf.float32, name='scores') + classes = tf.constant(0.0, dtype=tf.float32, name='classes') + num_detections = tf.constant(0.0, dtype=tf.float32, name='num_detections') + return boxes, scores, classes, num_detections + + return dummy_post_processing + + @tf.function + def inference_fn(self, image): + """Encapsulates SSD inference for TFLite conversion. + + NOTE: The Args & Returns sections below indicate the TFLite model signature, + and not what the TF graph does (since the latter does not include the custom + NMS op used by TFLite) + + Args: + image: a float32 tensor of shape [num_anchors, 4] containing the anchor + boxes + + Returns: + num_detections: a float32 scalar denoting number of total detections. + classes: a float32 tensor denoting class ID for each detection. + scores: a float32 tensor denoting score for each detection. + boxes: a float32 tensor denoting coordinates of each detected box. + """ + predicted_tensors = self._model.predict(image, true_image_shapes=None) + # The score conversion occurs before the post-processing custom op + _, score_conversion_fn = post_processing_builder.build( + self._pipeline_config.model.ssd.post_processing) + class_predictions = score_conversion_fn( + predicted_tensors['class_predictions_with_background']) + + with tf.name_scope('raw_outputs'): + # 'raw_outputs/box_encodings': a float32 tensor of shape + # [1, num_anchors, 4] containing the encoded box predictions. Note that + # these are raw predictions and no Non-Max suppression is applied on + # them and no decode center size boxes is applied to them. 
+ box_encodings = tf.identity( + predicted_tensors['box_encodings'], name='box_encodings') + # 'raw_outputs/class_predictions': a float32 tensor of shape + # [1, num_anchors, num_classes] containing the class scores for each + # anchor after applying score conversion. + class_predictions = tf.identity( + class_predictions, name='class_predictions') + # 'anchors': a float32 tensor of shape + # [4, num_anchors] containing the anchors as a constant node. + num_anchors, anchors = get_const_center_size_encoded_anchors( + predicted_tensors['anchors']) + anchors = tf.identity(anchors, name='anchors') + + # tf.function@ seems to reverse order of inputs, so reverse them here. + return self._get_postprocess_fn(num_anchors, + self._num_classes)(box_encodings, + class_predictions, + anchors)[::-1] + + +def export_tflite_model(pipeline_config, trained_checkpoint_dir, + output_directory, max_detections, use_regular_nms): + """Exports inference SavedModel for TFLite conversion. + + NOTE: Only supports SSD meta-architectures for now, and the output model will + have static-shaped, single-batch input. + + This function creates `output_directory` if it does not already exist, + which will hold the intermediate SavedModel that can be used with the TFLite + converter. + + Args: + pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto. + trained_checkpoint_dir: Path to the trained checkpoint file. + output_directory: Path to write outputs. + max_detections: Max detections desired from the TFLite model. + use_regular_nms: If True, TFLite model uses the (slower) multi-class NMS. + + Raises: + ValueError: if pipeline is invalid. + """ + output_saved_model_directory = os.path.join(output_directory, 'saved_model') + + # Build the underlying model using pipeline config. + # TODO(b/162842801): Add support for other architectures. + if pipeline_config.model.WhichOneof('model') != 'ssd': + raise ValueError('Only ssd models are supported in tflite. ' + 'Found {} in config'.format( + pipeline_config.model.WhichOneof('model'))) + detection_model = model_builder.build( + pipeline_config.model, is_training=False) + + ckpt = tf.train.Checkpoint(model=detection_model) + manager = tf.train.CheckpointManager( + ckpt, trained_checkpoint_dir, max_to_keep=1) + status = ckpt.restore(manager.latest_checkpoint).expect_partial() + + # The module helps build a TF SavedModel appropriate for TFLite conversion. + detection_module = SSDModule(pipeline_config, detection_model, max_detections, + use_regular_nms) + + # Getting the concrete function traces the graph and forces variables to + # be constructed; only after this can we save the saved model. + status.assert_existing_objects_matched() + concrete_function = detection_module.inference_fn.get_concrete_function( + tf.TensorSpec( + shape=detection_module.input_shape(), dtype=tf.float32, name='input')) + status.assert_existing_objects_matched() + + # Export SavedModel. 
+ tf.saved_model.save( + detection_module, + output_saved_model_directory, + signatures=concrete_function) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_graph_lib_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_graph_lib_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b4b1ea440ef7b991f4f291968ecc98f3882b012d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_graph_lib_tf2_test.py @@ -0,0 +1,245 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test for export_tflite_graph_lib_tf2.py.""" + +from __future__ import division +import os +import unittest +import six + +import tensorflow.compat.v2 as tf + +from object_detection import export_tflite_graph_lib_tf2 +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.protos import pipeline_pb2 +from object_detection.utils import tf_version + +if six.PY2: + import mock # pylint: disable=g-importing-member,g-import-not-at-top +else: + from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top + + +class FakeModel(model.DetectionModel): + + def __init__(self): + super(FakeModel, self).__init__(num_classes=2) + self._conv = tf.keras.layers.Conv2D( + filters=1, + kernel_size=1, + strides=(1, 1), + padding='valid', + kernel_initializer=tf.keras.initializers.Constant(value=1.0)) + + def preprocess(self, inputs): + true_image_shapes = [] # Doesn't matter for the fake model. 
+ return tf.identity(inputs), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes): + prediction_tensors = {'image': self._conv(preprocessed_inputs)} + with tf.control_dependencies([prediction_tensors['image']]): + prediction_tensors['box_encodings'] = tf.constant( + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]], tf.float32) + prediction_tensors['class_predictions_with_background'] = tf.constant( + [[[0.7, 0.6], [0.9, 0.0]]], tf.float32) + with tf.control_dependencies([ + tf.convert_to_tensor( + prediction_tensors['image'].get_shape().as_list()[1:3]) + ]): + prediction_tensors['anchors'] = tf.constant( + [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32) + return prediction_tensors + + def postprocess(self, prediction_dict, true_image_shapes): + predict_tensor_sum = tf.reduce_sum(prediction_dict['image']) + with tf.control_dependencies(list(prediction_dict.values())): + postprocessed_tensors = { + 'detection_boxes': + tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]], + tf.float32), + 'detection_scores': + predict_tensor_sum + + tf.constant([[0.7, 0.6], [0.9, 0.0]], tf.float32), + 'detection_classes': + tf.constant([[0, 1], [1, 0]], tf.float32), + 'num_detections': + tf.constant([2, 1], tf.float32), + } + return postprocessed_tensors + + def restore_map(self, checkpoint_path, from_detection_checkpoint): + pass + + def restore_from_objects(self, fine_tune_checkpoint_type): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ExportTfLiteGraphTest(tf.test.TestCase): + + def _save_checkpoint_from_mock_model(self, checkpoint_dir): + mock_model = FakeModel() + fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) + preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + + ckpt = tf.train.Checkpoint(model=mock_model) + exported_checkpoint_manager = tf.train.CheckpointManager( + ckpt, checkpoint_dir, max_to_keep=1) + exported_checkpoint_manager.save(checkpoint_number=0) + + def _get_ssd_config(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + pipeline_config.model.ssd.post_processing.batch_non_max_suppression.iou_threshold = 0.5 + return pipeline_config + + # The tf.implements signature is important since it ensures MLIR legalization, + # so we test it here. 
+ def test_postprocess_implements_signature(self): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + pipeline_config = self._get_ssd_config() + + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + + detection_model = model_builder.build( + pipeline_config.model, is_training=False) + + ckpt = tf.train.Checkpoint(model=detection_model) + manager = tf.train.CheckpointManager(ckpt, tmp_dir, max_to_keep=1) + ckpt.restore(manager.latest_checkpoint).expect_partial() + + # The module helps build a TF graph appropriate for TFLite conversion. + detection_module = export_tflite_graph_lib_tf2.SSDModule( + pipeline_config=pipeline_config, + detection_model=detection_model, + max_detections=20, + use_regular_nms=True) + + expected_signature = ('name: "TFLite_Detection_PostProcess" attr { key: ' + '"max_detections" value { i: 20 } } attr { key: ' + '"max_classes_per_detection" value { i: 1 } } attr ' + '{ key: "use_regular_nms" value { b: true } } attr ' + '{ key: "nms_score_threshold" value { f: 0.000000 }' + ' } attr { key: "nms_iou_threshold" value { f: ' + '0.500000 } } attr { key: "y_scale" value { f: ' + '10.000000 } } attr { key: "x_scale" value { f: ' + '10.000000 } } attr { key: "h_scale" value { f: ' + '5.000000 } } attr { key: "w_scale" value { f: ' + '5.000000 } } attr { key: "num_classes" value { i: ' + '2 } }') + + self.assertEqual(expected_signature, + detection_module.postprocess_implements_signature()) + + def test_unsupported_architecture(self): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.model.faster_rcnn.num_classes = 10 + + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + expected_message = 'Only ssd models are supported in tflite' + try: + export_tflite_graph_lib_tf2.export_tflite_model( + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory, + max_detections=10, + use_regular_nms=False) + except ValueError as e: + if expected_message not in str(e): + raise + else: + raise AssertionError('Exception not raised: %s' % expected_message) + + def test_export_yields_saved_model(self): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + export_tflite_graph_lib_tf2.export_tflite_model( + pipeline_config=self._get_ssd_config(), + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory, + max_detections=10, + use_regular_nms=False) + self.assertTrue( + os.path.exists( + os.path.join(output_directory, 'saved_model', 'saved_model.pb'))) + self.assertTrue( + os.path.exists( + os.path.join(output_directory, 'saved_model', 'variables', + 'variables.index'))) + self.assertTrue( + os.path.exists( + os.path.join(output_directory, 'saved_model', 'variables', + 'variables.data-00000-of-00001'))) + + def test_exported_model_inference(self): + tmp_dir = self.get_temp_dir() + output_directory = os.path.join(tmp_dir, 'output') + self._save_checkpoint_from_mock_model(tmp_dir) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = 
FakeModel() + export_tflite_graph_lib_tf2.export_tflite_model( + pipeline_config=self._get_ssd_config(), + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory, + max_detections=10, + use_regular_nms=False) + + saved_model_path = os.path.join(output_directory, 'saved_model') + detect_fn = tf.saved_model.load(saved_model_path) + detect_fn_sig = detect_fn.signatures['serving_default'] + image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) + detections = detect_fn_sig(image) + + # The exported graph doesn't have numerically correct outputs, but there + # should be 4. + self.assertEqual(4, len(detections)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_graph_tf2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_graph_tf2.py new file mode 100644 index 0000000000000000000000000000000000000000..0efe6339d359e0dffb3bb3aaece15d62dde56b7e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_graph_tf2.py @@ -0,0 +1,126 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Exports TF2 detection SavedModel for conversion to TensorFlow Lite. + +Link to the TF2 Detection Zoo: +https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md +The output folder will contain an intermediate SavedModel that can be used with +the TfLite converter. + +NOTE: This only supports SSD meta-architectures for now. + +One input: + image: a float32 tensor of shape[1, height, width, 3] containing the + *normalized* input image. + NOTE: See the `preprocess` function defined in the feature extractor class + in the object_detection/models directory. + +Four Outputs: + detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box + locations + detection_classes: a float32 tensor of shape [1, num_boxes] + with class indices + detection_scores: a float32 tensor of shape [1, num_boxes] + with class scores + num_boxes: a float32 tensor of size 1 containing the number of detected boxes + +Example Usage: +-------------- +python object_detection/export_tflite_graph_tf2.py \ + --pipeline_config_path path/to/ssd_model/pipeline.config \ + --trained_checkpoint_dir path/to/ssd_model/checkpoint \ + --output_directory path/to/exported_model_directory + +The expected output SavedModel would be in the directory +path/to/exported_model_directory (which is created if it does not exist). + +Config overrides (see the `config_override` flag) are text protobufs +(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override +certain fields in the provided pipeline_config_path. These are useful for +making small changes to the inference graph that differ from the training or +eval config. 
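# A minimal follow-up sketch, assuming the intermediate SavedModel written by this tool
# lives at the placeholder path below: the standard TF2 TFLite converter turns it into a
# .tflite file, and the TFLite_Detection_PostProcess custom op is produced from the
# implements signature during conversion. (The Example Usage below covers the export
# step itself.)
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_saved_model(
    'path/to/exported_model_directory/saved_model')
converter.optimizations = [tf.lite.Optimize.DEFAULT]  # optional post-training quantization
tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
  f.write(tflite_model)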
+ +Example Usage (in which we change the NMS iou_threshold to be 0.5 and +NMS score_threshold to be 0.0): +python object_detection/export_tflite_model_tf2.py \ + --pipeline_config_path path/to/ssd_model/pipeline.config \ + --trained_checkpoint_dir path/to/ssd_model/checkpoint \ + --output_directory path/to/exported_model_directory + --config_override " \ + model{ \ + ssd{ \ + post_processing { \ + batch_non_max_suppression { \ + score_threshold: 0.0 \ + iou_threshold: 0.5 \ + } \ + } \ + } \ + } \ + " +""" +from absl import app +from absl import flags + +import tensorflow.compat.v2 as tf +from google.protobuf import text_format +from object_detection import export_tflite_graph_lib_tf2 +from object_detection.protos import pipeline_pb2 + +tf.enable_v2_behavior() + +FLAGS = flags.FLAGS + +flags.DEFINE_string( + 'pipeline_config_path', None, + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file.') +flags.DEFINE_string('trained_checkpoint_dir', None, + 'Path to trained checkpoint directory') +flags.DEFINE_string('output_directory', None, 'Path to write outputs.') +flags.DEFINE_string( + 'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig ' + 'text proto to override pipeline_config_path.') +# SSD-specific flags +flags.DEFINE_integer('ssd_max_detections', 10, + 'Maximum number of detections (boxes) to return.') +flags.DEFINE_bool( + 'ssd_use_regular_nms', False, + 'Flag to set postprocessing op to use Regular NMS instead of Fast NMS ' + '(Default false).') + + +def main(argv): + del argv # Unused. + flags.mark_flag_as_required('pipeline_config_path') + flags.mark_flag_as_required('trained_checkpoint_dir') + flags.mark_flag_as_required('output_directory') + + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + + with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: + text_format.Parse(f.read(), pipeline_config) + text_format.Parse(FLAGS.config_override, pipeline_config) + + export_tflite_graph_lib_tf2.export_tflite_model(pipeline_config, + FLAGS.trained_checkpoint_dir, + FLAGS.output_directory, + FLAGS.ssd_max_detections, + FLAGS.ssd_use_regular_nms) + + +if __name__ == '__main__': + app.run(main) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_ssd_graph.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_ssd_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..2127ca08ef57228831871605b4df5ca7f0a79963 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_ssd_graph.py @@ -0,0 +1,144 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Exports an SSD detection model to use with tf-lite. + +Outputs file: +* A tflite compatible frozen graph - $output_directory/tflite_graph.pb + +The exported graph has the following input and output nodes. 
+ +Inputs: +'normalized_input_image_tensor': a float32 tensor of shape +[1, height, width, 3] containing the normalized input image. Note that the +height and width must be compatible with the height and width configured in +the fixed_shape_image resizer options in the pipeline config proto. + +In floating point Mobilenet model, 'normalized_image_tensor' has values +between [-1,1). This typically means mapping each pixel (linearly) +to a value between [-1, 1]. Input image +values between 0 and 255 are scaled by (1/128.0) and then a value of +-1 is added to them to ensure the range is [-1,1). +In quantized Mobilenet model, 'normalized_image_tensor' has values between [0, +255]. +In general, see the `preprocess` function defined in the feature extractor class +in the object_detection/models directory. + +Outputs: +If add_postprocessing_op is true: frozen graph adds a + TFLite_Detection_PostProcess custom op node has four outputs: + detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box + locations + detection_classes: a float32 tensor of shape [1, num_boxes] + with class indices + detection_scores: a float32 tensor of shape [1, num_boxes] + with class scores + num_boxes: a float32 tensor of size 1 containing the number of detected boxes +else: + the graph has two outputs: + 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4] + containing the encoded box predictions. + 'raw_outputs/class_predictions': a float32 tensor of shape + [1, num_anchors, num_classes] containing the class scores for each anchor + after applying score conversion. + +Example Usage: +-------------- +python object_detection/export_tflite_ssd_graph.py \ + --pipeline_config_path path/to/ssd_mobilenet.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + +The expected output would be in the directory +path/to/exported_model_directory (which is created if it does not exist) +with contents: + - tflite_graph.pbtxt + - tflite_graph.pb +Config overrides (see the `config_override` flag) are text protobufs +(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override +certain fields in the provided pipeline_config_path. These are useful for +making small changes to the inference graph that differ from the training or +eval config. 
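# A minimal follow-up sketch, assuming add_postprocessing_op was left at its default
# (True) and a 300x300 fixed_shape_resizer; the paths and input resolution are
# placeholders. The TF1 converter consumes the exported tflite_graph.pb directly, with
# the custom postprocessing op exposed through its four outputs. (The Example Usage
# below covers the export step itself.)
import tensorflow.compat.v1 as tf

converter = tf.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file='path/to/exported_model_directory/tflite_graph.pb',
    input_arrays=['normalized_input_image_tensor'],
    output_arrays=['TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
                   'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'],
    input_shapes={'normalized_input_image_tensor': [1, 300, 300, 3]})
converter.allow_custom_ops = True  # TFLite_Detection_PostProcess is a TFLite custom op
tflite_model = converter.convert()
with open('detect.tflite', 'wb') as f:
  f.write(tflite_model)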
+ +Example Usage (in which we change the NMS iou_threshold to be 0.5 and +NMS score_threshold to be 0.0): +python object_detection/export_tflite_ssd_graph.py \ + --pipeline_config_path path/to/ssd_mobilenet.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory + --config_override " \ + model{ \ + ssd{ \ + post_processing { \ + batch_non_max_suppression { \ + score_threshold: 0.0 \ + iou_threshold: 0.5 \ + } \ + } \ + } \ + } \ + " +""" + +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from object_detection import export_tflite_ssd_graph_lib +from object_detection.protos import pipeline_pb2 + +flags = tf.app.flags +flags.DEFINE_string('output_directory', None, 'Path to write outputs.') +flags.DEFINE_string( + 'pipeline_config_path', None, + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file.') +flags.DEFINE_string('trained_checkpoint_prefix', None, 'Checkpoint prefix.') +flags.DEFINE_integer('max_detections', 10, + 'Maximum number of detections (boxes) to show.') +flags.DEFINE_integer('max_classes_per_detection', 1, + 'Maximum number of classes to output per detection box.') +flags.DEFINE_integer( + 'detections_per_class', 100, + 'Number of anchors used per class in Regular Non-Max-Suppression.') +flags.DEFINE_bool('add_postprocessing_op', True, + 'Add TFLite custom op for postprocessing to the graph.') +flags.DEFINE_bool( + 'use_regular_nms', False, + 'Flag to set postprocessing op to use Regular NMS instead of Fast NMS.') +flags.DEFINE_string( + 'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig ' + 'text proto to override pipeline_config_path.') + +FLAGS = flags.FLAGS + + +def main(argv): + del argv # Unused. + flags.mark_flag_as_required('output_directory') + flags.mark_flag_as_required('pipeline_config_path') + flags.mark_flag_as_required('trained_checkpoint_prefix') + + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + + with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: + text_format.Merge(f.read(), pipeline_config) + text_format.Merge(FLAGS.config_override, pipeline_config) + export_tflite_ssd_graph_lib.export_tflite_graph( + pipeline_config, FLAGS.trained_checkpoint_prefix, FLAGS.output_directory, + FLAGS.add_postprocessing_op, FLAGS.max_detections, + FLAGS.max_classes_per_detection, use_regular_nms=FLAGS.use_regular_nms) + + +if __name__ == '__main__': + tf.app.run(main) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_ssd_graph_lib.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_ssd_graph_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..f72e9525bfd75b58c874cba5b790cbac710cb9dd --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_ssd_graph_lib.py @@ -0,0 +1,334 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Exports an SSD detection model to use with tf-lite. + +See export_tflite_ssd_graph.py for usage. +""" +import os +import tempfile +import numpy as np +import tensorflow.compat.v1 as tf +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.framework import types_pb2 +from tensorflow.core.protobuf import saver_pb2 +from object_detection import exporter +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.builders import post_processing_builder +from object_detection.core import box_list +from object_detection.utils import tf_version + +_DEFAULT_NUM_CHANNELS = 3 +_DEFAULT_NUM_COORD_BOX = 4 + +if tf_version.is_tf1(): + from tensorflow.tools.graph_transforms import TransformGraph # pylint: disable=g-import-not-at-top + + +def get_const_center_size_encoded_anchors(anchors): + """Exports center-size encoded anchors as a constant tensor. + + Args: + anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor + boxes + + Returns: + encoded_anchors: a float32 constant tensor of shape [num_anchors, 4] + containing the anchor boxes. + """ + anchor_boxlist = box_list.BoxList(anchors) + y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes() + num_anchors = y.get_shape().as_list() + + with tf.Session() as sess: + y_out, x_out, h_out, w_out = sess.run([y, x, h, w]) + encoded_anchors = tf.constant( + np.transpose(np.stack((y_out, x_out, h_out, w_out))), + dtype=tf.float32, + shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX], + name='anchors') + return encoded_anchors + + +def append_postprocessing_op(frozen_graph_def, + max_detections, + max_classes_per_detection, + nms_score_threshold, + nms_iou_threshold, + num_classes, + scale_values, + detections_per_class=100, + use_regular_nms=False, + additional_output_tensors=()): + """Appends postprocessing custom op. + + Args: + frozen_graph_def: Frozen GraphDef for SSD model after freezing the + checkpoint + max_detections: Maximum number of detections (boxes) to show + max_classes_per_detection: Number of classes to display per detection + nms_score_threshold: Score threshold used in Non-maximal suppression in + post-processing + nms_iou_threshold: Intersection-over-union threshold used in Non-maximal + suppression in post-processing + num_classes: number of classes in SSD detector + scale_values: scale values is a dict with following key-value pairs + {y_scale: 10, x_scale: 10, h_scale: 5, w_scale: 5} that are used in decode + centersize boxes + detections_per_class: In regular NonMaxSuppression, number of anchors used + for NonMaxSuppression per class + use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of + Fast NMS. + additional_output_tensors: Array of additional tensor names to output. + Tensors are appended after postprocessing output. 
+ + Returns: + transformed_graph_def: Frozen GraphDef with postprocessing custom op + appended + TFLite_Detection_PostProcess custom op node has four outputs: + detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box + locations + detection_classes: a float32 tensor of shape [1, num_boxes] + with class indices + detection_scores: a float32 tensor of shape [1, num_boxes] + with class scores + num_boxes: a float32 tensor of size 1 containing the number of detected + boxes + """ + new_output = frozen_graph_def.node.add() + new_output.op = 'TFLite_Detection_PostProcess' + new_output.name = 'TFLite_Detection_PostProcess' + new_output.attr['_output_quantized'].CopyFrom( + attr_value_pb2.AttrValue(b=True)) + new_output.attr['_output_types'].list.type.extend([ + types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, + types_pb2.DT_FLOAT + ]) + new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom( + attr_value_pb2.AttrValue(b=True)) + new_output.attr['max_detections'].CopyFrom( + attr_value_pb2.AttrValue(i=max_detections)) + new_output.attr['max_classes_per_detection'].CopyFrom( + attr_value_pb2.AttrValue(i=max_classes_per_detection)) + new_output.attr['nms_score_threshold'].CopyFrom( + attr_value_pb2.AttrValue(f=nms_score_threshold.pop())) + new_output.attr['nms_iou_threshold'].CopyFrom( + attr_value_pb2.AttrValue(f=nms_iou_threshold.pop())) + new_output.attr['num_classes'].CopyFrom( + attr_value_pb2.AttrValue(i=num_classes)) + + new_output.attr['y_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop())) + new_output.attr['x_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop())) + new_output.attr['h_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop())) + new_output.attr['w_scale'].CopyFrom( + attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop())) + new_output.attr['detections_per_class'].CopyFrom( + attr_value_pb2.AttrValue(i=detections_per_class)) + new_output.attr['use_regular_nms'].CopyFrom( + attr_value_pb2.AttrValue(b=use_regular_nms)) + + new_output.input.extend( + ['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors']) + # Transform the graph to append new postprocessing op + input_names = [] + output_names = ['TFLite_Detection_PostProcess' + ] + list(additional_output_tensors) + transforms = ['strip_unused_nodes'] + transformed_graph_def = TransformGraph(frozen_graph_def, input_names, + output_names, transforms) + return transformed_graph_def + + +def export_tflite_graph(pipeline_config, + trained_checkpoint_prefix, + output_dir, + add_postprocessing_op, + max_detections, + max_classes_per_detection, + detections_per_class=100, + use_regular_nms=False, + binary_graph_name='tflite_graph.pb', + txt_graph_name='tflite_graph.pbtxt', + additional_output_tensors=()): + """Exports a tflite compatible graph and anchors for ssd detection model. + + Anchors are written to a tensor and tflite compatible graph + is written to output_dir/tflite_graph.pb. + + Args: + pipeline_config: a pipeline.proto object containing the configuration for + SSD model to export. + trained_checkpoint_prefix: a file prefix for the checkpoint containing the + trained parameters of the SSD model. + output_dir: A directory to write the tflite graph and anchor file to. 
+ add_postprocessing_op: If add_postprocessing_op is true: frozen graph adds a + TFLite_Detection_PostProcess custom op + max_detections: Maximum number of detections (boxes) to show + max_classes_per_detection: Number of classes to display per detection + detections_per_class: In regular NonMaxSuppression, number of anchors used + for NonMaxSuppression per class + use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of + Fast NMS. + binary_graph_name: Name of the exported graph file in binary format. + txt_graph_name: Name of the exported graph file in text format. + additional_output_tensors: Array of additional tensor names to output. + Additional tensors are appended to the end of output tensor list. + + Raises: + ValueError: if the pipeline config contains models other than ssd or uses an + fixed_shape_resizer and provides a shape as well. + """ + tf.gfile.MakeDirs(output_dir) + if pipeline_config.model.WhichOneof('model') != 'ssd': + raise ValueError('Only ssd models are supported in tflite. ' + 'Found {} in config'.format( + pipeline_config.model.WhichOneof('model'))) + + num_classes = pipeline_config.model.ssd.num_classes + nms_score_threshold = { + pipeline_config.model.ssd.post_processing.batch_non_max_suppression + .score_threshold + } + nms_iou_threshold = { + pipeline_config.model.ssd.post_processing.batch_non_max_suppression + .iou_threshold + } + scale_values = {} + scale_values['y_scale'] = { + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale + } + scale_values['x_scale'] = { + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale + } + scale_values['h_scale'] = { + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale + } + scale_values['w_scale'] = { + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale + } + + image_resizer_config = pipeline_config.model.ssd.image_resizer + image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof') + num_channels = _DEFAULT_NUM_CHANNELS + if image_resizer == 'fixed_shape_resizer': + height = image_resizer_config.fixed_shape_resizer.height + width = image_resizer_config.fixed_shape_resizer.width + if image_resizer_config.fixed_shape_resizer.convert_to_grayscale: + num_channels = 1 + shape = [1, height, width, num_channels] + else: + raise ValueError( + 'Only fixed_shape_resizer' + 'is supported with tflite. Found {}'.format( + image_resizer_config.WhichOneof('image_resizer_oneof'))) + + image = tf.placeholder( + tf.float32, shape=shape, name='normalized_input_image_tensor') + + detection_model = model_builder.build( + pipeline_config.model, is_training=False) + predicted_tensors = detection_model.predict(image, true_image_shapes=None) + # The score conversion occurs before the post-processing custom op + _, score_conversion_fn = post_processing_builder.build( + pipeline_config.model.ssd.post_processing) + class_predictions = score_conversion_fn( + predicted_tensors['class_predictions_with_background']) + + with tf.name_scope('raw_outputs'): + # 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4] + # containing the encoded box predictions. Note that these are raw + # predictions and no Non-Max suppression is applied on them and + # no decode center size boxes is applied to them. 
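+    # Note (editorial comment): these three raw output names
+    # ('raw_outputs/box_encodings', 'raw_outputs/class_predictions' and
+    # 'anchors') are exactly the inputs that append_postprocessing_op above
+    # wires into the TFLite_Detection_PostProcess custom op, so they must stay
+    # in sync with that function.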
+ tf.identity(predicted_tensors['box_encodings'], name='box_encodings') + # 'raw_outputs/class_predictions': a float32 tensor of shape + # [1, num_anchors, num_classes] containing the class scores for each anchor + # after applying score conversion. + tf.identity(class_predictions, name='class_predictions') + # 'anchors': a float32 tensor of shape + # [4, num_anchors] containing the anchors as a constant node. + tf.identity( + get_const_center_size_encoded_anchors(predicted_tensors['anchors']), + name='anchors') + + # Add global step to the graph, so we know the training step number when we + # evaluate the model. + tf.train.get_or_create_global_step() + + # graph rewriter + is_quantized = pipeline_config.HasField('graph_rewriter') + if is_quantized: + graph_rewriter_config = pipeline_config.graph_rewriter + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False) + graph_rewriter_fn() + + if pipeline_config.model.ssd.feature_extractor.HasField('fpn'): + exporter.rewrite_nn_resize_op(is_quantized) + + # freeze the graph + saver_kwargs = {} + if pipeline_config.eval_config.use_moving_averages: + saver_kwargs['write_version'] = saver_pb2.SaverDef.V1 + moving_average_checkpoint = tempfile.NamedTemporaryFile() + exporter.replace_variable_values_with_moving_averages( + tf.get_default_graph(), trained_checkpoint_prefix, + moving_average_checkpoint.name) + checkpoint_to_use = moving_average_checkpoint.name + else: + checkpoint_to_use = trained_checkpoint_prefix + + saver = tf.train.Saver(**saver_kwargs) + input_saver_def = saver.as_saver_def() + frozen_graph_def = exporter.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=checkpoint_to_use, + output_node_names=','.join([ + 'raw_outputs/box_encodings', 'raw_outputs/class_predictions', + 'anchors' + ] + list(additional_output_tensors)), + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + clear_devices=True, + output_graph='', + initializer_nodes='') + + # Add new operation to do post processing in a custom op (TF Lite only) + if add_postprocessing_op: + transformed_graph_def = append_postprocessing_op( + frozen_graph_def, + max_detections, + max_classes_per_detection, + nms_score_threshold, + nms_iou_threshold, + num_classes, + scale_values, + detections_per_class, + use_regular_nms, + additional_output_tensors=additional_output_tensors) + else: + # Return frozen without adding post-processing custom op + transformed_graph_def = frozen_graph_def + + binary_graph = os.path.join(output_dir, binary_graph_name) + with tf.gfile.GFile(binary_graph, 'wb') as f: + f.write(transformed_graph_def.SerializeToString()) + txt_graph = os.path.join(output_dir, txt_graph_name) + with tf.gfile.GFile(txt_graph, 'w') as f: + f.write(str(transformed_graph_def)) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_ssd_graph_lib_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_ssd_graph_lib_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3625b9f651c157f52f690b1c9adf1e7ce19f2b94 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/export_tflite_ssd_graph_lib_tf1_test.py @@ -0,0 +1,426 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for object_detection.export_tflite_ssd_graph.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import unittest +import numpy as np +import six +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from tensorflow.core.framework import types_pb2 +from object_detection import export_tflite_ssd_graph_lib +from object_detection import exporter +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.protos import graph_rewriter_pb2 +from object_detection.protos import pipeline_pb2 +from object_detection.protos import post_processing_pb2 +from object_detection.utils import tf_version + +# pylint: disable=g-import-not-at-top + +if six.PY2: + import mock +else: + from unittest import mock # pylint: disable=g-importing-member +# pylint: enable=g-import-not-at-top + + +class FakeModel(model.DetectionModel): + + def __init__(self, add_detection_masks=False): + self._add_detection_masks = add_detection_masks + + def preprocess(self, inputs): + pass + + def predict(self, preprocessed_inputs, true_image_shapes): + features = slim.conv2d(preprocessed_inputs, 3, 1) + with tf.control_dependencies([features]): + prediction_tensors = { + 'box_encodings': + tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]], + tf.float32), + 'class_predictions_with_background': + tf.constant([[[0.7, 0.6], [0.9, 0.0]]], tf.float32), + } + with tf.control_dependencies( + [tf.convert_to_tensor(features.get_shape().as_list()[1:3])]): + prediction_tensors['anchors'] = tf.constant( + [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32) + return prediction_tensors + + def postprocess(self, prediction_tensors, true_image_shapes): + pass + + def restore_map(self, checkpoint_path, from_detection_checkpoint): + pass + + def restore_from_objects(self, fine_tune_checkpoint_type): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ExportTfliteGraphTest(tf.test.TestCase): + + def _save_checkpoint_from_mock_model(self, + checkpoint_path, + use_moving_averages, + quantize=False, + num_channels=3): + g = tf.Graph() + with g.as_default(): + mock_model = FakeModel() + inputs = tf.placeholder(tf.float32, shape=[1, 10, 10, num_channels]) + mock_model.predict(inputs, true_image_shapes=None) + if use_moving_averages: + tf.train.ExponentialMovingAverage(0.0).apply() + tf.train.get_or_create_global_step() + if quantize: + graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_config.quantization.delay = 500000 + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False) + 
graph_rewriter_fn() + + saver = tf.train.Saver() + init = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init) + saver.save(sess, checkpoint_path) + + def _assert_quant_vars_exists(self, tflite_graph_file): + with tf.gfile.Open(tflite_graph_file, mode='rb') as f: + graph_string = f.read() + print(graph_string) + self.assertIn(six.ensure_binary('quant'), graph_string) + + def _import_graph_and_run_inference(self, tflite_graph_file, num_channels=3): + """Imports a tflite graph, runs single inference and returns outputs.""" + graph = tf.Graph() + with graph.as_default(): + graph_def = tf.GraphDef() + with tf.gfile.Open(tflite_graph_file, mode='rb') as f: + graph_def.ParseFromString(f.read()) + tf.import_graph_def(graph_def, name='') + input_tensor = graph.get_tensor_by_name('normalized_input_image_tensor:0') + box_encodings = graph.get_tensor_by_name('raw_outputs/box_encodings:0') + class_predictions = graph.get_tensor_by_name( + 'raw_outputs/class_predictions:0') + with self.test_session(graph) as sess: + [box_encodings_np, class_predictions_np] = sess.run( + [box_encodings, class_predictions], + feed_dict={input_tensor: np.random.rand(1, 10, 10, num_channels)}) + return box_encodings_np, class_predictions_np + + def _export_graph(self, + pipeline_config, + num_channels=3, + additional_output_tensors=()): + """Exports a tflite graph.""" + output_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(output_dir, 'model.ckpt') + tflite_graph_file = os.path.join(output_dir, 'tflite_graph.pb') + + quantize = pipeline_config.HasField('graph_rewriter') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, + use_moving_averages=pipeline_config.eval_config.use_moving_averages, + quantize=quantize, + num_channels=num_channels) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + + with tf.Graph().as_default(): + tf.identity( + tf.constant([[1, 2], [3, 4]], tf.uint8), name='UnattachedTensor') + export_tflite_ssd_graph_lib.export_tflite_graph( + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_dir=output_dir, + add_postprocessing_op=False, + max_detections=10, + max_classes_per_detection=1, + additional_output_tensors=additional_output_tensors) + return tflite_graph_file + + def _export_graph_with_postprocessing_op(self, + pipeline_config, + num_channels=3, + additional_output_tensors=()): + """Exports a tflite graph with custom postprocessing op.""" + output_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(output_dir, 'model.ckpt') + tflite_graph_file = os.path.join(output_dir, 'tflite_graph.pb') + + quantize = pipeline_config.HasField('graph_rewriter') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, + use_moving_averages=pipeline_config.eval_config.use_moving_averages, + quantize=quantize, + num_channels=num_channels) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + + with tf.Graph().as_default(): + tf.identity( + tf.constant([[1, 2], [3, 4]], tf.uint8), name='UnattachedTensor') + export_tflite_ssd_graph_lib.export_tflite_graph( + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_dir=output_dir, + add_postprocessing_op=True, + max_detections=10, + max_classes_per_detection=1, + additional_output_tensors=additional_output_tensors) + return 
tflite_graph_file + + def test_export_tflite_graph_with_moving_averages(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = True + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + + (box_encodings_np, class_predictions_np + ) = self._import_graph_and_run_inference(tflite_graph_file) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) + + def test_export_tflite_graph_without_moving_averages(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + (box_encodings_np, class_predictions_np + ) = self._import_graph_and_run_inference(tflite_graph_file) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) + + def test_export_tflite_graph_grayscale(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + (pipeline_config.model.ssd.image_resizer.fixed_shape_resizer + ).convert_to_grayscale = True + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config, num_channels=1) + self.assertTrue(os.path.exists(tflite_graph_file)) + (box_encodings_np, + class_predictions_np) = self._import_graph_and_run_inference( + tflite_graph_file, num_channels=1) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) + + def test_export_tflite_graph_with_quantization(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + 
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.graph_rewriter.quantization.delay = 500000 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + self._assert_quant_vars_exists(tflite_graph_file) + (box_encodings_np, class_predictions_np + ) = self._import_graph_and_run_inference(tflite_graph_file) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) + + def test_export_tflite_graph_with_softmax_score_conversion(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.post_processing.score_converter = ( + post_processing_pb2.PostProcessing.SOFTMAX) + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + (box_encodings_np, class_predictions_np + ) = self._import_graph_and_run_inference(tflite_graph_file) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, + [[[0.524979, 0.475021], [0.710949, 0.28905]]]) + + def test_export_tflite_graph_with_sigmoid_score_conversion(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.post_processing.score_converter = ( + post_processing_pb2.PostProcessing.SIGMOID) + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph(pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + (box_encodings_np, class_predictions_np + ) = self._import_graph_and_run_inference(tflite_graph_file) + self.assertAllClose(box_encodings_np, + [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) + self.assertAllClose(class_predictions_np, + [[[0.668188, 0.645656], [0.710949, 0.5]]]) + + def test_export_tflite_graph_with_postprocessing_op(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.post_processing.score_converter = ( + 
post_processing_pb2.PostProcessing.SIGMOID) + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 + pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 + tflite_graph_file = self._export_graph_with_postprocessing_op( + pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + graph = tf.Graph() + with graph.as_default(): + graph_def = tf.GraphDef() + with tf.gfile.Open(tflite_graph_file, mode='rb') as f: + graph_def.ParseFromString(f.read()) + all_op_names = [node.name for node in graph_def.node] + self.assertIn('TFLite_Detection_PostProcess', all_op_names) + self.assertNotIn('UnattachedTensor', all_op_names) + for node in graph_def.node: + if node.name == 'TFLite_Detection_PostProcess': + self.assertTrue(node.attr['_output_quantized'].b) + self.assertTrue( + node.attr['_support_output_type_float_in_quantized_op'].b) + self.assertEqual(node.attr['y_scale'].f, 10.0) + self.assertEqual(node.attr['x_scale'].f, 10.0) + self.assertEqual(node.attr['h_scale'].f, 5.0) + self.assertEqual(node.attr['w_scale'].f, 5.0) + self.assertEqual(node.attr['num_classes'].i, 2) + self.assertTrue( + all([ + t == types_pb2.DT_FLOAT + for t in node.attr['_output_types'].list.type + ])) + + def test_export_tflite_graph_with_additional_tensors(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + tflite_graph_file = self._export_graph( + pipeline_config, additional_output_tensors=['UnattachedTensor']) + self.assertTrue(os.path.exists(tflite_graph_file)) + graph = tf.Graph() + with graph.as_default(): + graph_def = tf.GraphDef() + with tf.gfile.Open(tflite_graph_file, mode='rb') as f: + graph_def.ParseFromString(f.read()) + all_op_names = [node.name for node in graph_def.node] + self.assertIn('UnattachedTensor', all_op_names) + + def test_export_tflite_graph_with_postprocess_op_and_additional_tensors(self): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + pipeline_config.model.ssd.post_processing.score_converter = ( + post_processing_pb2.PostProcessing.SIGMOID) + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.num_classes = 2 + tflite_graph_file = self._export_graph_with_postprocessing_op( + pipeline_config, additional_output_tensors=['UnattachedTensor']) + self.assertTrue(os.path.exists(tflite_graph_file)) + graph = tf.Graph() + with graph.as_default(): + graph_def = tf.GraphDef() + with tf.gfile.Open(tflite_graph_file, mode='rb') as f: + graph_def.ParseFromString(f.read()) + all_op_names = [node.name for node in graph_def.node] + self.assertIn('TFLite_Detection_PostProcess', all_op_names) + self.assertIn('UnattachedTensor', all_op_names) + + @mock.patch.object(exporter, 'rewrite_nn_resize_op') + def test_export_with_nn_resize_op_not_called_without_fpn(self, mock_get): + pipeline_config = 
pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + tflite_graph_file = self._export_graph_with_postprocessing_op( + pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + mock_get.assert_not_called() + + @mock.patch.object(exporter, 'rewrite_nn_resize_op') + def test_export_with_nn_resize_op_called_with_fpn(self, mock_get): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 + pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 + pipeline_config.model.ssd.feature_extractor.fpn.min_level = 3 + pipeline_config.model.ssd.feature_extractor.fpn.max_level = 7 + tflite_graph_file = self._export_graph_with_postprocessing_op( + pipeline_config) + self.assertTrue(os.path.exists(tflite_graph_file)) + self.assertEqual(1, mock_get.call_count) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter.py new file mode 100644 index 0000000000000000000000000000000000000000..61c5f7f22db46c88c8bc5c1803b281da4c020967 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter.py @@ -0,0 +1,656 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions to export object detection inference graph.""" +import os +import tempfile +import tensorflow.compat.v1 as tf +import tf_slim as slim +from tensorflow.core.protobuf import saver_pb2 +from tensorflow.python.tools import freeze_graph # pylint: disable=g-direct-tensorflow-import +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.core import standard_fields as fields +from object_detection.data_decoders import tf_example_decoder +from object_detection.utils import config_util +from object_detection.utils import shape_utils + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import tfprof as contrib_tfprof + from tensorflow.contrib.quantize.python import graph_matcher +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +freeze_graph_with_def_protos = freeze_graph.freeze_graph_with_def_protos + + +def parse_side_inputs(side_input_shapes_string, side_input_names_string, + side_input_types_string): + """Parses side input flags. + + Args: + side_input_shapes_string: The shape of the side input tensors, provided as a + comma-separated list of integers. A value of -1 is used for unknown + dimensions. A `/` denotes a break, starting the shape of the next side + input tensor. 
+ side_input_names_string: The names of the side input tensors, provided as a + comma-separated list of strings. + side_input_types_string: The type of the side input tensors, provided as a + comma-separated list of types, each of `string`, `integer`, or `float`. + + Returns: + side_input_shapes: A list of shapes. + side_input_names: A list of strings. + side_input_types: A list of tensorflow dtypes. + + """ + if side_input_shapes_string: + side_input_shapes = [] + for side_input_shape_list in side_input_shapes_string.split('/'): + side_input_shape = [ + int(dim) if dim != '-1' else None + for dim in side_input_shape_list.split(',') + ] + side_input_shapes.append(side_input_shape) + else: + raise ValueError('When using side_inputs, side_input_shapes must be ' + 'specified in the input flags.') + if side_input_names_string: + side_input_names = list(side_input_names_string.split(',')) + else: + raise ValueError('When using side_inputs, side_input_names must be ' + 'specified in the input flags.') + if side_input_types_string: + typelookup = {'float': tf.float32, 'int': tf.int32, 'string': tf.string} + side_input_types = [ + typelookup[side_input_type] + for side_input_type in side_input_types_string.split(',') + ] + else: + raise ValueError('When using side_inputs, side_input_types must be ' + 'specified in the input flags.') + return side_input_shapes, side_input_names, side_input_types + + +def rewrite_nn_resize_op(is_quantized=False): + """Replaces a custom nearest-neighbor resize op with the Tensorflow version. + + Some graphs use this custom version for TPU-compatibility. + + Args: + is_quantized: True if the default graph is quantized. + """ + def remove_nn(): + """Remove nearest neighbor upsampling structures and replace with TF op.""" + input_pattern = graph_matcher.OpTypePattern( + 'FakeQuantWithMinMaxVars' if is_quantized else '*') + stack_1_pattern = graph_matcher.OpTypePattern( + 'Pack', inputs=[input_pattern, input_pattern], ordered_inputs=False) + stack_2_pattern = graph_matcher.OpTypePattern( + 'Pack', inputs=[stack_1_pattern, stack_1_pattern], ordered_inputs=False) + reshape_pattern = graph_matcher.OpTypePattern( + 'Reshape', inputs=[stack_2_pattern, 'Const'], ordered_inputs=False) + consumer_pattern1 = graph_matcher.OpTypePattern( + 'Add|AddV2|Max|Mul', inputs=[reshape_pattern, '*'], + ordered_inputs=False) + consumer_pattern2 = graph_matcher.OpTypePattern( + 'StridedSlice', inputs=[reshape_pattern, '*', '*', '*'], + ordered_inputs=False) + + def replace_matches(consumer_pattern): + """Search for nearest neighbor pattern and replace with TF op.""" + match_counter = 0 + matcher = graph_matcher.GraphMatcher(consumer_pattern) + for match in matcher.match_graph(tf.get_default_graph()): + match_counter += 1 + projection_op = match.get_op(input_pattern) + reshape_op = match.get_op(reshape_pattern) + consumer_op = match.get_op(consumer_pattern) + nn_resize = tf.image.resize_nearest_neighbor( + projection_op.outputs[0], + reshape_op.outputs[0].shape.dims[1:3], + align_corners=False, + name=os.path.split(reshape_op.name)[0] + '/resize_nearest_neighbor') + + for index, op_input in enumerate(consumer_op.inputs): + if op_input == reshape_op.outputs[0]: + consumer_op._update_input(index, nn_resize) # pylint: disable=protected-access + break + + return match_counter + + match_counter = replace_matches(consumer_pattern1) + match_counter += replace_matches(consumer_pattern2) + + tf.logging.info('Found and fixed {} matches'.format(match_counter)) + return match_counter + + # Applying twice 
because both inputs to Add could be NN pattern + total_removals = 0 + while remove_nn(): + total_removals += 1 + # This number is chosen based on the nas-fpn architecture. + if total_removals > 4: + raise ValueError('Graph removal encountered a infinite loop.') + + +def replace_variable_values_with_moving_averages(graph, + current_checkpoint_file, + new_checkpoint_file, + no_ema_collection=None): + """Replaces variable values in the checkpoint with their moving averages. + + If the current checkpoint has shadow variables maintaining moving averages of + the variables defined in the graph, this function generates a new checkpoint + where the variables contain the values of their moving averages. + + Args: + graph: a tf.Graph object. + current_checkpoint_file: a checkpoint containing both original variables and + their moving averages. + new_checkpoint_file: file path to write a new checkpoint. + no_ema_collection: A list of namescope substrings to match the variables + to eliminate EMA. + """ + with graph.as_default(): + variable_averages = tf.train.ExponentialMovingAverage(0.0) + ema_variables_to_restore = variable_averages.variables_to_restore() + ema_variables_to_restore = config_util.remove_unecessary_ema( + ema_variables_to_restore, no_ema_collection) + with tf.Session() as sess: + read_saver = tf.train.Saver(ema_variables_to_restore) + read_saver.restore(sess, current_checkpoint_file) + write_saver = tf.train.Saver() + write_saver.save(sess, new_checkpoint_file) + + +def _image_tensor_input_placeholder(input_shape=None): + """Returns input placeholder and a 4-D uint8 image tensor.""" + if input_shape is None: + input_shape = (None, None, None, 3) + input_tensor = tf.placeholder( + dtype=tf.uint8, shape=input_shape, name='image_tensor') + return input_tensor, input_tensor + + +def _side_input_tensor_placeholder(side_input_shape, side_input_name, + side_input_type): + """Returns side input placeholder and side input tensor.""" + side_input_tensor = tf.placeholder( + dtype=side_input_type, shape=side_input_shape, name=side_input_name) + return side_input_tensor, side_input_tensor + + +def _tf_example_input_placeholder(input_shape=None): + """Returns input that accepts a batch of strings with tf examples. + + Args: + input_shape: the shape to resize the output decoded images to (optional). + + Returns: + a tuple of input placeholder and the output decoded images. + """ + batch_tf_example_placeholder = tf.placeholder( + tf.string, shape=[None], name='tf_example') + def decode(tf_example_string_tensor): + tensor_dict = tf_example_decoder.TfExampleDecoder().decode( + tf_example_string_tensor) + image_tensor = tensor_dict[fields.InputDataFields.image] + if input_shape is not None: + image_tensor = tf.image.resize(image_tensor, input_shape[1:3]) + return image_tensor + return (batch_tf_example_placeholder, + shape_utils.static_or_dynamic_map_fn( + decode, + elems=batch_tf_example_placeholder, + dtype=tf.uint8, + parallel_iterations=32, + back_prop=False)) + + +def _encoded_image_string_tensor_input_placeholder(input_shape=None): + """Returns input that accepts a batch of PNG or JPEG strings. + + Args: + input_shape: the shape to resize the output decoded images to (optional). + + Returns: + a tuple of input placeholder and the output decoded images. 
+ """ + batch_image_str_placeholder = tf.placeholder( + dtype=tf.string, + shape=[None], + name='encoded_image_string_tensor') + def decode(encoded_image_string_tensor): + image_tensor = tf.image.decode_image(encoded_image_string_tensor, + channels=3) + image_tensor.set_shape((None, None, 3)) + if input_shape is not None: + image_tensor = tf.image.resize(image_tensor, input_shape[1:3]) + return image_tensor + return (batch_image_str_placeholder, + tf.map_fn( + decode, + elems=batch_image_str_placeholder, + dtype=tf.uint8, + parallel_iterations=32, + back_prop=False)) + + +input_placeholder_fn_map = { + 'image_tensor': _image_tensor_input_placeholder, + 'encoded_image_string_tensor': + _encoded_image_string_tensor_input_placeholder, + 'tf_example': _tf_example_input_placeholder +} + + +def add_output_tensor_nodes(postprocessed_tensors, + output_collection_name='inference_op'): + """Adds output nodes for detection boxes and scores. + + Adds the following nodes for output tensors - + * num_detections: float32 tensor of shape [batch_size]. + * detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4] + containing detected boxes. + * detection_scores: float32 tensor of shape [batch_size, num_boxes] + containing scores for the detected boxes. + * detection_multiclass_scores: (Optional) float32 tensor of shape + [batch_size, num_boxes, num_classes_with_background] for containing class + score distribution for detected boxes including background if any. + * detection_features: (Optional) float32 tensor of shape + [batch, num_boxes, roi_height, roi_width, depth] + containing classifier features + for each detected box + * detection_classes: float32 tensor of shape [batch_size, num_boxes] + containing class predictions for the detected boxes. + * detection_keypoints: (Optional) float32 tensor of shape + [batch_size, num_boxes, num_keypoints, 2] containing keypoints for each + detection box. + * detection_masks: (Optional) float32 tensor of shape + [batch_size, num_boxes, mask_height, mask_width] containing masks for each + detection box. + + Args: + postprocessed_tensors: a dictionary containing the following fields + 'detection_boxes': [batch, max_detections, 4] + 'detection_scores': [batch, max_detections] + 'detection_multiclass_scores': [batch, max_detections, + num_classes_with_background] + 'detection_features': [batch, num_boxes, roi_height, roi_width, depth] + 'detection_classes': [batch, max_detections] + 'detection_masks': [batch, max_detections, mask_height, mask_width] + (optional). + 'detection_keypoints': [batch, max_detections, num_keypoints, 2] + (optional). + 'num_detections': [batch] + output_collection_name: Name of collection to add output tensors to. + + Returns: + A tensor dict containing the added output tensor nodes. 
+ """ + detection_fields = fields.DetectionResultFields + label_id_offset = 1 + boxes = postprocessed_tensors.get(detection_fields.detection_boxes) + scores = postprocessed_tensors.get(detection_fields.detection_scores) + multiclass_scores = postprocessed_tensors.get( + detection_fields.detection_multiclass_scores) + box_classifier_features = postprocessed_tensors.get( + detection_fields.detection_features) + raw_boxes = postprocessed_tensors.get(detection_fields.raw_detection_boxes) + raw_scores = postprocessed_tensors.get(detection_fields.raw_detection_scores) + classes = postprocessed_tensors.get( + detection_fields.detection_classes) + label_id_offset + keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) + masks = postprocessed_tensors.get(detection_fields.detection_masks) + num_detections = postprocessed_tensors.get(detection_fields.num_detections) + outputs = {} + outputs[detection_fields.detection_boxes] = tf.identity( + boxes, name=detection_fields.detection_boxes) + outputs[detection_fields.detection_scores] = tf.identity( + scores, name=detection_fields.detection_scores) + if multiclass_scores is not None: + outputs[detection_fields.detection_multiclass_scores] = tf.identity( + multiclass_scores, name=detection_fields.detection_multiclass_scores) + if box_classifier_features is not None: + outputs[detection_fields.detection_features] = tf.identity( + box_classifier_features, + name=detection_fields.detection_features) + outputs[detection_fields.detection_classes] = tf.identity( + classes, name=detection_fields.detection_classes) + outputs[detection_fields.num_detections] = tf.identity( + num_detections, name=detection_fields.num_detections) + if raw_boxes is not None: + outputs[detection_fields.raw_detection_boxes] = tf.identity( + raw_boxes, name=detection_fields.raw_detection_boxes) + if raw_scores is not None: + outputs[detection_fields.raw_detection_scores] = tf.identity( + raw_scores, name=detection_fields.raw_detection_scores) + if keypoints is not None: + outputs[detection_fields.detection_keypoints] = tf.identity( + keypoints, name=detection_fields.detection_keypoints) + if masks is not None: + outputs[detection_fields.detection_masks] = tf.identity( + masks, name=detection_fields.detection_masks) + for output_key in outputs: + tf.add_to_collection(output_collection_name, outputs[output_key]) + + return outputs + + +def write_saved_model(saved_model_path, + frozen_graph_def, + inputs, + outputs): + """Writes SavedModel to disk. + + If checkpoint_path is not None bakes the weights into the graph thereby + eliminating the need of checkpoint files during inference. If the model + was trained with moving averages, setting use_moving_averages to true + restores the moving averages, otherwise the original set of variables + is restored. + + Args: + saved_model_path: Path to write SavedModel. + frozen_graph_def: tf.GraphDef holding frozen graph. + inputs: A tensor dictionary containing the inputs to a DetectionModel. + outputs: A tensor dictionary containing the outputs of a DetectionModel. 
+ """ + with tf.Graph().as_default(): + with tf.Session() as sess: + + tf.import_graph_def(frozen_graph_def, name='') + + builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path) + + tensor_info_inputs = {} + if isinstance(inputs, dict): + for k, v in inputs.items(): + tensor_info_inputs[k] = tf.saved_model.utils.build_tensor_info(v) + else: + tensor_info_inputs['inputs'] = tf.saved_model.utils.build_tensor_info( + inputs) + tensor_info_outputs = {} + for k, v in outputs.items(): + tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v) + + detection_signature = ( + tf.saved_model.signature_def_utils.build_signature_def( + inputs=tensor_info_inputs, + outputs=tensor_info_outputs, + method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME + )) + + builder.add_meta_graph_and_variables( + sess, + [tf.saved_model.tag_constants.SERVING], + signature_def_map={ + tf.saved_model.signature_constants + .DEFAULT_SERVING_SIGNATURE_DEF_KEY: + detection_signature, + }, + ) + builder.save() + + +def write_graph_and_checkpoint(inference_graph_def, + model_path, + input_saver_def, + trained_checkpoint_prefix): + """Writes the graph and the checkpoint into disk.""" + for node in inference_graph_def.node: + node.device = '' + with tf.Graph().as_default(): + tf.import_graph_def(inference_graph_def, name='') + with tf.Session() as sess: + saver = tf.train.Saver( + saver_def=input_saver_def, save_relative_paths=True) + saver.restore(sess, trained_checkpoint_prefix) + saver.save(sess, model_path) + + +def _get_outputs_from_inputs(input_tensors, detection_model, + output_collection_name, **side_inputs): + inputs = tf.cast(input_tensors, dtype=tf.float32) + preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) + output_tensors = detection_model.predict( + preprocessed_inputs, true_image_shapes, **side_inputs) + postprocessed_tensors = detection_model.postprocess( + output_tensors, true_image_shapes) + return add_output_tensor_nodes(postprocessed_tensors, + output_collection_name) + + +def build_detection_graph(input_type, detection_model, input_shape, + output_collection_name, graph_hook_fn, + use_side_inputs=False, side_input_shapes=None, + side_input_names=None, side_input_types=None): + """Build the detection graph.""" + if input_type not in input_placeholder_fn_map: + raise ValueError('Unknown input type: {}'.format(input_type)) + placeholder_args = {} + side_inputs = {} + if input_shape is not None: + if (input_type != 'image_tensor' and + input_type != 'encoded_image_string_tensor' and + input_type != 'tf_example' and + input_type != 'tf_sequence_example'): + raise ValueError('Can only specify input shape for `image_tensor`, ' + '`encoded_image_string_tensor`, `tf_example`, ' + ' or `tf_sequence_example` inputs.') + placeholder_args['input_shape'] = input_shape + placeholder_tensor, input_tensors = input_placeholder_fn_map[input_type]( + **placeholder_args) + placeholder_tensors = {'inputs': placeholder_tensor} + if use_side_inputs: + for idx, side_input_name in enumerate(side_input_names): + side_input_placeholder, side_input = _side_input_tensor_placeholder( + side_input_shapes[idx], side_input_name, side_input_types[idx]) + print(side_input) + side_inputs[side_input_name] = side_input + placeholder_tensors[side_input_name] = side_input_placeholder + outputs = _get_outputs_from_inputs( + input_tensors=input_tensors, + detection_model=detection_model, + output_collection_name=output_collection_name, + **side_inputs) + + # Add global step to the graph. 
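+  # Recording the step lets the exported checkpoint carry the training step
+  # number (the TFLite export path in export_tflite_ssd_graph_lib does the
+  # same via tf.train.get_or_create_global_step).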
+ slim.get_or_create_global_step() + + if graph_hook_fn: graph_hook_fn() + + return outputs, placeholder_tensors + + +def _export_inference_graph(input_type, + detection_model, + use_moving_averages, + trained_checkpoint_prefix, + output_directory, + additional_output_tensor_names=None, + input_shape=None, + output_collection_name='inference_op', + graph_hook_fn=None, + write_inference_graph=False, + temp_checkpoint_prefix='', + use_side_inputs=False, + side_input_shapes=None, + side_input_names=None, + side_input_types=None): + """Export helper.""" + tf.gfile.MakeDirs(output_directory) + frozen_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + saved_model_path = os.path.join(output_directory, 'saved_model') + model_path = os.path.join(output_directory, 'model.ckpt') + + outputs, placeholder_tensor_dict = build_detection_graph( + input_type=input_type, + detection_model=detection_model, + input_shape=input_shape, + output_collection_name=output_collection_name, + graph_hook_fn=graph_hook_fn, + use_side_inputs=use_side_inputs, + side_input_shapes=side_input_shapes, + side_input_names=side_input_names, + side_input_types=side_input_types) + + profile_inference_graph(tf.get_default_graph()) + saver_kwargs = {} + if use_moving_averages: + if not temp_checkpoint_prefix: + # This check is to be compatible with both version of SaverDef. + if os.path.isfile(trained_checkpoint_prefix): + saver_kwargs['write_version'] = saver_pb2.SaverDef.V1 + temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name + else: + temp_checkpoint_prefix = tempfile.mkdtemp() + replace_variable_values_with_moving_averages( + tf.get_default_graph(), trained_checkpoint_prefix, + temp_checkpoint_prefix) + checkpoint_to_use = temp_checkpoint_prefix + else: + checkpoint_to_use = trained_checkpoint_prefix + + saver = tf.train.Saver(**saver_kwargs) + input_saver_def = saver.as_saver_def() + + write_graph_and_checkpoint( + inference_graph_def=tf.get_default_graph().as_graph_def(), + model_path=model_path, + input_saver_def=input_saver_def, + trained_checkpoint_prefix=checkpoint_to_use) + if write_inference_graph: + inference_graph_def = tf.get_default_graph().as_graph_def() + inference_graph_path = os.path.join(output_directory, + 'inference_graph.pbtxt') + for node in inference_graph_def.node: + node.device = '' + with tf.gfile.GFile(inference_graph_path, 'wb') as f: + f.write(str(inference_graph_def)) + + if additional_output_tensor_names is not None: + output_node_names = ','.join(list(outputs.keys())+( + additional_output_tensor_names)) + else: + output_node_names = ','.join(outputs.keys()) + + frozen_graph_def = freeze_graph.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=checkpoint_to_use, + output_node_names=output_node_names, + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + output_graph=frozen_graph_path, + clear_devices=True, + initializer_nodes='') + + write_saved_model(saved_model_path, frozen_graph_def, + placeholder_tensor_dict, outputs) + + +def export_inference_graph(input_type, + pipeline_config, + trained_checkpoint_prefix, + output_directory, + input_shape=None, + output_collection_name='inference_op', + additional_output_tensor_names=None, + write_inference_graph=False, + use_side_inputs=False, + side_input_shapes=None, + side_input_names=None, + side_input_types=None): + """Exports inference graph for the model specified in the pipeline config. 
+ + Args: + input_type: Type of input for the graph. Can be one of ['image_tensor', + 'encoded_image_string_tensor', 'tf_example']. + pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto. + trained_checkpoint_prefix: Path to the trained checkpoint file. + output_directory: Path to write outputs. + input_shape: Sets a fixed shape for an `image_tensor` input. If not + specified, will default to [None, None, None, 3]. + output_collection_name: Name of collection to add output tensors to. + If None, does not add output tensors to a collection. + additional_output_tensor_names: list of additional output + tensors to include in the frozen graph. + write_inference_graph: If true, writes inference graph to disk. + use_side_inputs: If True, the model requires side_inputs. + side_input_shapes: List of shapes of the side input tensors, + required if use_side_inputs is True. + side_input_names: List of names of the side input tensors, + required if use_side_inputs is True. + side_input_types: List of types of the side input tensors, + required if use_side_inputs is True. + """ + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + graph_rewriter_fn = None + if pipeline_config.HasField('graph_rewriter'): + graph_rewriter_config = pipeline_config.graph_rewriter + graph_rewriter_fn = graph_rewriter_builder.build(graph_rewriter_config, + is_training=False) + _export_inference_graph( + input_type, + detection_model, + pipeline_config.eval_config.use_moving_averages, + trained_checkpoint_prefix, + output_directory, + additional_output_tensor_names, + input_shape, + output_collection_name, + graph_hook_fn=graph_rewriter_fn, + write_inference_graph=write_inference_graph, + use_side_inputs=use_side_inputs, + side_input_shapes=side_input_shapes, + side_input_names=side_input_names, + side_input_types=side_input_types) + pipeline_config.eval_config.use_moving_averages = False + config_util.save_pipeline_config(pipeline_config, output_directory) + + +def profile_inference_graph(graph): + """Profiles the inference graph. + + Prints model parameters and computation FLOPs given an inference graph. + BatchNorms are excluded from the parameter count due to the fact that + BatchNorms are usually folded. BatchNorm, Initializer, Regularizer + and BiasAdd are not considered in FLOP count. + + Args: + graph: the inference graph. + """ + tfprof_vars_option = ( + contrib_tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS) + tfprof_flops_option = contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS + + # Batchnorm is usually folded during inference. + tfprof_vars_option['trim_name_regexes'] = ['.*BatchNorm.*'] + # Initializer and Regularizer are only used in training. + tfprof_flops_option['trim_name_regexes'] = [ + '.*BatchNorm.*', '.*Initializer.*', '.*Regularizer.*', '.*BiasAdd.*' + ] + + contrib_tfprof.model_analyzer.print_model_analysis( + graph, tfprof_options=tfprof_vars_option) + + contrib_tfprof.model_analyzer.print_model_analysis( + graph, tfprof_options=tfprof_flops_option) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_lib_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_lib_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8e85e1124bca40957464b5c80acb6a24ea7fcc3d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_lib_tf2_test.py @@ -0,0 +1,297 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Test for exporter_lib_v2.py.""" + +from __future__ import division +import io +import os +import unittest +from absl.testing import parameterized +import numpy as np +from PIL import Image +import six + +import tensorflow.compat.v2 as tf + +from object_detection import exporter_lib_v2 +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.core import standard_fields as fields +from object_detection.protos import pipeline_pb2 +from object_detection.utils import dataset_util +from object_detection.utils import tf_version + +if six.PY2: + import mock # pylint: disable=g-importing-member,g-import-not-at-top +else: + from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top + + +class FakeModel(model.DetectionModel): + + def __init__(self, conv_weight_scalar=1.0): + super(FakeModel, self).__init__(num_classes=2) + self._conv = tf.keras.layers.Conv2D( + filters=1, kernel_size=1, strides=(1, 1), padding='valid', + kernel_initializer=tf.keras.initializers.Constant( + value=conv_weight_scalar)) + + def preprocess(self, inputs): + true_image_shapes = [] # Doesn't matter for the fake model. 
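+    # The fake model passes inputs through unchanged (no resizing or
+    # normalization), which keeps the expected output values in the tests exact.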
+ return tf.identity(inputs), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): + return_dict = {'image': self._conv(preprocessed_inputs)} + if 'side_inp_1' in side_inputs: + return_dict['image'] += side_inputs['side_inp_1'] + return return_dict + + def postprocess(self, prediction_dict, true_image_shapes): + predict_tensor_sum = tf.reduce_sum(prediction_dict['image']) + with tf.control_dependencies(list(prediction_dict.values())): + postprocessed_tensors = { + 'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]], tf.float32), + 'detection_scores': predict_tensor_sum + tf.constant( + [[0.7, 0.6], [0.9, 0.0]], tf.float32), + 'detection_classes': tf.constant([[0, 1], + [1, 0]], tf.float32), + 'num_detections': tf.constant([2, 1], tf.float32), + } + return postprocessed_tensors + + def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): + pass + + def restore_from_objects(self, fine_tune_checkpoint_type): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase): + + def _save_checkpoint_from_mock_model( + self, checkpoint_dir, conv_weight_scalar=6.0): + mock_model = FakeModel(conv_weight_scalar) + fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) + preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + + ckpt = tf.train.Checkpoint(model=mock_model) + exported_checkpoint_manager = tf.train.CheckpointManager( + ckpt, checkpoint_dir, max_to_keep=1) + exported_checkpoint_manager.save(checkpoint_number=0) + + @parameterized.parameters( + {'input_type': 'image_tensor'}, + {'input_type': 'encoded_image_string_tensor'}, + {'input_type': 'tf_example'}, + ) + def test_export_yields_correct_directory_structure( + self, input_type='image_tensor'): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter_lib_v2.export_inference_graph( + input_type=input_type, + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'saved_model.pb'))) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'variables', 'variables.index'))) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'variables', + 'variables.data-00000-of-00001'))) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'checkpoint', 'ckpt-0.index'))) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'checkpoint', 'ckpt-0.data-00000-of-00001'))) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'pipeline.config'))) + + def get_dummy_input(self, input_type): + """Get dummy input for the given input type.""" + + if input_type == 'image_tensor': + return 
np.zeros((1, 20, 20, 3), dtype=np.uint8) + if input_type == 'float_image_tensor': + return np.zeros((1, 20, 20, 3), dtype=np.float32) + elif input_type == 'encoded_image_string_tensor': + image = Image.new('RGB', (20, 20)) + byte_io = io.BytesIO() + image.save(byte_io, 'PNG') + return [byte_io.getvalue()] + elif input_type == 'tf_example': + image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8) + encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy() + example = tf.train.Example( + features=tf.train.Features( + feature={ + 'image/encoded': + dataset_util.bytes_feature(encoded_jpeg), + 'image/format': + dataset_util.bytes_feature(six.b('jpeg')), + 'image/source_id': + dataset_util.bytes_feature(six.b('image_id')), + })).SerializeToString() + return [example] + + @parameterized.parameters( + {'input_type': 'image_tensor'}, + {'input_type': 'encoded_image_string_tensor'}, + {'input_type': 'tf_example'}, + {'input_type': 'float_image_tensor'}, + ) + def test_export_saved_model_and_run_inference( + self, input_type='image_tensor'): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter_lib_v2.export_inference_graph( + input_type=input_type, + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory) + + saved_model_path = os.path.join(output_directory, 'saved_model') + detect_fn = tf.saved_model.load(saved_model_path) + image = self.get_dummy_input(input_type) + detections = detect_fn(tf.constant(image)) + + detection_fields = fields.DetectionResultFields + self.assertAllClose(detections[detection_fields.detection_boxes], + [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(detections[detection_fields.detection_scores], + [[0.7, 0.6], [0.9, 0.0]]) + self.assertAllClose(detections[detection_fields.detection_classes], + [[1, 2], [2, 1]]) + self.assertAllClose(detections[detection_fields.num_detections], [2, 1]) + + @parameterized.parameters( + {'use_default_serving': True}, + {'use_default_serving': False} + ) + def test_export_saved_model_and_run_inference_with_side_inputs( + self, input_type='image_tensor', use_default_serving=True): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter_lib_v2.export_inference_graph( + input_type=input_type, + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory, + use_side_inputs=True, + side_input_shapes='1/2,2', + side_input_names='side_inp_1,side_inp_2', + side_input_types='tf.float32,tf.uint8') + + saved_model_path = os.path.join(output_directory, 'saved_model') + detect_fn = tf.saved_model.load(saved_model_path) + detect_fn_sig = detect_fn.signatures['serving_default'] + image = tf.constant(self.get_dummy_input(input_type)) + side_input_1 = np.ones((1,), dtype=np.float32) + side_input_2 = 
np.ones((2, 2), dtype=np.uint8) + if use_default_serving: + detections = detect_fn_sig(input_tensor=image, + side_inp_1=tf.constant(side_input_1), + side_inp_2=tf.constant(side_input_2)) + else: + detections = detect_fn(image, + tf.constant(side_input_1), + tf.constant(side_input_2)) + + detection_fields = fields.DetectionResultFields + self.assertAllClose(detections[detection_fields.detection_boxes], + [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(detections[detection_fields.detection_scores], + [[400.7, 400.6], [400.9, 400.0]]) + self.assertAllClose(detections[detection_fields.detection_classes], + [[1, 2], [2, 1]]) + self.assertAllClose(detections[detection_fields.num_detections], [2, 1]) + + def test_export_checkpoint_and_run_inference_with_image(self): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir, conv_weight_scalar=2.0) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter_lib_v2.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory) + + mock_model = FakeModel() + ckpt = tf.compat.v2.train.Checkpoint( + model=mock_model) + checkpoint_dir = os.path.join(tmp_dir, 'output', 'checkpoint') + manager = tf.compat.v2.train.CheckpointManager( + ckpt, checkpoint_dir, max_to_keep=7) + ckpt.restore(manager.latest_checkpoint).expect_partial() + + fake_image = tf.ones(shape=[1, 5, 5, 3], dtype=tf.float32) + preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + detections = mock_model.postprocess(predictions, true_image_shapes) + + # 150 = conv_weight_scalar * height * width * channels = 2 * 5 * 5 * 3. + self.assertAllClose(detections['detection_scores'], + [[150 + 0.7, 150 + 0.6], [150 + 0.9, 150 + 0.0]]) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_lib_v2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_lib_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..5a7a182c62ab4a24d271a64bae9e3b4fb72fcb79 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_lib_v2.py @@ -0,0 +1,275 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Functions to export object detection inference graph.""" +import ast +import os + +import tensorflow.compat.v2 as tf +from object_detection.builders import model_builder +from object_detection.core import standard_fields as fields +from object_detection.data_decoders import tf_example_decoder +from object_detection.utils import config_util + + +INPUT_BUILDER_UTIL_MAP = { + 'model_build': model_builder.build, +} + + +def _decode_image(encoded_image_string_tensor): + image_tensor = tf.image.decode_image(encoded_image_string_tensor, + channels=3) + image_tensor.set_shape((None, None, 3)) + return image_tensor + + +def _decode_tf_example(tf_example_string_tensor): + tensor_dict = tf_example_decoder.TfExampleDecoder().decode( + tf_example_string_tensor) + image_tensor = tensor_dict[fields.InputDataFields.image] + return image_tensor + + +def _combine_side_inputs(side_input_shapes='', + side_input_types='', + side_input_names=''): + """Zips the side inputs together. + + Args: + side_input_shapes: forward-slash-separated list of comma-separated lists + describing input shapes. + side_input_types: comma-separated list of the types of the inputs. + side_input_names: comma-separated list of the names of the inputs. + + Returns: + a zipped list of side input tuples. + """ + side_input_shapes = [ + ast.literal_eval('[' + x + ']') for x in side_input_shapes.split('/') + ] + side_input_types = eval('[' + side_input_types + ']') # pylint: disable=eval-used + side_input_names = side_input_names.split(',') + return zip(side_input_shapes, side_input_types, side_input_names) + + +class DetectionInferenceModule(tf.Module): + """Detection Inference Module.""" + + def __init__(self, detection_model, + use_side_inputs=False, + zipped_side_inputs=None): + """Initializes a module for detection. + + Args: + detection_model: the detection model to use for inference. + use_side_inputs: whether to use side inputs. + zipped_side_inputs: the zipped side inputs. + """ + self._model = detection_model + + def _get_side_input_signature(self, zipped_side_inputs): + sig = [] + side_input_names = [] + for info in zipped_side_inputs: + sig.append(tf.TensorSpec(shape=info[0], + dtype=info[1], + name=info[2])) + side_input_names.append(info[2]) + return sig + + def _get_side_names_from_zip(self, zipped_side_inputs): + return [side[2] for side in zipped_side_inputs] + + def _run_inference_on_images(self, image, **kwargs): + """Cast image to float and run inference. + + Args: + image: uint8 Tensor of shape [1, None, None, 3]. + **kwargs: additional keyword arguments. + + Returns: + Tensor dictionary holding detections. + """ + label_id_offset = 1 + + image = tf.cast(image, tf.float32) + image, shapes = self._model.preprocess(image) + prediction_dict = self._model.predict(image, shapes, **kwargs) + detections = self._model.postprocess(prediction_dict, shapes) + classes_field = fields.DetectionResultFields.detection_classes + detections[classes_field] = ( + tf.cast(detections[classes_field], tf.float32) + label_id_offset) + + for key, val in detections.items(): + detections[key] = tf.cast(val, tf.float32) + + return detections + + +class DetectionFromImageModule(DetectionInferenceModule): + """Detection Inference Module for image inputs.""" + + def __init__(self, detection_model, + use_side_inputs=False, + zipped_side_inputs=None): + """Initializes a module for detection. + + Args: + detection_model: the detection model to use for inference. 
+ use_side_inputs: whether to use side inputs. + zipped_side_inputs: the zipped side inputs. + """ + if zipped_side_inputs is None: + zipped_side_inputs = [] + sig = [tf.TensorSpec(shape=[1, None, None, 3], + dtype=tf.uint8, + name='input_tensor')] + if use_side_inputs: + sig.extend(self._get_side_input_signature(zipped_side_inputs)) + self._side_input_names = self._get_side_names_from_zip(zipped_side_inputs) + + def call_func(input_tensor, *side_inputs): + kwargs = dict(zip(self._side_input_names, side_inputs)) + return self._run_inference_on_images(input_tensor, **kwargs) + + self.__call__ = tf.function(call_func, input_signature=sig) + + # TODO(kaushikshiv): Check if omitting the signature also works. + super(DetectionFromImageModule, self).__init__(detection_model, + use_side_inputs, + zipped_side_inputs) + + +class DetectionFromFloatImageModule(DetectionInferenceModule): + """Detection Inference Module for float image inputs.""" + + @tf.function( + input_signature=[ + tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.float32)]) + def __call__(self, input_tensor): + return self._run_inference_on_images(input_tensor) + + +class DetectionFromEncodedImageModule(DetectionInferenceModule): + """Detection Inference Module for encoded image string inputs.""" + + @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.string)]) + def __call__(self, input_tensor): + with tf.device('cpu:0'): + image = tf.map_fn( + _decode_image, + elems=input_tensor, + dtype=tf.uint8, + parallel_iterations=32, + back_prop=False) + return self._run_inference_on_images(image) + + +class DetectionFromTFExampleModule(DetectionInferenceModule): + """Detection Inference Module for TF.Example inputs.""" + + @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.string)]) + def __call__(self, input_tensor): + with tf.device('cpu:0'): + image = tf.map_fn( + _decode_tf_example, + elems=input_tensor, + dtype=tf.uint8, + parallel_iterations=32, + back_prop=False) + return self._run_inference_on_images(image) + +DETECTION_MODULE_MAP = { + 'image_tensor': DetectionFromImageModule, + 'encoded_image_string_tensor': + DetectionFromEncodedImageModule, + 'tf_example': DetectionFromTFExampleModule, + 'float_image_tensor': DetectionFromFloatImageModule +} + + +def export_inference_graph(input_type, + pipeline_config, + trained_checkpoint_dir, + output_directory, + use_side_inputs=False, + side_input_shapes='', + side_input_types='', + side_input_names=''): + """Exports inference graph for the model specified in the pipeline config. + + This function creates `output_directory` if it does not already exist, + which will hold a copy of the pipeline config with filename `pipeline.config`, + and two subdirectories named `checkpoint` and `saved_model` + (containing the exported checkpoint and SavedModel respectively). + + Args: + input_type: Type of input for the graph. Can be one of ['image_tensor', + 'encoded_image_string_tensor', 'tf_example']. + pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto. + trained_checkpoint_dir: Path to the trained checkpoint file. + output_directory: Path to write outputs. + use_side_inputs: boolean that determines whether side inputs should be + included in the input signature. + side_input_shapes: forward-slash-separated list of comma-separated lists + describing input shapes. + side_input_types: comma-separated list of the types of the inputs. + side_input_names: comma-separated list of the names of the inputs. + Raises: + ValueError: if input_type is invalid. 
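+
+  Example (a minimal sketch mirroring the flow in exporter_main_v2.py; the
+  config path and directories below are placeholders, not real files):
+
+    from google.protobuf import text_format
+    from object_detection.protos import pipeline_pb2
+
+    # Load the training pipeline config, then export a SavedModel plus a
+    # checkpoint copy under `output_directory`.
+    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
+    with tf.io.gfile.GFile('path/to/pipeline.config', 'r') as f:
+      text_format.Merge(f.read(), pipeline_config)
+    export_inference_graph(
+        input_type='image_tensor',
+        pipeline_config=pipeline_config,
+        trained_checkpoint_dir='path/to/checkpoint_dir',
+        output_directory='path/to/exported_model')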
+ """ + output_checkpoint_directory = os.path.join(output_directory, 'checkpoint') + output_saved_model_directory = os.path.join(output_directory, 'saved_model') + + detection_model = INPUT_BUILDER_UTIL_MAP['model_build']( + pipeline_config.model, is_training=False) + + ckpt = tf.train.Checkpoint( + model=detection_model) + manager = tf.train.CheckpointManager( + ckpt, trained_checkpoint_dir, max_to_keep=1) + status = ckpt.restore(manager.latest_checkpoint).expect_partial() + + if input_type not in DETECTION_MODULE_MAP: + raise ValueError('Unrecognized `input_type`') + if use_side_inputs and input_type != 'image_tensor': + raise ValueError('Side inputs supported for image_tensor input type only.') + + zipped_side_inputs = [] + if use_side_inputs: + zipped_side_inputs = _combine_side_inputs(side_input_shapes, + side_input_types, + side_input_names) + + detection_module = DETECTION_MODULE_MAP[input_type](detection_model, + use_side_inputs, + list(zipped_side_inputs)) + # Getting the concrete function traces the graph and forces variables to + # be constructed --- only after this can we save the checkpoint and + # saved model. + concrete_function = detection_module.__call__.get_concrete_function() + status.assert_existing_objects_matched() + + exported_checkpoint_manager = tf.train.CheckpointManager( + ckpt, output_checkpoint_directory, max_to_keep=1) + exported_checkpoint_manager.save(checkpoint_number=0) + + tf.saved_model.save(detection_module, + output_saved_model_directory, + signatures=concrete_function) + + config_util.save_pipeline_config(pipeline_config, output_directory) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_main_v2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_main_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..bb4a01aa8e9367927d54ebe5c43b7bef9c0fbd4c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_main_v2.py @@ -0,0 +1,159 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Tool to export an object detection model for inference. + +Prepares an object detection tensorflow graph for inference using model +configuration and a trained checkpoint. Outputs associated checkpoint files, +a SavedModel, and a copy of the model config. + +The inference graph contains one of three input nodes depending on the user +specified option. + * `image_tensor`: Accepts a uint8 4-D tensor of shape [1, None, None, 3] + * `float_image_tensor`: Accepts a float32 4-D tensor of shape + [1, None, None, 3] + * `encoded_image_string_tensor`: Accepts a 1-D string tensor of shape [None] + containing encoded PNG or JPEG images. Image resolutions are expected to be + the same if more than 1 image is provided. 
+ * `tf_example`: Accepts a 1-D string tensor of shape [None] containing + serialized TFExample protos. Image resolutions are expected to be the same + if more than 1 image is provided. + +and the following output nodes returned by the model.postprocess(..): + * `num_detections`: Outputs float32 tensors of the form [batch] + that specifies the number of valid boxes per image in the batch. + * `detection_boxes`: Outputs float32 tensors of the form + [batch, num_boxes, 4] containing detected boxes. + * `detection_scores`: Outputs float32 tensors of the form + [batch, num_boxes] containing class scores for the detections. + * `detection_classes`: Outputs float32 tensors of the form + [batch, num_boxes] containing classes for the detections. + + +Example Usage: +-------------- +python exporter_main_v2.py \ + --input_type image_tensor \ + --pipeline_config_path path/to/ssd_inception_v2.config \ + --trained_checkpoint_dir path/to/checkpoint \ + --output_directory path/to/exported_model_directory + --use_side_inputs True/False \ + --side_input_shapes dim_0,dim_1,...dim_a/.../dim_0,dim_1,...,dim_z \ + --side_input_names name_a,name_b,...,name_c \ + --side_input_types type_1,type_2 + +The expected output would be in the directory +path/to/exported_model_directory (which is created if it does not exist) +holding two subdirectories (corresponding to checkpoint and SavedModel, +respectively) and a copy of the pipeline config. + +Config overrides (see the `config_override` flag) are text protobufs +(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override +certain fields in the provided pipeline_config_path. These are useful for +making small changes to the inference graph that differ from the training or +eval config. + +Example Usage (in which we change the second stage post-processing score +threshold to be 0.5): + +python exporter_main_v2.py \ + --input_type image_tensor \ + --pipeline_config_path path/to/ssd_inception_v2.config \ + --trained_checkpoint_dir path/to/checkpoint \ + --output_directory path/to/exported_model_directory \ + --config_override " \ + model{ \ + faster_rcnn { \ + second_stage_post_processing { \ + batch_non_max_suppression { \ + score_threshold: 0.5 \ + } \ + } \ + } \ + }" + +If side inputs are desired, the following arguments could be appended +(the example below is for Context R-CNN). + --use_side_inputs True \ + --side_input_shapes 1,2000,2057/1 \ + --side_input_names context_features,valid_context_size \ + --side_input_types tf.float32,tf.int32 +""" +from absl import app +from absl import flags + +import tensorflow.compat.v2 as tf +from google.protobuf import text_format +from object_detection import exporter_lib_v2 +from object_detection.protos import pipeline_pb2 + +tf.enable_v2_behavior() + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('input_type', 'image_tensor', 'Type of input node. 
Can be ' + 'one of [`image_tensor`, `encoded_image_string_tensor`, ' + '`tf_example`, `float_image_tensor`]') +flags.DEFINE_string('pipeline_config_path', None, + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file.') +flags.DEFINE_string('trained_checkpoint_dir', None, + 'Path to trained checkpoint directory') +flags.DEFINE_string('output_directory', None, 'Path to write outputs.') +flags.DEFINE_string('config_override', '', + 'pipeline_pb2.TrainEvalPipelineConfig ' + 'text proto to override pipeline_config_path.') +flags.DEFINE_boolean('use_side_inputs', False, + 'If True, uses side inputs as well as image inputs.') +flags.DEFINE_string('side_input_shapes', '', + 'If use_side_inputs is True, this explicitly sets ' + 'the shape of the side input tensors to a fixed size. The ' + 'dimensions are to be provided as a comma-separated list ' + 'of integers. A value of -1 can be used for unknown ' + 'dimensions. A `/` denotes a break, starting the shape of ' + 'the next side input tensor. This flag is required if ' + 'using side inputs.') +flags.DEFINE_string('side_input_types', '', + 'If use_side_inputs is True, this explicitly sets ' + 'the type of the side input tensors. The ' + 'dimensions are to be provided as a comma-separated list ' + 'of types, each of `string`, `integer`, or `float`. ' + 'This flag is required if using side inputs.') +flags.DEFINE_string('side_input_names', '', + 'If use_side_inputs is True, this explicitly sets ' + 'the names of the side input tensors required by the model ' + 'assuming the names will be a comma-separated list of ' + 'strings. This flag is required if using side inputs.') + +flags.mark_flag_as_required('pipeline_config_path') +flags.mark_flag_as_required('trained_checkpoint_dir') +flags.mark_flag_as_required('output_directory') + + +def main(_): + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: + text_format.Merge(f.read(), pipeline_config) + text_format.Merge(FLAGS.config_override, pipeline_config) + exporter_lib_v2.export_inference_graph( + FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_dir, + FLAGS.output_directory, FLAGS.use_side_inputs, FLAGS.side_input_shapes, + FLAGS.side_input_types, FLAGS.side_input_names) + + +if __name__ == '__main__': + app.run(main) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b33bafd8db4f77627d6a64a1035f8c08bf6c09ee --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/exporter_tf1_test.py @@ -0,0 +1,1206 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.export_inference_graph.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import unittest +import numpy as np +import six +import tensorflow.compat.v1 as tf +from google.protobuf import text_format +from tensorflow.python.framework import dtypes +from tensorflow.python.ops import array_ops +from tensorflow.python.tools import strip_unused_lib +from object_detection import exporter +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.protos import graph_rewriter_pb2 +from object_detection.protos import pipeline_pb2 +from object_detection.utils import ops +from object_detection.utils import tf_version +from object_detection.utils import variables_helper + +if six.PY2: + import mock # pylint: disable=g-import-not-at-top +else: + mock = unittest.mock # pylint: disable=g-import-not-at-top, g-importing-member + +# pylint: disable=g-import-not-at-top +try: + import tf_slim as slim +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + + +class FakeModel(model.DetectionModel): + + def __init__(self, add_detection_keypoints=False, add_detection_masks=False, + add_detection_features=False): + self._add_detection_keypoints = add_detection_keypoints + self._add_detection_masks = add_detection_masks + self._add_detection_features = add_detection_features + + def preprocess(self, inputs): + true_image_shapes = [] # Doesn't matter for the fake model. + return tf.identity(inputs), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes): + return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)} + + def postprocess(self, prediction_dict, true_image_shapes): + with tf.control_dependencies(list(prediction_dict.values())): + postprocessed_tensors = { + 'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]], tf.float32), + 'detection_scores': tf.constant([[0.7, 0.6], + [0.9, 0.0]], tf.float32), + 'detection_multiclass_scores': tf.constant([[[0.3, 0.7], [0.4, 0.6]], + [[0.1, 0.9], [0.0, 0.0]]], + tf.float32), + 'detection_classes': tf.constant([[0, 1], + [1, 0]], tf.float32), + 'num_detections': tf.constant([2, 1], tf.float32), + 'raw_detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, 0.0, 0.5]]], + tf.float32), + 'raw_detection_scores': tf.constant([[0.7, 0.6], + [0.9, 0.5]], tf.float32), + } + if self._add_detection_keypoints: + postprocessed_tensors['detection_keypoints'] = tf.constant( + np.arange(48).reshape([2, 2, 6, 2]), tf.float32) + if self._add_detection_masks: + postprocessed_tensors['detection_masks'] = tf.constant( + np.arange(64).reshape([2, 2, 4, 4]), tf.float32) + if self._add_detection_features: + # let fake detection features have shape [4, 4, 10] + postprocessed_tensors['detection_features'] = tf.constant( + np.ones((2, 2, 4, 4, 10)), tf.float32) + + return postprocessed_tensors + + def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): + pass + + def restore_from_objects(self, fine_tune_checkpoint_type): + pass + + def loss(self, prediction_dict, true_image_shapes): + pass + + def regularization_losses(self): + pass + + def updates(self): + pass + + 
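+# Note: the exported graphs shift class indices by a label id offset of 1
+# (see, e.g., `_run_inference_on_images` in exporter_lib_v2.py for the TF2
+# path), which is why the tests below expect detection_classes of
+# [[1, 2], [2, 1]] even though FakeModel.postprocess emits [[0, 1], [1, 0]].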
+@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ExportInferenceGraphTest(tf.test.TestCase): + + def _save_checkpoint_from_mock_model(self, + checkpoint_path, + use_moving_averages, + enable_quantization=False): + g = tf.Graph() + with g.as_default(): + mock_model = FakeModel() + preprocessed_inputs, true_image_shapes = mock_model.preprocess( + tf.placeholder(tf.float32, shape=[None, None, None, 3])) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + if use_moving_averages: + tf.train.ExponentialMovingAverage(0.0).apply() + tf.train.get_or_create_global_step() + if enable_quantization: + graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_config.quantization.delay = 500000 + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False) + graph_rewriter_fn() + saver = tf.train.Saver() + init = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init) + saver.save(sess, checkpoint_path) + + def _load_inference_graph(self, inference_graph_path, is_binary=True): + od_graph = tf.Graph() + with od_graph.as_default(): + od_graph_def = tf.GraphDef() + with tf.gfile.GFile(inference_graph_path, mode='rb') as fid: + if is_binary: + od_graph_def.ParseFromString(fid.read()) + else: + text_format.Parse(fid.read(), od_graph_def) + tf.import_graph_def(od_graph_def, name='') + return od_graph + + def _create_tf_example(self, image_array): + with self.test_session(): + encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval() + def _bytes_feature(value): + return tf.train.Feature( + bytes_list=tf.train.BytesList(value=[six.ensure_binary(value)])) + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/encoded': _bytes_feature(encoded_image), + 'image/format': _bytes_feature('jpg'), + 'image/source_id': _bytes_feature('image_id') + })).SerializeToString() + return example + + def test_export_graph_with_image_tensor_input(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'saved_model.pb'))) + + def test_write_inference_graph(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + 
output_directory=output_directory, + write_inference_graph=True) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'inference_graph.pbtxt'))) + + def test_export_graph_with_fixed_size_image_tensor_input(self): + input_shape = [1, 320, 320, 3] + + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory, + input_shape=input_shape) + saved_model_path = os.path.join(output_directory, 'saved_model') + self.assertTrue( + os.path.exists(os.path.join(saved_model_path, 'saved_model.pb'))) + + with tf.Graph().as_default() as od_graph: + with self.test_session(graph=od_graph) as sess: + meta_graph = tf.saved_model.loader.load( + sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) + signature = meta_graph.signature_def['serving_default'] + input_tensor_name = signature.inputs['inputs'].name + image_tensor = od_graph.get_tensor_by_name(input_tensor_name) + self.assertSequenceEqual(image_tensor.get_shape().as_list(), + input_shape) + + def test_export_graph_with_tf_example_input(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'saved_model.pb'))) + + def test_export_graph_with_fixed_size_tf_example_input(self): + input_shape = [1, 320, 320, 3] + + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory, + input_shape=input_shape) + saved_model_path = os.path.join(output_directory, 'saved_model') + self.assertTrue( + os.path.exists(os.path.join(saved_model_path, 'saved_model.pb'))) + + def test_export_graph_with_encoded_image_string_input(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + 
self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='encoded_image_string_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'saved_model.pb'))) + + def test_export_graph_with_fixed_size_encoded_image_string_input(self): + input_shape = [1, 320, 320, 3] + + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, use_moving_averages=False) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='encoded_image_string_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory, + input_shape=input_shape) + saved_model_path = os.path.join(output_directory, 'saved_model') + self.assertTrue( + os.path.exists(os.path.join(saved_model_path, 'saved_model.pb'))) + + def _get_variables_in_checkpoint(self, checkpoint_file): + return set([ + var_name + for var_name, _ in tf.train.list_variables(checkpoint_file)]) + + def test_replace_variable_values_with_moving_averages(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + new_checkpoint_prefix = os.path.join(tmp_dir, 'new.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + graph = tf.Graph() + with graph.as_default(): + fake_model = FakeModel() + preprocessed_inputs, true_image_shapes = fake_model.preprocess( + tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3])) + predictions = fake_model.predict(preprocessed_inputs, true_image_shapes) + fake_model.postprocess(predictions, true_image_shapes) + exporter.replace_variable_values_with_moving_averages( + graph, trained_checkpoint_prefix, new_checkpoint_prefix) + + expected_variables = set(['conv2d/bias', 'conv2d/kernel']) + variables_in_old_ckpt = self._get_variables_in_checkpoint( + trained_checkpoint_prefix) + self.assertIn('conv2d/bias/ExponentialMovingAverage', + variables_in_old_ckpt) + self.assertIn('conv2d/kernel/ExponentialMovingAverage', + variables_in_old_ckpt) + variables_in_new_ckpt = self._get_variables_in_checkpoint( + new_checkpoint_prefix) + self.assertTrue(expected_variables.issubset(variables_in_new_ckpt)) + self.assertNotIn('conv2d/bias/ExponentialMovingAverage', + variables_in_new_ckpt) + self.assertNotIn('conv2d/kernel/ExponentialMovingAverage', + variables_in_new_ckpt) + + def test_export_graph_with_moving_averages(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + 
with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = True + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + self.assertTrue(os.path.exists(os.path.join( + output_directory, 'saved_model', 'saved_model.pb'))) + expected_variables = set(['conv2d/bias', 'conv2d/kernel', 'global_step']) + actual_variables = set( + [var_name for var_name, _ in tf.train.list_variables(output_directory)]) + self.assertTrue(expected_variables.issubset(actual_variables)) + + def test_export_model_with_quantization_nodes(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model( + trained_checkpoint_prefix, + use_moving_averages=False, + enable_quantization=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'inference_graph.pbtxt') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + text_format.Merge( + """graph_rewriter { + quantization { + delay: 50000 + activation_bits: 8 + weight_bits: 8 + } + }""", pipeline_config) + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory, + write_inference_graph=True) + self._load_inference_graph(inference_graph_path, is_binary=False) + has_quant_nodes = False + for v in variables_helper.get_global_variables_safely(): + if six.ensure_str(v.op.name).endswith('act_quant/min'): + has_quant_nodes = True + break + self.assertTrue(has_quant_nodes) + + def test_export_model_with_all_output_nodes(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True, + add_detection_features=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + inference_graph = self._load_inference_graph(inference_graph_path) + with self.test_session(graph=inference_graph): + inference_graph.get_tensor_by_name('image_tensor:0') + inference_graph.get_tensor_by_name('detection_boxes:0') + inference_graph.get_tensor_by_name('detection_scores:0') + inference_graph.get_tensor_by_name('detection_multiclass_scores:0') + inference_graph.get_tensor_by_name('detection_classes:0') + inference_graph.get_tensor_by_name('detection_keypoints:0') + inference_graph.get_tensor_by_name('detection_masks:0') + inference_graph.get_tensor_by_name('num_detections:0') + inference_graph.get_tensor_by_name('detection_features:0') + + def 
test_export_model_with_detection_only_nodes(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel(add_detection_masks=False) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + inference_graph = self._load_inference_graph(inference_graph_path) + with self.test_session(graph=inference_graph): + inference_graph.get_tensor_by_name('image_tensor:0') + inference_graph.get_tensor_by_name('detection_boxes:0') + inference_graph.get_tensor_by_name('detection_scores:0') + inference_graph.get_tensor_by_name('detection_multiclass_scores:0') + inference_graph.get_tensor_by_name('detection_classes:0') + inference_graph.get_tensor_by_name('num_detections:0') + with self.assertRaises(KeyError): + inference_graph.get_tensor_by_name('detection_keypoints:0') + inference_graph.get_tensor_by_name('detection_masks:0') + + def test_export_model_with_detection_only_nodes_and_detection_features(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel(add_detection_features=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + inference_graph = self._load_inference_graph(inference_graph_path) + with self.test_session(graph=inference_graph): + inference_graph.get_tensor_by_name('image_tensor:0') + inference_graph.get_tensor_by_name('detection_boxes:0') + inference_graph.get_tensor_by_name('detection_scores:0') + inference_graph.get_tensor_by_name('detection_multiclass_scores:0') + inference_graph.get_tensor_by_name('detection_classes:0') + inference_graph.get_tensor_by_name('num_detections:0') + inference_graph.get_tensor_by_name('detection_features:0') + with self.assertRaises(KeyError): + inference_graph.get_tensor_by_name('detection_keypoints:0') + inference_graph.get_tensor_by_name('detection_masks:0') + + def test_export_and_run_inference_with_image_tensor(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = 
pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + inference_graph = self._load_inference_graph(inference_graph_path) + with self.test_session(graph=inference_graph) as sess: + image_tensor = inference_graph.get_tensor_by_name('image_tensor:0') + boxes = inference_graph.get_tensor_by_name('detection_boxes:0') + scores = inference_graph.get_tensor_by_name('detection_scores:0') + classes = inference_graph.get_tensor_by_name('detection_classes:0') + keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') + masks = inference_graph.get_tensor_by_name('detection_masks:0') + num_detections = inference_graph.get_tensor_by_name('num_detections:0') + (boxes_np, scores_np, classes_np, keypoints_np, masks_np, + num_detections_np) = sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={image_tensor: np.ones((2, 4, 4, 3)).astype(np.uint8)}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def _create_encoded_image_string(self, image_array_np, encoding_format): + od_graph = tf.Graph() + with od_graph.as_default(): + if encoding_format == 'jpg': + encoded_string = tf.image.encode_jpeg(image_array_np) + elif encoding_format == 'png': + encoded_string = tf.image.encode_png(image_array_np) + else: + raise ValueError('Supports only the following formats: `jpg`, `png`') + with self.test_session(graph=od_graph): + return encoded_string.eval() + + def test_export_and_run_inference_with_encoded_image_string_tensor(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='encoded_image_string_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + inference_graph = self._load_inference_graph(inference_graph_path) + jpg_image_str = self._create_encoded_image_string( + np.ones((4, 4, 3)).astype(np.uint8), 'jpg') + png_image_str = self._create_encoded_image_string( + np.ones((4, 4, 3)).astype(np.uint8), 'png') + with self.test_session(graph=inference_graph) as sess: + image_str_tensor = inference_graph.get_tensor_by_name( + 'encoded_image_string_tensor:0') + boxes = inference_graph.get_tensor_by_name('detection_boxes:0') + scores = inference_graph.get_tensor_by_name('detection_scores:0') + multiclass_scores = inference_graph.get_tensor_by_name( + 
'detection_multiclass_scores:0') + classes = inference_graph.get_tensor_by_name('detection_classes:0') + keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') + masks = inference_graph.get_tensor_by_name('detection_masks:0') + num_detections = inference_graph.get_tensor_by_name('num_detections:0') + for image_str in [jpg_image_str, png_image_str]: + image_str_batch_np = np.hstack([image_str]* 2) + (boxes_np, scores_np, multiclass_scores_np, classes_np, keypoints_np, + masks_np, num_detections_np) = sess.run( + [ + boxes, scores, multiclass_scores, classes, keypoints, masks, + num_detections + ], + feed_dict={image_str_tensor: image_str_batch_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(multiclass_scores_np, [[[0.3, 0.7], [0.4, 0.6]], + [[0.1, 0.9], [0.0, 0.0]]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_raise_runtime_error_on_images_with_different_sizes(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='encoded_image_string_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + inference_graph = self._load_inference_graph(inference_graph_path) + large_image = self._create_encoded_image_string( + np.ones((4, 4, 3)).astype(np.uint8), 'jpg') + small_image = self._create_encoded_image_string( + np.ones((2, 2, 3)).astype(np.uint8), 'jpg') + + image_str_batch_np = np.hstack([large_image, small_image]) + with self.test_session(graph=inference_graph) as sess: + image_str_tensor = inference_graph.get_tensor_by_name( + 'encoded_image_string_tensor:0') + boxes = inference_graph.get_tensor_by_name('detection_boxes:0') + scores = inference_graph.get_tensor_by_name('detection_scores:0') + classes = inference_graph.get_tensor_by_name('detection_classes:0') + keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') + masks = inference_graph.get_tensor_by_name('detection_masks:0') + num_detections = inference_graph.get_tensor_by_name('num_detections:0') + with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, + 'TensorArray.*shape'): + sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={image_str_tensor: image_str_batch_np}) + + def test_export_and_run_inference_with_tf_example(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + 
inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + inference_graph = self._load_inference_graph(inference_graph_path) + tf_example_np = np.expand_dims(self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8)), axis=0) + with self.test_session(graph=inference_graph) as sess: + tf_example = inference_graph.get_tensor_by_name('tf_example:0') + boxes = inference_graph.get_tensor_by_name('detection_boxes:0') + scores = inference_graph.get_tensor_by_name('detection_scores:0') + classes = inference_graph.get_tensor_by_name('detection_classes:0') + keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') + masks = inference_graph.get_tensor_by_name('detection_masks:0') + num_detections = inference_graph.get_tensor_by_name('num_detections:0') + (boxes_np, scores_np, classes_np, keypoints_np, masks_np, + num_detections_np) = sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_write_frozen_graph(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + inference_graph_path = os.path.join(output_directory, + 'frozen_inference_graph.pb') + tf.gfile.MakeDirs(output_directory) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + outputs, _ = exporter.build_detection_graph( + input_type='tf_example', + detection_model=detection_model, + input_shape=None, + output_collection_name='inference_op', + graph_hook_fn=None) + output_node_names = ','.join(list(outputs.keys())) + saver = tf.train.Saver() + input_saver_def = saver.as_saver_def() + exporter.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=trained_checkpoint_prefix, + output_node_names=output_node_names, + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + output_graph=inference_graph_path, + clear_devices=True, + initializer_nodes='') + + inference_graph = self._load_inference_graph(inference_graph_path) + tf_example_np = np.expand_dims(self._create_tf_example( + np.ones((4, 
4, 3)).astype(np.uint8)), axis=0) + with self.test_session(graph=inference_graph) as sess: + tf_example = inference_graph.get_tensor_by_name('tf_example:0') + boxes = inference_graph.get_tensor_by_name('detection_boxes:0') + scores = inference_graph.get_tensor_by_name('detection_scores:0') + classes = inference_graph.get_tensor_by_name('detection_classes:0') + keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') + masks = inference_graph.get_tensor_by_name('detection_masks:0') + num_detections = inference_graph.get_tensor_by_name('num_detections:0') + (boxes_np, scores_np, classes_np, keypoints_np, masks_np, + num_detections_np) = sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_export_graph_saves_pipeline_file(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=True) + output_directory = os.path.join(tmp_dir, 'output') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter.export_inference_graph( + input_type='image_tensor', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + expected_pipeline_path = os.path.join( + output_directory, 'pipeline.config') + self.assertTrue(os.path.exists(expected_pipeline_path)) + + written_pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + with tf.gfile.GFile(expected_pipeline_path, 'r') as f: + proto_str = f.read() + text_format.Merge(proto_str, written_pipeline_config) + self.assertProtoEquals(pipeline_config, written_pipeline_config) + + def test_export_saved_model_and_run_inference(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + output_directory = os.path.join(tmp_dir, 'output') + saved_model_path = os.path.join(output_directory, 'saved_model') + + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + tf_example_np = np.hstack([self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8))] * 2) + with tf.Graph().as_default() as od_graph: + with self.test_session(graph=od_graph) as sess: + meta_graph = tf.saved_model.loader.load( + sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) + + signature = meta_graph.signature_def['serving_default'] + input_tensor_name = 
signature.inputs['inputs'].name + tf_example = od_graph.get_tensor_by_name(input_tensor_name) + + boxes = od_graph.get_tensor_by_name( + signature.outputs['detection_boxes'].name) + scores = od_graph.get_tensor_by_name( + signature.outputs['detection_scores'].name) + multiclass_scores = od_graph.get_tensor_by_name( + signature.outputs['detection_multiclass_scores'].name) + classes = od_graph.get_tensor_by_name( + signature.outputs['detection_classes'].name) + keypoints = od_graph.get_tensor_by_name( + signature.outputs['detection_keypoints'].name) + masks = od_graph.get_tensor_by_name( + signature.outputs['detection_masks'].name) + num_detections = od_graph.get_tensor_by_name( + signature.outputs['num_detections'].name) + + (boxes_np, scores_np, multiclass_scores_np, classes_np, keypoints_np, + masks_np, num_detections_np) = sess.run( + [boxes, scores, multiclass_scores, classes, keypoints, masks, + num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(multiclass_scores_np, [[[0.3, 0.7], [0.4, 0.6]], + [[0.1, 0.9], [0.0, 0.0]]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_write_saved_model(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + output_directory = os.path.join(tmp_dir, 'output') + saved_model_path = os.path.join(output_directory, 'saved_model') + tf.gfile.MakeDirs(output_directory) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + outputs, placeholder_tensor = exporter.build_detection_graph( + input_type='tf_example', + detection_model=detection_model, + input_shape=None, + output_collection_name='inference_op', + graph_hook_fn=None) + output_node_names = ','.join(list(outputs.keys())) + saver = tf.train.Saver() + input_saver_def = saver.as_saver_def() + frozen_graph_def = exporter.freeze_graph_with_def_protos( + input_graph_def=tf.get_default_graph().as_graph_def(), + input_saver_def=input_saver_def, + input_checkpoint=trained_checkpoint_prefix, + output_node_names=output_node_names, + restore_op_name='save/restore_all', + filename_tensor_name='save/Const:0', + output_graph='', + clear_devices=True, + initializer_nodes='') + exporter.write_saved_model( + saved_model_path=saved_model_path, + frozen_graph_def=frozen_graph_def, + inputs=placeholder_tensor, + outputs=outputs) + + tf_example_np = np.hstack([self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8))] * 2) + with tf.Graph().as_default() as od_graph: + with self.test_session(graph=od_graph) as sess: + meta_graph = tf.saved_model.loader.load( + sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) + + signature = meta_graph.signature_def['serving_default'] + input_tensor_name = 
signature.inputs['inputs'].name + tf_example = od_graph.get_tensor_by_name(input_tensor_name) + + boxes = od_graph.get_tensor_by_name( + signature.outputs['detection_boxes'].name) + scores = od_graph.get_tensor_by_name( + signature.outputs['detection_scores'].name) + classes = od_graph.get_tensor_by_name( + signature.outputs['detection_classes'].name) + keypoints = od_graph.get_tensor_by_name( + signature.outputs['detection_keypoints'].name) + masks = od_graph.get_tensor_by_name( + signature.outputs['detection_masks'].name) + num_detections = od_graph.get_tensor_by_name( + signature.outputs['num_detections'].name) + + (boxes_np, scores_np, classes_np, keypoints_np, masks_np, + num_detections_np) = sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_export_checkpoint_and_run_inference(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + output_directory = os.path.join(tmp_dir, 'output') + model_path = os.path.join(output_directory, 'model.ckpt') + meta_graph_path = model_path + '.meta' + + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + exporter.export_inference_graph( + input_type='tf_example', + pipeline_config=pipeline_config, + trained_checkpoint_prefix=trained_checkpoint_prefix, + output_directory=output_directory) + + tf_example_np = np.hstack([self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8))] * 2) + with tf.Graph().as_default() as od_graph: + with self.test_session(graph=od_graph) as sess: + new_saver = tf.train.import_meta_graph(meta_graph_path) + new_saver.restore(sess, model_path) + + tf_example = od_graph.get_tensor_by_name('tf_example:0') + boxes = od_graph.get_tensor_by_name('detection_boxes:0') + scores = od_graph.get_tensor_by_name('detection_scores:0') + classes = od_graph.get_tensor_by_name('detection_classes:0') + keypoints = od_graph.get_tensor_by_name('detection_keypoints:0') + masks = od_graph.get_tensor_by_name('detection_masks:0') + num_detections = od_graph.get_tensor_by_name('num_detections:0') + (boxes_np, scores_np, classes_np, keypoints_np, masks_np, + num_detections_np) = sess.run( + [boxes, scores, classes, keypoints, masks, num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def 
test_write_graph_and_checkpoint(self): + tmp_dir = self.get_temp_dir() + trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') + self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, + use_moving_averages=False) + output_directory = os.path.join(tmp_dir, 'output') + model_path = os.path.join(output_directory, 'model.ckpt') + meta_graph_path = model_path + '.meta' + tf.gfile.MakeDirs(output_directory) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel( + add_detection_keypoints=True, add_detection_masks=True) + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + pipeline_config.eval_config.use_moving_averages = False + detection_model = model_builder.build(pipeline_config.model, + is_training=False) + exporter.build_detection_graph( + input_type='tf_example', + detection_model=detection_model, + input_shape=None, + output_collection_name='inference_op', + graph_hook_fn=None) + saver = tf.train.Saver() + input_saver_def = saver.as_saver_def() + exporter.write_graph_and_checkpoint( + inference_graph_def=tf.get_default_graph().as_graph_def(), + model_path=model_path, + input_saver_def=input_saver_def, + trained_checkpoint_prefix=trained_checkpoint_prefix) + + tf_example_np = np.hstack([self._create_tf_example( + np.ones((4, 4, 3)).astype(np.uint8))] * 2) + with tf.Graph().as_default() as od_graph: + with self.test_session(graph=od_graph) as sess: + new_saver = tf.train.import_meta_graph(meta_graph_path) + new_saver.restore(sess, model_path) + + tf_example = od_graph.get_tensor_by_name('tf_example:0') + boxes = od_graph.get_tensor_by_name('detection_boxes:0') + scores = od_graph.get_tensor_by_name('detection_scores:0') + raw_boxes = od_graph.get_tensor_by_name('raw_detection_boxes:0') + raw_scores = od_graph.get_tensor_by_name('raw_detection_scores:0') + classes = od_graph.get_tensor_by_name('detection_classes:0') + keypoints = od_graph.get_tensor_by_name('detection_keypoints:0') + masks = od_graph.get_tensor_by_name('detection_masks:0') + num_detections = od_graph.get_tensor_by_name('num_detections:0') + (boxes_np, scores_np, raw_boxes_np, raw_scores_np, classes_np, + keypoints_np, masks_np, num_detections_np) = sess.run( + [boxes, scores, raw_boxes, raw_scores, classes, keypoints, masks, + num_detections], + feed_dict={tf_example: tf_example_np}) + self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(scores_np, [[0.7, 0.6], + [0.9, 0.0]]) + self.assertAllClose(raw_boxes_np, [[[0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, 0.0, 0.5]]]) + self.assertAllClose(raw_scores_np, [[0.7, 0.6], + [0.9, 0.5]]) + self.assertAllClose(classes_np, [[1, 2], + [2, 1]]) + self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) + self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) + self.assertAllClose(num_detections_np, [2, 1]) + + def test_rewrite_nn_resize_op(self): + g = tf.Graph() + with g.as_default(): + x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8)) + s = ops.nearest_neighbor_upsampling(x, 2) + t = s + y + exporter.rewrite_nn_resize_op() + + resize_op_found = False + for op in g.get_operations(): + if op.type == 'ResizeNearestNeighbor': + resize_op_found = True + self.assertEqual(op.inputs[0], x) + self.assertEqual(op.outputs[0].consumers()[0], t.op) + break + 
+ self.assertTrue(resize_op_found) + + def test_rewrite_nn_resize_op_quantized(self): + g = tf.Graph() + with g.as_default(): + x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + x_conv = slim.conv2d(x, 8, 1) + y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8)) + s = ops.nearest_neighbor_upsampling(x_conv, 2) + t = s + y + + graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_config.quantization.delay = 500000 + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False) + graph_rewriter_fn() + + exporter.rewrite_nn_resize_op(is_quantized=True) + + resize_op_found = False + for op in g.get_operations(): + if op.type == 'ResizeNearestNeighbor': + resize_op_found = True + self.assertEqual(op.inputs[0].op.type, 'FakeQuantWithMinMaxVars') + self.assertEqual(op.outputs[0].consumers()[0], t.op) + break + + self.assertTrue(resize_op_found) + + def test_rewrite_nn_resize_op_odd_size(self): + g = tf.Graph() + with g.as_default(): + x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + s = ops.nearest_neighbor_upsampling(x, 2) + t = s[:, :19, :19, :] + exporter.rewrite_nn_resize_op() + + resize_op_found = False + for op in g.get_operations(): + if op.type == 'ResizeNearestNeighbor': + resize_op_found = True + self.assertEqual(op.inputs[0], x) + self.assertEqual(op.outputs[0].consumers()[0], t.op) + break + + self.assertTrue(resize_op_found) + + def test_rewrite_nn_resize_op_quantized_odd_size(self): + g = tf.Graph() + with g.as_default(): + x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + x_conv = slim.conv2d(x, 8, 1) + s = ops.nearest_neighbor_upsampling(x_conv, 2) + t = s[:, :19, :19, :] + + graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() + graph_rewriter_config.quantization.delay = 500000 + graph_rewriter_fn = graph_rewriter_builder.build( + graph_rewriter_config, is_training=False) + graph_rewriter_fn() + + exporter.rewrite_nn_resize_op(is_quantized=True) + + resize_op_found = False + for op in g.get_operations(): + if op.type == 'ResizeNearestNeighbor': + resize_op_found = True + self.assertEqual(op.inputs[0].op.type, 'FakeQuantWithMinMaxVars') + self.assertEqual(op.outputs[0].consumers()[0], t.op) + break + + self.assertTrue(resize_op_found) + + def test_rewrite_nn_resize_op_multiple_path(self): + g = tf.Graph() + with g.as_default(): + with tf.name_scope('nearest_upsampling'): + x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + x_stack = tf.stack([tf.stack([x] * 2, axis=3)] * 2, axis=2) + x_reshape = tf.reshape(x_stack, [8, 20, 20, 8]) + + with tf.name_scope('nearest_upsampling'): + x_2 = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) + x_stack_2 = tf.stack([tf.stack([x_2] * 2, axis=3)] * 2, axis=2) + x_reshape_2 = tf.reshape(x_stack_2, [8, 20, 20, 8]) + + t = x_reshape + x_reshape_2 + + exporter.rewrite_nn_resize_op() + + graph_def = g.as_graph_def() + graph_def = strip_unused_lib.strip_unused( + graph_def, + input_node_names=[ + 'nearest_upsampling/Placeholder', 'nearest_upsampling_1/Placeholder' + ], + output_node_names=['add'], + placeholder_type_enum=dtypes.float32.as_datatype_enum) + + counter_resize_op = 0 + t_input_ops = [op.name for op in t.op.inputs] + for node in graph_def.node: + # Make sure Stacks are replaced. 
+ self.assertNotEqual(node.op, 'Pack') + if node.op == 'ResizeNearestNeighbor': + counter_resize_op += 1 + self.assertIn(six.ensure_str(node.name) + ':0', t_input_ops) + self.assertEqual(counter_resize_op, 2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/challenge_evaluation.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/challenge_evaluation.md new file mode 100644 index 0000000000000000000000000000000000000000..15f032d4e8ae807470ba1ae47e961b84c0976dc7 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/challenge_evaluation.md @@ -0,0 +1,215 @@ +# Open Images Challenge Evaluation + +The Object Detection API is currently supporting several evaluation metrics used +in the +[Open Images Challenge 2018](https://storage.googleapis.com/openimages/web/challenge.html) +and +[Open Images Challenge 2019](https://storage.googleapis.com/openimages/web/challenge2019.html). +In addition, several data processing tools are available. Detailed instructions +on using the tools for each track are available below. + +**NOTE:** all data links are updated to the Open Images Challenge 2019. + +## Object Detection Track + +The +[Object Detection metric](https://storage.googleapis.com/openimages/web/evaluation.html#object_detection_eval) +protocol requires a pre-processing of the released data to ensure correct +evaluation. The released data contains only leaf-most bounding box annotations +and image-level labels. The evaluation metric implementation is available in the +class `OpenImagesChallengeEvaluator`. + +1. Download + [class hierarchy of Open Images Detection Challenge 2019](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-label500-hierarchy.json) + in JSON format. +2. Download + [ground-truth boundling boxes](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-detection-bbox.csv) + and + [image-level labels](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-detection-human-imagelabels.csv). +3. Run the following command to create hierarchical expansion of the bounding + boxes and image-level label annotations: + +``` +HIERARCHY_FILE=/path/to/challenge-2019-label500-hierarchy.json +BOUNDING_BOXES=/path/to/challenge-2019-validation-detection-bbox +IMAGE_LABELS=/path/to/challenge-2019-validation-detection-human-imagelabels + +python object_detection/dataset_tools/oid_hierarchical_labels_expansion.py \ + --json_hierarchy_file=${HIERARCHY_FILE} \ + --input_annotations=${BOUNDING_BOXES}.csv \ + --output_annotations=${BOUNDING_BOXES}_expanded.csv \ + --annotation_type=1 + +python object_detection/dataset_tools/oid_hierarchical_labels_expansion.py \ + --json_hierarchy_file=${HIERARCHY_FILE} \ + --input_annotations=${IMAGE_LABELS}.csv \ + --output_annotations=${IMAGE_LABELS}_expanded.csv \ + --annotation_type=2 +``` + +1. If you are not using TensorFlow, you can run evaluation directly using your + algorithm's output and generated ground-truth files. {value=4} + +After step 3 you produced the ground-truth files suitable for running 'OID +Challenge Object Detection Metric 2019' evaluation. 
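If you want to sanity-check a predictions file before scoring it (the required columns are listed after the command below), a minimal sketch is shown here; it assumes pandas is available in your environment, and the file path is hypothetical:

```python
# Sanity-check sketch only: verify the predictions CSV has the expected columns.
import pandas as pd  # assumption: pandas is installed

REQUIRED_COLUMNS = {'ImageID', 'LabelName', 'Score', 'XMin', 'XMax', 'YMin', 'YMax'}
predictions = pd.read_csv('/path/to/detection_predictions.csv')  # hypothetical path
missing = REQUIRED_COLUMNS - set(predictions.columns)
assert not missing, 'Predictions file is missing columns: %s' % sorted(missing)
```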
To run the evaluation, use +the following command: + +``` +INPUT_PREDICTIONS=/path/to/detection_predictions.csv +OUTPUT_METRICS=/path/to/output/metrics/file + +python models/research/object_detection/metrics/oid_challenge_evaluation.py \ + --input_annotations_boxes=${BOUNDING_BOXES}_expanded.csv \ + --input_annotations_labels=${IMAGE_LABELS}_expanded.csv \ + --input_class_labelmap=object_detection/data/oid_object_detection_challenge_500_label_map.pbtxt \ + --input_predictions=${INPUT_PREDICTIONS} \ + --output_metrics=${OUTPUT_METRICS} \ +``` + +Note that predictions file must contain the following keys: +ImageID,LabelName,Score,XMin,XMax,YMin,YMax + +For the Object Detection Track, the participants will be ranked on: + +- "OpenImagesDetectionChallenge_Precision/mAP@0.5IOU" + +To use evaluation within TensorFlow training, use metric name +`oid_challenge_detection_metrics` in the evaluation config. + +## Instance Segmentation Track + +The +[Instance Segmentation metric](https://storage.googleapis.com/openimages/web/evaluation.html#instance_segmentation_eval) +can be directly evaluated using the ground-truth data and model predictions. The +evaluation metric implementation is available in the class +`OpenImagesChallengeEvaluator`. + +1. Download + [class hierarchy of Open Images Instance Segmentation Challenge 2019](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-label300-segmentable-hierarchy.json) + in JSON format. +2. Download + [ground-truth bounding boxes](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-segmentation-bbox.csv) + and + [image-level labels](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-segmentation-labels.csv). +3. Download instance segmentation files for the validation set (see + [Open Images Challenge Downloads page](https://storage.googleapis.com/openimages/web/challenge2019_downloads.html)). + The download consists of a set of .zip archives containing binary .png + masks. + Those should be transformed into a single CSV file in the format: + + ImageID,LabelName,ImageWidth,ImageHeight,XMin,YMin,XMax,YMax,IsGroupOf,Mask + where Mask is MS COCO RLE encoding, compressed with zip, and re-coded with + base64 encoding of a binary mask stored in .png file. See an example + implementation of the encoding function + [here](https://gist.github.com/pculliton/209398a2a52867580c6103e25e55d93c). + +1. 
Run the following command to create hierarchical expansion of the instance + segmentation, bounding boxes and image-level label annotations: {value=4} + +``` +HIERARCHY_FILE=/path/to/challenge-2019-label300-hierarchy.json +BOUNDING_BOXES=/path/to/challenge-2019-validation-detection-bbox +IMAGE_LABELS=/path/to/challenge-2019-validation-detection-human-imagelabels + +python object_detection/dataset_tools/oid_hierarchical_labels_expansion.py \ + --json_hierarchy_file=${HIERARCHY_FILE} \ + --input_annotations=${BOUNDING_BOXES}.csv \ + --output_annotations=${BOUNDING_BOXES}_expanded.csv \ + --annotation_type=1 + +python object_detection/dataset_tools/oid_hierarchical_labels_expansion.py \ + --json_hierarchy_file=${HIERARCHY_FILE} \ + --input_annotations=${IMAGE_LABELS}.csv \ + --output_annotations=${IMAGE_LABELS}_expanded.csv \ + --annotation_type=2 + +python object_detection/dataset_tools/oid_hierarchical_labels_expansion.py \ + --json_hierarchy_file=${HIERARCHY_FILE} \ + --input_annotations=${INSTANCE_SEGMENTATIONS}.csv \ + --output_annotations=${INSTANCE_SEGMENTATIONS}_expanded.csv \ + --annotation_type=1 +``` + +1. If you are not using TensorFlow, you can run evaluation directly using your + algorithm's output and generated ground-truth files. {value=4} + +``` +INPUT_PREDICTIONS=/path/to/instance_segmentation_predictions.csv +OUTPUT_METRICS=/path/to/output/metrics/file + +python models/research/object_detection/metrics/oid_challenge_evaluation.py \ + --input_annotations_boxes=${BOUNDING_BOXES}_expanded.csv \ + --input_annotations_labels=${IMAGE_LABELS}_expanded.csv \ + --input_class_labelmap=object_detection/data/oid_object_detection_challenge_500_label_map.pbtxt \ + --input_predictions=${INPUT_PREDICTIONS} \ + --input_annotations_segm=${INSTANCE_SEGMENTATIONS}_expanded.csv + --output_metrics=${OUTPUT_METRICS} \ +``` + +Note that predictions file must contain the following keys: +ImageID,ImageWidth,ImageHeight,LabelName,Score,Mask + +Mask must be encoded the same way as groundtruth masks. + +For the Instance Segmentation Track, the participants will be ranked on: + +- "OpenImagesInstanceSegmentationChallenge_Precision/mAP@0.5IOU" + +## Visual Relationships Detection Track + +The +[Visual Relationships Detection metrics](https://storage.googleapis.com/openimages/web/evaluation.html#visual_relationships_eval) +can be directly evaluated using the ground-truth data and model predictions. The +evaluation metric implementation is available in the class +`VRDRelationDetectionEvaluator`,`VRDPhraseDetectionEvaluator`. + +1. Download the ground-truth + [visual relationships annotations](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-vrd.csv) + and + [image-level labels](https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-validation-vrd-labels.csv). +2. 
Run the follwing command to produce final metrics: + +``` +INPUT_ANNOTATIONS_BOXES=/path/to/challenge-2018-train-vrd.csv +INPUT_ANNOTATIONS_LABELS=/path/to/challenge-2018-train-vrd-labels.csv +INPUT_PREDICTIONS=/path/to/predictions.csv +INPUT_CLASS_LABELMAP=/path/to/oid_object_detection_challenge_500_label_map.pbtxt +INPUT_RELATIONSHIP_LABELMAP=/path/to/relationships_labelmap.pbtxt +OUTPUT_METRICS=/path/to/output/metrics/file + +echo "item { name: '/m/02gy9n' id: 602 display_name: 'Transparent' } +item { name: '/m/05z87' id: 603 display_name: 'Plastic' } +item { name: '/m/0dnr7' id: 604 display_name: '(made of)Textile' } +item { name: '/m/04lbp' id: 605 display_name: '(made of)Leather' } +item { name: '/m/083vt' id: 606 display_name: 'Wooden'} +">>${INPUT_CLASS_LABELMAP} + +echo "item { name: 'at' id: 1 display_name: 'at' } +item { name: 'on' id: 2 display_name: 'on (top of)' } +item { name: 'holds' id: 3 display_name: 'holds' } +item { name: 'plays' id: 4 display_name: 'plays' } +item { name: 'interacts_with' id: 5 display_name: 'interacts with' } +item { name: 'wears' id: 6 display_name: 'wears' } +item { name: 'is' id: 7 display_name: 'is' } +item { name: 'inside_of' id: 8 display_name: 'inside of' } +item { name: 'under' id: 9 display_name: 'under' } +item { name: 'hits' id: 10 display_name: 'hits' } +"> ${INPUT_RELATIONSHIP_LABELMAP} + +python object_detection/metrics/oid_vrd_challenge_evaluation.py \ + --input_annotations_boxes=${INPUT_ANNOTATIONS_BOXES} \ + --input_annotations_labels=${INPUT_ANNOTATIONS_LABELS} \ + --input_predictions=${INPUT_PREDICTIONS} \ + --input_class_labelmap=${INPUT_CLASS_LABELMAP} \ + --input_relationship_labelmap=${INPUT_RELATIONSHIP_LABELMAP} \ + --output_metrics=${OUTPUT_METRICS} +``` + +Note that predictions file must contain the following keys: +ImageID,LabelName1,LabelName2,RelationshipLabel,Score,XMin1,XMax1,YMin1,YMax1,XMin2,XMax2,YMin2,YMax2 + +The participants of the challenge will be evaluated by weighted average of the following three metrics: + +- "VRDMetric_Relationships_mAP@0.5IOU" +- "VRDMetric_Relationships_Recall@50@0.5IOU" +- "VRDMetric_Phrases_mAP@0.5IOU" diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/configuring_jobs.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/configuring_jobs.md new file mode 100644 index 0000000000000000000000000000000000000000..59925f293b5cf8ba05d1db33b9dc9577bd06121a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/configuring_jobs.md @@ -0,0 +1,147 @@ +# Configuring the Object Detection Training Pipeline + +## Overview + +The TensorFlow Object Detection API uses protobuf files to configure the +training and evaluation process. The schema for the training pipeline can be +found in object_detection/protos/pipeline.proto. At a high level, the config +file is split into 5 parts: + +1. The `model` configuration. This defines what type of model will be trained +(ie. meta-architecture, feature extractor). +2. The `train_config`, which decides what parameters should be used to train +model parameters (ie. SGD parameters, input preprocessing and feature extractor +initialization values). +3. The `eval_config`, which determines what set of metrics will be reported for +evaluation. +4. The `train_input_config`, which defines what dataset the model should be +trained on. +5. The `eval_input_config`, which defines what dataset the model will be +evaluated on. 
Typically this should be different than the training input +dataset. + +A skeleton configuration file is shown below: + +``` +model { +(... Add model config here...) +} + +train_config : { +(... Add train_config here...) +} + +train_input_reader: { +(... Add train_input configuration here...) +} + +eval_config: { +} + +eval_input_reader: { +(... Add eval_input configuration here...) +} +``` + +## Picking Model Parameters + +There are a large number of model parameters to configure. The best settings +will depend on your given application. Faster R-CNN models are better suited to +cases where high accuracy is desired and latency is of lower priority. +Conversely, if processing time is the most important factor, SSD models are +recommended. Read [our paper](https://arxiv.org/abs/1611.10012) for a more +detailed discussion on the speed vs accuracy tradeoff. + +To help new users get started, sample model configurations have been provided +in the object_detection/samples/configs folder. The contents of these +configuration files can be pasted into `model` field of the skeleton +configuration. Users should note that the `num_classes` field should be changed +to a value suited for the dataset the user is training on. + +## Defining Inputs + +The TensorFlow Object Detection API accepts inputs in the TFRecord file format. +Users must specify the locations of both the training and evaluation files. +Additionally, users should also specify a label map, which define the mapping +between a class id and class name. The label map should be identical between +training and evaluation datasets. + +An example input configuration looks as follows: + +``` +tf_record_input_reader { + input_path: "/usr/home/username/data/train.record" +} +label_map_path: "/usr/home/username/data/label_map.pbtxt" +``` + +Users should substitute the `input_path` and `label_map_path` arguments and +insert the input configuration into the `train_input_reader` and +`eval_input_reader` fields in the skeleton configuration. Note that the paths +can also point to Google Cloud Storage buckets (ie. +"gs://project_bucket/train.record") for use on Google Cloud. + +## Configuring the Trainer + +The `train_config` defines parts of the training process: + +1. Model parameter initialization. +2. Input preprocessing. +3. SGD parameters. + +A sample `train_config` is below: + +``` +batch_size: 1 +optimizer { + momentum_optimizer: { + learning_rate: { + manual_step_learning_rate { + initial_learning_rate: 0.0002 + schedule { + step: 0 + learning_rate: .0002 + } + schedule { + step: 900000 + learning_rate: .00002 + } + schedule { + step: 1200000 + learning_rate: .000002 + } + } + } + momentum_optimizer_value: 0.9 + } + use_moving_average: false +} +fine_tune_checkpoint: "/usr/home/username/tmp/model.ckpt-#####" +from_detection_checkpoint: true +load_all_detection_checkpoint_vars: true +gradient_clipping_by_norm: 10.0 +data_augmentation_options { + random_horizontal_flip { + } +} +``` + +### Input Preprocessing + +The `data_augmentation_options` in `train_config` can be used to specify +how training data can be modified. This field is optional. + +### SGD Parameters + +The remainings parameters in `train_config` are hyperparameters for gradient +descent. Please note that the optimal learning rates provided in these +configuration files may depend on the specifics of the training setup (e.g. +number of workers, gpu type). + +## Configuring the Evaluator + +The main components to set in `eval_config` are `num_examples` and +`metrics_set`. 
The parameter `num_examples` indicates the number of batches ( +currently of batch size 1) used for an evaluation cycle, and often is the total +size of the evaluation dataset. The parameter `metrics_set` indicates which +metrics to run during evaluation (i.e. `"coco_detection_metrics"`). diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/context_rcnn.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/context_rcnn.md new file mode 100644 index 0000000000000000000000000000000000000000..14a42d89afed63d91298a7a55b34b1a481d129a8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/context_rcnn.md @@ -0,0 +1,201 @@ +# Context R-CNN + +[![TensorFlow 1.15](https://img.shields.io/badge/TensorFlow-1.15-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0) + +Context R-CNN is an object detection model that uses contextual features to +improve object detection. See https://arxiv.org/abs/1912.03538 for more details. + +## Table of Contents + +* [Preparing Context Data for Context R-CNN](#preparing-context-data-for-context-r-cnn) + + [Generating TfRecords from a set of images and a COCO-CameraTraps style + JSON](#generating-tfrecords-from-a-set-of-images-and-a-coco-cameratraps-style-json) + + [Generating weakly-supervised bounding box labels for image-labeled data](#generating-weakly-supervised-bounding-box-labels-for-image-labeled-data) + + [Generating and saving contextual features for each image](#generating-and-saving-contextual-features-for-each-image) + + [Building up contextual memory banks and storing them for each context + group](#building-up-contextual-memory-banks-and-storing-them-for-each-context-group) +- [Training a Context R-CNN Model](#training-a-context-r-cnn-model) +- [Exporting a Context R-CNN Model](#exporting-a-context-r-cnn-model) + +## Preparing Context Data for Context R-CNN + +In this section, we will walk through the process of generating TfRecords with +contextual features. We focus on building context from object-centric features +generated with a pre-trained Faster R-CNN model, but you can adapt the provided +code to use alternative feature extractors. + +Each of these data processing scripts uses Apache Beam, which can be installed +using + +``` +pip install apache-beam +``` + +and can be run locally, or on a cluster for efficient processing of large +amounts of data. Note that generate_detection_data.py and +generate_embedding_data.py both involve running inference, and may be very slow +to run locally. See the +[Apache Beam documentation](https://beam.apache.org/documentation/runners/dataflow/) +for more information, and Google Cloud Documentation for a tutorial on +[running Beam jobs on DataFlow](https://cloud.google.com/dataflow/docs/quickstarts/quickstart-python). + +### Generating TfRecords from a set of images and a COCO-CameraTraps style JSON + +If your data is already stored in TfRecords, you can skip this first step. + +We assume a COCO-CameraTraps json format, as described on +[LILA.science](https://github.com/microsoft/CameraTraps/blob/master/data_management/README.md). + +COCO-CameraTraps is a format that adds static-camera-specific fields, such as a +location ID and datetime, to the well-established COCO format. 
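For orientation, a minimal, purely illustrative sketch of one image entry in this format is shown below; the field names follow the COCO-CameraTraps description linked above, and all values are invented:

```python
# Illustrative only -- not taken from a real dataset.
image_entry = {
    'id': 'loc0023_2019-06-01_00-12-31_img001',
    'file_name': 'loc0023/img001.jpg',
    'width': 2048,
    'height': 1536,
    'location': 'loc0023',              # the contextual group (camera) ID
    'datetime': '2019-06-01 00:12:31',  # capture time, used to build memory banks
}
```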
To generate +appropriate context later on, be sure you have specified each contextual group +with a different location ID, which in the static camera case would be the ID of +the camera, as well as the datetime each photo was taken. We assume that empty +images will be labeled 'empty' with class id 0. + +To generate TfRecords from your database and local image folder, run + +``` +python object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py \ + --alsologtostderr \ + --output_tfrecord_prefix="/path/to/output/tfrecord/location/prefix" \ + --image_directory="/path/to/image/folder/" \ + --input_annotations_file="path/to/annotations.json" +``` + +### Generating weakly-supervised bounding box labels for image-labeled data + +If all your data already has bounding box labels you can skip this step. + +Many camera trap datasets do not have bounding box labels, or only have bounding +box labels for some of the data. We have provided code to add bounding boxes +from a pretrained model (such as the +[Microsoft AI for Earth MegaDetector](https://github.com/microsoft/CameraTraps/blob/master/megadetector.md)) +and match the boxes to the image-level class label. + +To export your pretrained detection model, run + +``` +python object_detection/export_inference_graph.py \ + --alsologtostderr \ + --input_type tf_example \ + --pipeline_config_path path/to/faster_rcnn_model.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory +``` + +To add bounding boxes to your dataset using the above model, run + +``` +python object_detection/dataset_tools/context_rcnn/generate_detection_data.py \ + --alsologtostderr \ + --input_tfrecord path/to/input_tfrecord@X \ + --output_tfrecord path/to/output_tfrecord@X \ + --model_dir path/to/exported_model_directory/saved_model +``` + +If an image already has bounding box labels, those labels are left unchanged. If +an image is labeled 'empty' (class ID 0), we will not generate boxes for that +image. + +### Generating and saving contextual features for each image + +We next extract and store features for each image from a pretrained model. This +model can be the same model as above, or be a class-specific detection model +trained on data from your classes of interest. + +To export your pretrained detection model, run + +``` +python object_detection/export_inference_graph.py \ + --alsologtostderr \ + --input_type tf_example \ + --pipeline_config_path path/to/pipeline.config \ + --trained_checkpoint_prefix path/to/model.ckpt \ + --output_directory path/to/exported_model_directory \ + --additional_output_tensor_names detection_features +``` + +Make sure that you have set `output_final_box_features: true` within +your config file before exporting. This is needed to export the features as an +output, but it does not need to be set during training. 
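If you prefer to flip this flag programmatically rather than editing the config by hand, a sketch is below; it assumes a Faster R-CNN based pipeline in which the flag lives under `model.faster_rcnn`, and the exact field location may differ between releases of the API:

```python
# Sketch: enable box-feature output before exporting.
# Assumptions: the config path is hypothetical, and the proto field location
# (model.faster_rcnn.output_final_box_features) may vary between API releases.
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2

config_path = '/path/to/pipeline.config'  # hypothetical path
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path) as f:
    text_format.Merge(f.read(), pipeline_config)
pipeline_config.model.faster_rcnn.output_final_box_features = True
with open(config_path, 'w') as f:
    f.write(text_format.MessageToString(pipeline_config))
```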
+ +To generate and save contextual features for your data, run + +``` +python object_detection/dataset_tools/context_rcnn/generate_embedding_data.py \ + --alsologtostderr \ + --embedding_input_tfrecord path/to/input_tfrecords* \ + --embedding_output_tfrecord path/to/output_tfrecords \ + --embedding_model_dir path/to/exported_model_directory/saved_model +``` + +### Building up contextual memory banks and storing them for each context group + +To build the context features you just added for each image into memory banks, +run + +``` +python object_detection/dataset_tools/context_rcnn/add_context_to_examples.py \ + --input_tfrecord path/to/input_tfrecords* \ + --output_tfrecord path/to/output_tfrecords \ + --sequence_key image/location \ + --time_horizon month +``` + +where the input_tfrecords for add_context_to_examples.py are the +output_tfrecords from generate_embedding_data.py. + +For all options, see add_context_to_examples.py. By default, this code builds +TfSequenceExamples, which are more data efficient (this allows you to store the +context features once for each context group, as opposed to once per image). If +you would like to export TfExamples instead, set flag `--output_type +tf_example`. + +If you use TfSequenceExamples, you must be sure to set `input_type: +TF_SEQUENCE_EXAMPLE` within your Context R-CNN configs for both +train_input_reader and test_input_reader. See +`object_detection/test_data/context_rcnn_camera_trap.config` +for an example. + +## Training a Context R-CNN Model + +To train a Context R-CNN model, you must first set up your config file. See +`test_data/context_rcnn_camera_trap.config` for an example. The important +difference between this config and a Faster R-CNN config is the inclusion of a +`context_config` within the model, which defines the necessary Context R-CNN +parameters. + +``` +context_config { + max_num_context_features: 2000 + context_feature_length: 2057 + } +``` + +Once your config file has been updated with your local paths, you can follow +along with documentation for running [locally](running_locally.md), or +[on the cloud](running_on_cloud.md). + +## Exporting a Context R-CNN Model + +Since Context R-CNN takes context features as well as images as input, we have +to explicitly define the other inputs ("side_inputs") to the model when +exporting, as below. This example is shown with default context feature shapes. + +``` +python export_inference_graph.py \ + --input_type image_tensor \ + --input_shape 1,-1,-1,3 \ + --pipeline_config_path /path/to/context_rcnn_model/pipeline.config \ + --trained_checkpoint_prefix /path/to/context_rcnn_model/model.ckpt \ + --output_directory /path/to/output_directory \ + --use_side_inputs True \ + --side_input_shapes 1,2000,2057/1 \ + --side_input_names context_features,valid_context_size \ + --side_input_types float,int + +``` + +If you have questions about Context R-CNN, please contact +[Sara Beery](https://beerys.github.io/). diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/defining_your_own_model.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/defining_your_own_model.md new file mode 100644 index 0000000000000000000000000000000000000000..dabc0649f6e5c98cd1db3fdf700c843b3ff7d85b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/defining_your_own_model.md @@ -0,0 +1,137 @@ +# So you want to create a new model! 
+ +In this section, we discuss some of the abstractions that we use +for defining detection models. If you would like to define a new model +architecture for detection and use it in the TensorFlow Detection API, +then this section should also serve as a high level guide to the files that you +will need to edit to get your new model working. + +## DetectionModels (`object_detection/core/model.py`) + +In order to be trained, evaluated, and exported for serving using our +provided binaries, all models under the TensorFlow Object Detection API must +implement the `DetectionModel` interface (see the full definition in `object_detection/core/model.py`). In particular, +each of these models are responsible for implementing 5 functions: + +* `preprocess`: Run any preprocessing (e.g., scaling/shifting/reshaping) of + input values that is necessary prior to running the detector on an input + image. +* `predict`: Produce “raw” prediction tensors that can be passed to loss or + postprocess functions. +* `postprocess`: Convert predicted output tensors to final detections. +* `loss`: Compute scalar loss tensors with respect to provided groundtruth. +* `restore`: Load a checkpoint into the TensorFlow graph. + +Given a `DetectionModel` at training time, we pass each image batch through +the following sequence of functions to compute a loss which can be optimized via +SGD: + +``` +inputs (images tensor) -> preprocess -> predict -> loss -> outputs (loss tensor) +``` + +And at eval time, we pass each image batch through the following sequence of +functions to produce a set of detections: + +``` +inputs (images tensor) -> preprocess -> predict -> postprocess -> + outputs (boxes tensor, scores tensor, classes tensor, num_detections tensor) +``` + +Some conventions to be aware of: + +* `DetectionModel`s should make no assumptions about the input size or aspect + ratio --- they are responsible for doing any resize/reshaping necessary + (see docstring for the `preprocess` function). +* Output classes are always integers in the range `[0, num_classes)`. + Any mapping of these integers to semantic labels is to be handled outside + of this class. We never explicitly emit a “background class” --- thus 0 is + the first non-background class and any logic of predicting and removing + implicit background classes must be handled internally by the implementation. +* Detected boxes are to be interpreted as being in + `[y_min, x_min, y_max, x_max]` format and normalized relative to the + image window. +* We do not specifically assume any kind of probabilistic interpretation of the + scores --- the only important thing is their relative ordering. Thus + implementations of the postprocess function are free to output logits, + probabilities, calibrated probabilities, or anything else. + +## Defining a new Faster R-CNN or SSD Feature Extractor + +In most cases, you probably will not implement a `DetectionModel` from scratch +--- instead you might create a new feature extractor to be used by one of the +SSD or Faster R-CNN meta-architectures. (We think of meta-architectures as +classes that define entire families of models using the `DetectionModel` +abstraction). + +Note: For the following discussion to make sense, we recommend first becoming +familiar with the [Faster R-CNN](https://arxiv.org/abs/1506.01497) paper. 
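For concreteness, the train-time sequence from the previous section corresponds roughly to the hand-written driver sketched below. It is only a sketch, not the API's actual training loop: the placeholders and the k-hot class shape are illustrative, and groundtruth is attached via `provide_groundtruth` before `loss` is called.

```python
# Sketch only: drive a DetectionModel through preprocess -> predict -> loss.
import tensorflow as tf
from object_detection.builders import model_builder

# `pipeline_config` is assumed to be an already-parsed TrainEvalPipelineConfig.
detection_model = model_builder.build(pipeline_config.model, is_training=True)

images = tf.placeholder(tf.float32, shape=[1, None, None, 3])
groundtruth_boxes_list = [tf.placeholder(tf.float32, shape=[None, 4])]
groundtruth_classes_list = [tf.placeholder(tf.float32, shape=[None, 3])]  # k-hot labels
detection_model.provide_groundtruth(
    groundtruth_boxes_list=groundtruth_boxes_list,
    groundtruth_classes_list=groundtruth_classes_list)

preprocessed_images, true_image_shapes = detection_model.preprocess(images)
prediction_dict = detection_model.predict(preprocessed_images, true_image_shapes)
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)
total_loss = tf.add_n(list(losses_dict.values()))  # scalar to hand to an optimizer
```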
+ +Let’s now imagine that you have invented a brand new network architecture +(say, “InceptionV100”) for classification and want to see how InceptionV100 +would behave as a feature extractor for detection (say, with Faster R-CNN). +A similar procedure would hold for SSD models, but we’ll discuss Faster R-CNN. + +To use InceptionV100, we will have to define a new +`FasterRCNNFeatureExtractor` and pass it to our `FasterRCNNMetaArch` +constructor as input. See +`object_detection/meta_architectures/faster_rcnn_meta_arch.py` for definitions +of `FasterRCNNFeatureExtractor` and `FasterRCNNMetaArch`, respectively. +A `FasterRCNNFeatureExtractor` must define a few +functions: + +* `preprocess`: Run any preprocessing of input values that is necessary prior + to running the detector on an input image. +* `_extract_proposal_features`: Extract first stage Region Proposal Network + (RPN) features. +* `_extract_box_classifier_features`: Extract second stage Box Classifier + features. +* `restore_from_classification_checkpoint_fn`: Load a checkpoint into the + TensorFlow graph. + +See the `object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py` +definition as one example. Some remarks: + +* We typically initialize the weights of this feature extractor + using those from the + [Slim Resnet-101 classification checkpoint](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models), + and we know + that images were preprocessed when training this checkpoint + by subtracting a channel mean from each input + image. Thus, we implement the preprocess function to replicate the same + channel mean subtraction behavior. +* The “full” resnet classification network defined in slim is cut into two + parts --- all but the last “resnet block” is put into the + `_extract_proposal_features` function and the final block is separately + defined in the `_extract_box_classifier_features function`. In general, + some experimentation may be required to decide on an optimal layer at + which to “cut” your feature extractor into these two pieces for Faster R-CNN. + +## Register your model for configuration + +Assuming that your new feature extractor does not require nonstandard +configuration, you will want to ideally be able to simply change the +“feature_extractor.type” fields in your configuration protos to point to a +new feature extractor. In order for our API to know how to understand this +new type though, you will first have to register your new feature +extractor with the model builder (`object_detection/builders/model_builder.py`), +whose job is to create models from config protos.. + +Registration is simple --- just add a pointer to the new Feature Extractor +class that you have defined in one of the SSD or Faster R-CNN Feature +Extractor Class maps at the top of the +`object_detection/builders/model_builder.py` file. +We recommend adding a test in `object_detection/builders/model_builder_test.py` +to make sure that parsing your proto will work as expected. + +## Taking your new model for a spin + +After registration you are ready to go with your model! Some final tips: + +* To save time debugging, try running your configuration file locally first + (both training and evaluation). +* Do a sweep of learning rates to figure out which learning rate is best + for your model. 
+* A small but often important detail: you may find it necessary to disable + batchnorm training (that is, load the batch norm parameters from the + classification checkpoint, but do not update them during gradient descent). diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/evaluation_protocols.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/evaluation_protocols.md new file mode 100644 index 0000000000000000000000000000000000000000..d5a070f6bc0a7bf9721a0e11fe19690f926d3e8f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/evaluation_protocols.md @@ -0,0 +1,163 @@ +# Supported object detection evaluation protocols + +The TensorFlow Object Detection API currently supports three evaluation protocols, +that can be configured in `EvalConfig` by setting `metrics_set` to the +corresponding value. + +## PASCAL VOC 2010 detection metric + +`EvalConfig.metrics_set='pascal_voc_detection_metrics'` + +The commonly used mAP metric for evaluating the quality of object detectors, +computed according to the protocol of the PASCAL VOC Challenge 2010-2012. The +protocol is available +[here](http://host.robots.ox.ac.uk/pascal/VOC/voc2010/devkit_doc_08-May-2010.pdf). + +## Weighted PASCAL VOC detection metric + +`EvalConfig.metrics_set='weighted_pascal_voc_detection_metrics'` + +The weighted PASCAL metric computes the mean average precision as the average +precision when treating all classes as a single class. In comparison, +PASCAL metrics computes the mean average precision as the mean of the +per-class average precisions. + +For example, the test set consists of two classes, "cat" and "dog", and there +are ten times more boxes of "cat" than those of "dog". According to PASCAL VOC +2010 metric, performance on each of the two classes would contribute equally +towards the final mAP value, while for the Weighted PASCAL VOC metric the final +mAP value will be influenced by frequency of each class. + +## PASCAL VOC 2010 instance segmentation metric + +`EvalConfig.metrics_set='pascal_voc_instance_segmentation_metrics'` + +Similar to Pascal VOC 2010 detection metric, but computes the intersection over +union based on the object masks instead of object boxes. + +## Weighted PASCAL VOC instance segmentation metric + +`EvalConfig.metrics_set='weighted_pascal_voc_instance_segmentation_metrics'` + +Similar to the weighted pascal voc 2010 detection metric, but computes the +intersection over union based on the object masks instead of object boxes. + + +## COCO detection metrics + +`EvalConfig.metrics_set='coco_detection_metrics'` + +The COCO metrics are the official detection metrics used to score the +[COCO competition](http://cocodataset.org/) and are similar to Pascal VOC +metrics but have a slightly different implementation and report additional +statistics such as mAP at IOU thresholds of .5:.95, and precision/recall +statistics for small, medium, and large objects. +See the +[pycocotools](https://github.com/cocodataset/cocoapi/tree/master/PythonAPI) +repository for more details. + +## COCO mask metrics + +`EvalConfig.metrics_set='coco_mask_metrics'` + +Similar to the COCO detection metrics, but computes the +intersection over union based on the object masks instead of object boxes. 
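Whichever protocol you choose is selected through `metrics_set`; if you want to set it programmatically instead of editing the config file by hand, a minimal sketch (hypothetical config path, using the `pipeline_pb2`/`text_format` pattern used elsewhere in this repository) is:

```python
# Sketch: point eval_config.metrics_set at the COCO detection metrics.
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2

pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with open('/path/to/pipeline.config') as f:  # hypothetical path
    text_format.Merge(f.read(), pipeline_config)
del pipeline_config.eval_config.metrics_set[:]
pipeline_config.eval_config.metrics_set.append('coco_detection_metrics')
```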
+ +## Open Images V2 detection metric + +`EvalConfig.metrics_set='oid_V2_detection_metrics'` + +This metric is defined originally for evaluating detector performance on [Open +Images V2 dataset](https://github.com/openimages/dataset) and is fairly similar +to the PASCAL VOC 2010 metric mentioned above. It computes interpolated average +precision (AP) for each class and averages it among all classes (mAP). + +The difference to the PASCAL VOC 2010 metric is the following: Open Images +annotations contain `group-of` ground-truth boxes (see [Open Images data +description](https://github.com/openimages/dataset#annotations-human-bboxcsv)), +that are treated differently for the purpose of deciding whether detections are +"true positives", "ignored", "false positives". Here we define these three +cases: + +A detection is a "true positive" if there is a non-group-of ground-truth box, +such that: + +* The detection box and the ground-truth box are of the same class, and + intersection-over-union (IoU) between the detection box and the ground-truth + box is greater than the IoU threshold (default value 0.5). \ + Illustration of handling non-group-of boxes: \ + ![alt + groupof_case_eval](img/nongroupof_case_eval.png "illustration of handling non-group-of boxes: yellow box - ground truth bounding box; green box - true positive; red box - false positives.") + + * yellow box - ground-truth box; + * green box - true positive; + * red boxes - false positives. + +* This is the highest scoring detection for this ground truth box that + satisfies the criteria above. + +A detection is "ignored" if it is not a true positive, and there is a `group-of` +ground-truth box such that: + +* The detection box and the ground-truth box are of the same class, and the + area of intersection between the detection box and the ground-truth box + divided by the area of the detection is greater than 0.5. This is intended + to measure whether the detection box is approximately inside the group-of + ground-truth box. \ + Illustration of handling `group-of` boxes: \ + ![alt + groupof_case_eval](img/groupof_case_eval.png "illustration of handling group-of boxes: yellow box - ground truth bounding box; grey boxes - two detections of cars, that are ignored; red box - false positive.") + + * yellow box - ground-truth box; + * grey boxes - two detections on cars, that are ignored; + * red box - false positive. + +A detection is a "false positive" if it is neither a "true positive" nor +"ignored". + +Precision and recall are defined as: + +* Precision = number-of-true-positives/(number-of-true-positives + number-of-false-positives) +* Recall = number-of-true-positives/number-of-non-group-of-boxes + +Note that detections ignored as firing on a `group-of` ground-truth box do not +contribute to the number of true positives. + +The labels in Open Images are organized in a +[hierarchy](https://storage.googleapis.com/openimages/2017_07/bbox_labels_vis/bbox_labels_vis.html). +Ground-truth bounding-boxes are annotated with the most specific class available +in the hierarchy. For example, "car" has two children "limousine" and "van". Any +other kind of car is annotated as "car" (for example, a sedan). Given this +convention, the evaluation software treats all classes independently, ignoring +the hierarchy. To achieve high performance values, object detectors should +output bounding-boxes labelled in the same manner. + +The old metric name is DEPRECATED. 
+`EvalConfig.metrics_set='open_images_V2_detection_metrics'` + +## OID Challenge Object Detection Metric + +`EvalConfig.metrics_set='oid_challenge_detection_metrics'` + +The metric for the OID Challenge Object Detection Metric 2018/2019 Object +Detection track. The description is provided on the +[Open Images Challenge website](https://storage.googleapis.com/openimages/web/evaluation.html#object_detection_eval). + +The old metric name is DEPRECATED. +`EvalConfig.metrics_set='oid_challenge_object_detection_metrics'` + +## OID Challenge Visual Relationship Detection Metric + +The metric for the OID Challenge Visual Relationship Detection Metric 2018,2019 +Visual Relationship Detection track. The description is provided on the +[Open Images Challenge website](https://storage.googleapis.com/openimages/web/evaluation.html#visual_relationships_eval). +Note: this is currently a stand-alone metric, that can be used only through the +`metrics/oid_vrd_challenge_evaluation.py` util. + +## OID Challenge Instance Segmentation Metric + +`EvalConfig.metrics_set='oid_challenge_segmentation_metrics'` + +The metric for the OID Challenge Instance Segmentation Metric 2019, Instance +Segmentation track. The description is provided on the +[Open Images Challenge website](https://storage.googleapis.com/openimages/web/evaluation.html#instance_segmentation_eval). diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/exporting_models.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/exporting_models.md new file mode 100644 index 0000000000000000000000000000000000000000..701acf3c4305faf3d0fa830bfa755b56c2fe8042 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/exporting_models.md @@ -0,0 +1,38 @@ +# Exporting a trained model for inference + +[![TensorFlow 1.15](https://img.shields.io/badge/TensorFlow-1.15-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0) + +After your model has been trained, you should export it to a TensorFlow +graph proto. A checkpoint will typically consist of three files: + +* model.ckpt-${CHECKPOINT_NUMBER}.data-00000-of-00001 +* model.ckpt-${CHECKPOINT_NUMBER}.index +* model.ckpt-${CHECKPOINT_NUMBER}.meta + +After you've identified a candidate checkpoint to export, run the following +command from tensorflow/models/research: + +``` bash +# From tensorflow/models/research/ +INPUT_TYPE=image_tensor +PIPELINE_CONFIG_PATH={path to pipeline config file} +TRAINED_CKPT_PREFIX={path to model.ckpt} +EXPORT_DIR={path to folder that will be used for export} +python object_detection/export_inference_graph.py \ + --input_type=${INPUT_TYPE} \ + --pipeline_config_path=${PIPELINE_CONFIG_PATH} \ + --trained_checkpoint_prefix=${TRAINED_CKPT_PREFIX} \ + --output_directory=${EXPORT_DIR} +``` + +NOTE: We are configuring our exported model to ingest 4-D image tensors. We can +also configure the exported model to take encoded images or serialized +`tf.Example`s. 
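Once exported, the frozen graph can be loaded and queried directly. The sketch below is a minimal TF1-style check that the export works end to end; the image is a dummy array, and the tensor names correspond to the `image_tensor` input type used above:

```python
# Sketch: load frozen_inference_graph.pb and run a dummy image through it.
import numpy as np
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('frozen_inference_graph.pb', 'rb') as fid:
    graph_def.ParseFromString(fid.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=graph) as sess:
        dummy_image = np.zeros((1, 300, 300, 3), dtype=np.uint8)  # 4-D uint8 batch
        boxes, scores, classes, num_detections = sess.run(
            ['detection_boxes:0', 'detection_scores:0',
             'detection_classes:0', 'num_detections:0'],
            feed_dict={'image_tensor:0': dummy_image})
```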
+ +After export, you should see the directory ${EXPORT_DIR} containing the following: + +* saved_model/, a directory containing the saved model format of the exported model +* frozen_inference_graph.pb, the frozen graph format of the exported model +* model.ckpt.*, the model checkpoints used for exporting +* checkpoint, a file specifying to restore included checkpoint files +* pipeline.config, pipeline config file for the exported model diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/faq.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/faq.md new file mode 100644 index 0000000000000000000000000000000000000000..f2a6e30ccf78de8bc4b8b9898128e24ddfd8d233 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/faq.md @@ -0,0 +1,27 @@ +# Frequently Asked Questions + +## Q: How can I ensure that all the groundtruth boxes are used during train and eval? +A: For the object detecion framework to be TPU-complient, we must pad our input +tensors to static shapes. This means that we must pad to a fixed number of +bounding boxes, configured by `InputReader.max_number_of_boxes`. It is +important to set this value to a number larger than the maximum number of +groundtruth boxes in the dataset. If an image is encountered with more +bounding boxes, the excess boxes will be clipped. + +## Q: AttributeError: 'module' object has no attribute 'BackupHandler' +A: This BackupHandler (tf_slim.tfexample_decoder.BackupHandler) was +introduced in tensorflow 1.5.0 so runing with earlier versions may cause this +issue. It now has been replaced by +object_detection.data_decoders.tf_example_decoder.BackupHandler. Whoever sees +this issue should be able to resolve it by syncing your fork to HEAD. +Same for LookupTensor. + +## Q: AttributeError: 'module' object has no attribute 'LookupTensor' +A: Similar to BackupHandler, syncing your fork to HEAD should make it work. + +## Q: Why can't I get the inference time as reported in model zoo? +A: The inference time reported in model zoo is mean time of testing hundreds of +images with an internal machine. As mentioned in +[TensorFlow detection model zoo](tf1_detection_zoo.md), this speed depends +highly on one's specific hardware configuration and should be treated more as +relative timing. 
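As a follow-up to the first question above, one way to pick a safe value for `InputReader.max_number_of_boxes` is to scan the training TFRecord for the densest image; a minimal sketch (hypothetical record path, standard TF Example keys) is:

```python
# Sketch: find the maximum number of groundtruth boxes in a TFRecord so that
# max_number_of_boxes can be set comfortably above it.
import tensorflow as tf

max_boxes = 0
for record in tf.python_io.tf_record_iterator('/path/to/train.record'):  # hypothetical
    example = tf.train.Example.FromString(record)
    num_boxes = len(
        example.features.feature['image/object/bbox/xmin'].float_list.value)
    max_boxes = max(max_boxes, num_boxes)
print('Set max_number_of_boxes to at least %d' % max_boxes)
```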
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/dogs_detections_output.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/dogs_detections_output.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e88a7010fa90f5c4a74f6caee78f5c975f77e40 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/dogs_detections_output.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/example_cat.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/example_cat.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74c7ef4b0849ce1b1f3b8061f172cb98ce06ef5e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/example_cat.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/groupof_case_eval.png b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/groupof_case_eval.png new file mode 100644 index 0000000000000000000000000000000000000000..5abc9b6984fb5816ca4f2e6f40e38ec6e6ea9cfc Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/groupof_case_eval.png differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/kites_detections_output.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/kites_detections_output.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c0f3364deda6614b5bf6fdddad7e7a578f0f6eb Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/kites_detections_output.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/kites_with_segment_overlay.png b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/kites_with_segment_overlay.png new file mode 100644 index 0000000000000000000000000000000000000000..a52e57de193e53edbb1a49643e8c77609abdc79d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/kites_with_segment_overlay.png differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/nongroupof_case_eval.png b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/nongroupof_case_eval.png new file mode 100644 index 0000000000000000000000000000000000000000..cbb76f493adfa725cd0b2ab323f89fdfc57a57ec Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/nongroupof_case_eval.png differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/oid_bus_72e19c28aac34ed8.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/oid_bus_72e19c28aac34ed8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e9412ad545c0a1e1e7dcfa35a168c2a61cf2012 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/oid_bus_72e19c28aac34ed8.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/oid_monkey_3b4168c89cecbc5b.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/oid_monkey_3b4168c89cecbc5b.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..46b1fb282a428fe1169a7ff1d30e963bc085e733 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/oid_monkey_3b4168c89cecbc5b.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/oxford_pet.png b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/oxford_pet.png new file mode 100644 index 0000000000000000000000000000000000000000..ddac415f5ef079f8d6fde8dd4c9838735fd96325 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/oxford_pet.png differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/tensorboard.png b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/tensorboard.png new file mode 100644 index 0000000000000000000000000000000000000000..fbcdbeb38cf5594681c0e206a08b6d06bd1e86a9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/tensorboard.png differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/tensorboard2.png b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/tensorboard2.png new file mode 100644 index 0000000000000000000000000000000000000000..97ad22daa11870ecebbbe7cadfb2d8bb30d738f6 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/tensorboard2.png differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/tf-od-api-logo.png b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/tf-od-api-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..9fa9cc9dba228c1effabfa5c1474052ed8bad3fd Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/img/tf-od-api-logo.png differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/instance_segmentation.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/instance_segmentation.md new file mode 100644 index 0000000000000000000000000000000000000000..f9b4856c90f47d60f4e8ea2a15b1813bab2847a5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/instance_segmentation.md @@ -0,0 +1,105 @@ +## Run an Instance Segmentation Model + +For some applications it isn't adequate enough to localize an object with a +simple bounding box. For instance, you might want to segment an object region +once it is detected. This class of problems is called **instance segmentation**. + +

+ +

+ +### Materializing data for instance segmentation {#materializing-instance-seg} + +Instance segmentation is an extension of object detection, where a binary mask +(i.e. object vs. background) is associated with every bounding box. This allows +for more fine-grained information about the extent of the object within the box. +To train an instance segmentation model, a groundtruth mask must be supplied for +every groundtruth bounding box. In additional to the proto fields listed in the +section titled [Using your own dataset](using_your_own_dataset.md), one must +also supply `image/object/mask`, which can either be a repeated list of +single-channel encoded PNG strings, or a single dense 3D binary tensor where +masks corresponding to each object are stacked along the first dimension. Each +is described in more detail below. + +#### PNG Instance Segmentation Masks + +Instance segmentation masks can be supplied as serialized PNG images. + +```shell +image/object/mask = ["\x89PNG\r\n\x1A\n\x00\x00\x00\rIHDR\...", ...] +``` + +These masks are whole-image masks, one for each object instance. The spatial +dimensions of each mask must agree with the image. Each mask has only a single +channel, and the pixel values are either 0 (background) or 1 (object mask). +**PNG masks are the preferred parameterization since they offer considerable +space savings compared to dense numerical masks.** + +#### Dense Numerical Instance Segmentation Masks + +Masks can also be specified via a dense numerical tensor. + +```shell +image/object/mask = [0.0, 0.0, 1.0, 1.0, 0.0, ...] +``` + +For an image with dimensions `H` x `W` and `num_boxes` groundtruth boxes, the +mask corresponds to a [`num_boxes`, `H`, `W`] float32 tensor, flattened into a +single vector of shape `num_boxes` * `H` * `W`. In TensorFlow, examples are read +in row-major format, so the elements are organized as: + +```shell +... mask 0 row 0 ... mask 0 row 1 ... // ... mask 0 row H-1 ... mask 1 row 0 ... +``` + +where each row has W contiguous binary values. + +To see an example tf-records with mask labels, see the examples under the +[Preparing Inputs](preparing_inputs.md) section. + +### Pre-existing config files + +We provide four instance segmentation config files that you can use to train +your own models: + +1. mask_rcnn_inception_resnet_v2_atrous_coco +1. mask_rcnn_resnet101_atrous_coco +1. mask_rcnn_resnet50_atrous_coco +1. mask_rcnn_inception_v2_coco + +For more details see the [detection model zoo](tf1_detection_zoo.md). + +### Updating a Faster R-CNN config file + +Currently, the only supported instance segmentation model is [Mask +R-CNN](https://arxiv.org/abs/1703.06870), which requires Faster R-CNN as the +backbone object detector. + +Once you have a baseline Faster R-CNN pipeline configuration, you can make the +following modifications in order to convert it into a Mask R-CNN model. + +1. Within `train_input_reader` and `eval_input_reader`, set + `load_instance_masks` to `True`. If using PNG masks, set `mask_type` to + `PNG_MASKS`, otherwise you can leave it as the default 'NUMERICAL_MASKS'. +1. Within the `faster_rcnn` config, use a `MaskRCNNBoxPredictor` as the + `second_stage_box_predictor`. +1. Within the `MaskRCNNBoxPredictor` message, set `predict_instance_masks` to + `True`. You must also define `conv_hyperparams`. +1. Within the `faster_rcnn` message, set `number_of_stages` to `3`. +1. Add instance segmentation metrics to the set of metrics: + `'coco_mask_metrics'`. +1. Update the `input_path`s to point at your data. 
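+
+Putting the list above together, a sketch of these edits applied
+programmatically to an existing Faster R-CNN `pipeline.config` might look as
+follows. This is not official tooling: the file names are placeholders, only
+the training reader is shown (mirror the changes in the eval reader), and the
+required `conv_hyperparams` block still needs to be filled in by hand:
+
+```python
+from google.protobuf import text_format
+from object_detection.protos import input_reader_pb2, pipeline_pb2
+
+config = pipeline_pb2.TrainEvalPipelineConfig()
+with open('faster_rcnn_pipeline.config') as f:
+    text_format.Merge(f.read(), config)
+
+# Load PNG instance masks during training (do the same for the eval reader).
+config.train_input_reader.load_instance_masks = True
+config.train_input_reader.mask_type = input_reader_pb2.PNG_MASKS
+
+# Predict masks via a MaskRCNNBoxPredictor and enable the third stage.
+predictor = (
+    config.model.faster_rcnn.second_stage_box_predictor.mask_rcnn_box_predictor)
+predictor.predict_instance_masks = True
+config.model.faster_rcnn.number_of_stages = 3
+
+# Report instance segmentation metrics during evaluation.
+config.eval_config.metrics_set.append('coco_mask_metrics')
+
+with open('mask_rcnn_pipeline.config', 'w') as f:
+    f.write(text_format.MessageToString(config))
+```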
+ +Please refer to the section on [Running the pets dataset](running_pets.md) for +additional details. + +> Note: The mask prediction branch consists of a sequence of convolution layers. +> You can set the number of convolution layers and their depth as follows: +> +> 1. Within the `MaskRCNNBoxPredictor` message, set the +> `mask_prediction_conv_depth` to your value of interest. The default value +> is 256. If you set it to `0` (recommended), the depth is computed +> automatically based on the number of classes in the dataset. +> 1. Within the `MaskRCNNBoxPredictor` message, set the +> `mask_prediction_num_conv_layers` to your value of interest. The default +> value is 2. diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/oid_inference_and_evaluation.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/oid_inference_and_evaluation.md new file mode 100644 index 0000000000000000000000000000000000000000..d54ad23940b5292bcd4db78bfaf3fd609d82cfe2 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/oid_inference_and_evaluation.md @@ -0,0 +1,257 @@ +# Inference and evaluation on the Open Images dataset + +This page presents a tutorial for running object detector inference and +evaluation measure computations on the [Open Images +dataset](https://github.com/openimages/dataset), using tools from the +[TensorFlow Object Detection +API](https://github.com/tensorflow/models/tree/master/research/object_detection). +It shows how to download the images and annotations for the validation and test +sets of Open Images; how to package the downloaded data in a format understood +by the Object Detection API; where to find a trained object detector model for +Open Images; how to run inference; and how to compute evaluation measures on the +inferred detections. + +Inferred detections will look like the following: + +![](img/oid_bus_72e19c28aac34ed8.jpg) +![](img/oid_monkey_3b4168c89cecbc5b.jpg) + +On the validation set of Open Images, this tutorial requires 27GB of free disk +space and the inference step takes approximately 9 hours on a single NVIDIA +Tesla P100 GPU. On the test set -- 75GB and 27 hours respectively. All other +steps require less than two hours in total on both sets. + +## Installing TensorFlow, the Object Detection API, and Google Cloud SDK + +Please run through the [installation instructions](installation.md) to install +TensorFlow and all its dependencies. Ensure the Protobuf libraries are compiled +and the library directories are added to `PYTHONPATH`. You will also need to +`pip` install `pandas` and `contextlib2`. + +Some of the data used in this tutorial lives in Google Cloud buckets. To access +it, you will have to [install the Google Cloud +SDK](https://cloud.google.com/sdk/downloads) on your workstation or laptop. + +## Preparing the Open Images validation and test sets + +In order to run inference and subsequent evaluation measure computations, we +require a dataset of images and ground truth boxes, packaged as TFRecords of +TFExamples. To create such a dataset for Open Images, you will need to first +download ground truth boxes from the [Open Images +website](https://github.com/openimages/dataset): + +```bash +# From tensorflow/models/research +mkdir oid +cd oid +wget https://storage.googleapis.com/openimages/2017_07/annotations_human_bbox_2017_07.tar.gz +tar -xvf annotations_human_bbox_2017_07.tar.gz +``` + +Next, download the images. 
In this tutorial, we will use lower resolution images +provided by [CVDF](http://www.cvdfoundation.org). Please follow the instructions +on [CVDF's Open Images repository +page](https://github.com/cvdfoundation/open-images-dataset) in order to gain +access to the cloud bucket with the images. Then run: + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # Set SPLIT to "test" to download the images in the test set +mkdir raw_images_${SPLIT} +gsutil -m rsync -r gs://open-images-dataset/$SPLIT raw_images_${SPLIT} +``` + +Another option for downloading the images is to follow the URLs contained in the +[image URLs and metadata CSV +files](https://storage.googleapis.com/openimages/2017_07/images_2017_07.tar.gz) +on the Open Images website. + +At this point, your `tensorflow/models/research/oid` directory should appear as +follows: + +```lang-none +|-- 2017_07 +| |-- test +| | `-- annotations-human-bbox.csv +| |-- train +| | `-- annotations-human-bbox.csv +| `-- validation +| `-- annotations-human-bbox.csv +|-- raw_images_validation (if you downloaded the validation split) +| `-- ... (41,620 files matching regex "[0-9a-f]{16}.jpg") +|-- raw_images_test (if you downloaded the test split) +| `-- ... (125,436 files matching regex "[0-9a-f]{16}.jpg") +`-- annotations_human_bbox_2017_07.tar.gz +``` + +Next, package the data into TFRecords of TFExamples by running: + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # Set SPLIT to "test" to create TFRecords for the test split +mkdir ${SPLIT}_tfrecords + +PYTHONPATH=$PYTHONPATH:$(readlink -f ..) \ +python -m object_detection/dataset_tools/create_oid_tf_record \ + --input_box_annotations_csv 2017_07/$SPLIT/annotations-human-bbox.csv \ + --input_images_directory raw_images_${SPLIT} \ + --input_label_map ../object_detection/data/oid_bbox_trainable_label_map.pbtxt \ + --output_tf_record_path_prefix ${SPLIT}_tfrecords/$SPLIT.tfrecord \ + --num_shards=100 +``` + +To add image-level labels, use the `--input_image_label_annotations_csv` flag. + +This results in 100 TFRecord files (shards), written to +`oid/${SPLIT}_tfrecords`, with filenames matching +`${SPLIT}.tfrecord-000[0-9][0-9]-of-00100`. Each shard contains approximately +the same number of images and is defacto a representative random sample of the +input data. [This enables](#accelerating_inference) a straightforward work +division scheme for distributing inference and also approximate measure +computations on subsets of the validation and test sets. + +## Inferring detections + +Inference requires a trained object detection model. In this tutorial we will +use a model from the [detections model zoo](tf1_detection_zoo.md), which can +be downloaded and unpacked by running the commands below. More information about +the model, such as its architecture and how it was trained, is available in the +[model zoo page](tf1_detection_zoo.md). + +```bash +# From tensorflow/models/research/oid +wget http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_oid_14_10_2017.tar.gz +tar -zxvf faster_rcnn_inception_resnet_v2_atrous_oid_14_10_2017.tar.gz +``` + +At this point, data is packed into TFRecords and we have an object detector +model. We can run inference using: + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # or test +TF_RECORD_FILES=$(ls -1 ${SPLIT}_tfrecords/* | tr '\n' ',') + +PYTHONPATH=$PYTHONPATH:$(readlink -f ..) 
\ +python -m object_detection/inference/infer_detections \ + --input_tfrecord_paths=$TF_RECORD_FILES \ + --output_tfrecord_path=${SPLIT}_detections.tfrecord-00000-of-00001 \ + --inference_graph=faster_rcnn_inception_resnet_v2_atrous_oid/frozen_inference_graph.pb \ + --discard_image_pixels +``` + +Inference preserves all fields of the input TFExamples, and adds new fields to +store the inferred detections. This allows [computing evaluation +measures](#computing-evaluation-measures) on the output TFRecord alone, as +groundtruth boxes are preserved as well. Since measure computations don't +require access to the images, `infer_detections` can optionally discard them +with the `--discard_image_pixels` flag. Discarding the images drastically +reduces the size of the output TFRecord. + +### Accelerating inference + +Running inference on the whole validation or test set can take a long time to +complete due to the large number of images present in these sets (41,620 and +125,436 respectively). For quick but approximate evaluation, inference and the +subsequent measure computations can be run on a small number of shards. To run +for example on 2% of all the data, it is enough to set `TF_RECORD_FILES` as +shown below before running `infer_detections`: + +```bash +TF_RECORD_FILES=$(ls ${SPLIT}_tfrecords/${SPLIT}.tfrecord-0000[0-1]-of-00100 | tr '\n' ',') +``` + +Please note that computing evaluation measures on a small subset of the data +introduces variance and bias, since some classes of objects won't be seen during +evaluation. In the example above, this leads to 13.2% higher mAP on the first +two shards of the validation set compared to the mAP for the full set ([see mAP +results](#expected-maps)). + +Another way to accelerate inference is to run it in parallel on multiple +TensorFlow devices on possibly multiple machines. The script below uses +[tmux](https://github.com/tmux/tmux/wiki) to run a separate `infer_detections` +process for each GPU on different partition of the input data. + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # or test +NUM_GPUS=4 +NUM_SHARDS=100 + +tmux new-session -d -s "inference" +function tmux_start { tmux new-window -d -n "inference:GPU$1" "${*:2}; exec bash"; } +for gpu_index in $(seq 0 $(($NUM_GPUS-1))); do + start_shard=$(( $gpu_index * $NUM_SHARDS / $NUM_GPUS )) + end_shard=$(( ($gpu_index + 1) * $NUM_SHARDS / $NUM_GPUS - 1)) + TF_RECORD_FILES=$(seq -s, -f "${SPLIT}_tfrecords/${SPLIT}.tfrecord-%05.0f-of-$(printf '%05d' $NUM_SHARDS)" $start_shard $end_shard) + tmux_start ${gpu_index} \ + PYTHONPATH=$PYTHONPATH:$(readlink -f ..) CUDA_VISIBLE_DEVICES=$gpu_index \ + python -m object_detection/inference/infer_detections \ + --input_tfrecord_paths=$TF_RECORD_FILES \ + --output_tfrecord_path=${SPLIT}_detections.tfrecord-$(printf "%05d" $gpu_index)-of-$(printf "%05d" $NUM_GPUS) \ + --inference_graph=faster_rcnn_inception_resnet_v2_atrous_oid/frozen_inference_graph.pb \ + --discard_image_pixels +done +``` + +After all `infer_detections` processes finish, `tensorflow/models/research/oid` +will contain one output TFRecord from each process, with name matching +`validation_detections.tfrecord-0000[0-3]-of-00004`. 
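+
+Before moving on to evaluation, it can be worth confirming that the sharded
+outputs together contain one record per input image. A small TF 1.x sketch,
+assuming the validation split and the four-way sharding produced by the script
+above:
+
+```python
+import glob
+
+import tensorflow as tf
+
+total = 0
+for shard in sorted(glob.glob('validation_detections.tfrecord-*')):
+    count = sum(1 for _ in tf.python_io.tf_record_iterator(shard))
+    print(shard, count)
+    total += count
+print('total detection records:', total)  # 41,620 expected for validation
+```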
+ +## Computing evaluation measures + +To compute evaluation measures on the inferred detections you first need to +create the appropriate configuration files: + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # or test +NUM_SHARDS=1 # Set to NUM_GPUS if using the parallel evaluation script above + +mkdir -p ${SPLIT}_eval_metrics + +echo " +label_map_path: '../object_detection/data/oid_bbox_trainable_label_map.pbtxt' +tf_record_input_reader: { input_path: '${SPLIT}_detections.tfrecord@${NUM_SHARDS}' } +" > ${SPLIT}_eval_metrics/${SPLIT}_input_config.pbtxt + +echo " +metrics_set: 'oid_V2_detection_metrics' +" > ${SPLIT}_eval_metrics/${SPLIT}_eval_config.pbtxt +``` + +And then run: + +```bash +# From tensorflow/models/research/oid +SPLIT=validation # or test + +PYTHONPATH=$PYTHONPATH:$(readlink -f ..) \ +python -m object_detection/metrics/offline_eval_map_corloc \ + --eval_dir=${SPLIT}_eval_metrics \ + --eval_config_path=${SPLIT}_eval_metrics/${SPLIT}_eval_config.pbtxt \ + --input_config_path=${SPLIT}_eval_metrics/${SPLIT}_input_config.pbtxt +``` + +The first configuration file contains an `object_detection.protos.InputReader` +message that describes the location of the necessary input files. The second +file contains an `object_detection.protos.EvalConfig` message that describes the +evaluation metric. For more information about these protos see the corresponding +source files. + +### Expected mAPs + +The result of running `offline_eval_map_corloc` is a CSV file located at +`${SPLIT}_eval_metrics/metrics.csv`. With the above configuration, the file will +contain average precision at IoU≥0.5 for each of the classes present in the +dataset. It will also contain the mAP@IoU≥0.5. Both the per-class average +precisions and the mAP are computed according to the [Open Images evaluation +protocol](evaluation_protocols.md). The expected mAPs for the validation and +test sets of Open Images in this case are: + +Set | Fraction of data | Images | mAP@IoU≥0.5 +---------: | :--------------: | :-----: | ----------- +validation | everything | 41,620 | 39.2% +validation | first 2 shards | 884 | 52.4% +test | everything | 125,436 | 37.7% +test | first 2 shards | 2,476 | 50.8% diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/preparing_inputs.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/preparing_inputs.md new file mode 100644 index 0000000000000000000000000000000000000000..7e8df08502b9d5598f9c6bcd8b5f6b1f0c4edf3e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/preparing_inputs.md @@ -0,0 +1,59 @@ +# Preparing Inputs + +TensorFlow Object Detection API reads data using the TFRecord file format. Two +sample scripts (`create_pascal_tf_record.py` and `create_pet_tf_record.py`) are +provided to convert from the PASCAL VOC dataset and Oxford-IIIT Pet dataset to +TFRecords. + +## Generating the PASCAL VOC TFRecord files. + +The raw 2012 PASCAL VOC data set is located +[here](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar). 
+To download, extract and convert it to TFRecords, run the following commands +below: + +```bash +# From tensorflow/models/research/ +wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar +tar -xvf VOCtrainval_11-May-2012.tar +python object_detection/dataset_tools/create_pascal_tf_record.py \ + --label_map_path=object_detection/data/pascal_label_map.pbtxt \ + --data_dir=VOCdevkit --year=VOC2012 --set=train \ + --output_path=pascal_train.record +python object_detection/dataset_tools/create_pascal_tf_record.py \ + --label_map_path=object_detection/data/pascal_label_map.pbtxt \ + --data_dir=VOCdevkit --year=VOC2012 --set=val \ + --output_path=pascal_val.record +``` + +You should end up with two TFRecord files named `pascal_train.record` and +`pascal_val.record` in the `tensorflow/models/research/` directory. + +The label map for the PASCAL VOC data set can be found at +`object_detection/data/pascal_label_map.pbtxt`. + +## Generating the Oxford-IIIT Pet TFRecord files. + +The Oxford-IIIT Pet data set is located +[here](http://www.robots.ox.ac.uk/~vgg/data/pets/). To download, extract and +convert it to TFRecords, run the following commands below: + +```bash +# From tensorflow/models/research/ +wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz +wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz +tar -xvf annotations.tar.gz +tar -xvf images.tar.gz +python object_detection/dataset_tools/create_pet_tf_record.py \ + --label_map_path=object_detection/data/pet_label_map.pbtxt \ + --data_dir=`pwd` \ + --output_dir=`pwd` +``` + +You should end up with two 10-sharded TFRecord files named +`pet_faces_train.record-?????-of-00010` and +`pet_faces_val.record-?????-of-00010` in the `tensorflow/models/research/` +directory. + +The label map for the Pet dataset can be found at +`object_detection/data/pet_label_map.pbtxt`. diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/release_notes.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/release_notes.md new file mode 100644 index 0000000000000000000000000000000000000000..21512397c9991e74bbe7a009e5cae40e11de63c6 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/release_notes.md @@ -0,0 +1,358 @@ +# Release Notes + +### September 3rd, 2020 + +TF2 OD API models can now be converted to TensorFlow Lite! Only SSD models +currently supported. See documentation. + +**Thanks to contributors**: Sachin Joglekar + +### July 10th, 2020 + +We are happy to announce that the TF OD API officially supports TF2! Our release +includes: + +* New binaries for train/eval/export that are designed to run in eager mode. +* A suite of TF2 compatible (Keras-based) models; this includes migrations of + our most popular TF1.x models (e.g., SSD with MobileNet, RetinaNet, Faster + R-CNN, Mask R-CNN), as well as a few new architectures for which we will + only maintain TF2 implementations: + + 1. CenterNet - a simple and effective anchor-free architecture based on the + recent [Objects as Points](https://arxiv.org/abs/1904.07850) paper by + Zhou et al + 2. [EfficientDet](https://arxiv.org/abs/1911.09070) - a recent family of + SOTA models discovered with the help of Neural Architecture Search. + +* COCO pre-trained weights for all of the models provided as TF2 style + object-based checkpoints. 
+ +* Access to + [Distribution Strategies](https://www.tensorflow.org/guide/distributed_training) + for distributed training --- our model are designed to be trainable using + sync multi-GPU and TPU platforms. + +* Colabs demo’ing eager mode training and inference. + +See our release blogpost +[here](https://blog.tensorflow.org/2020/07/tensorflow-2-meets-object-detection-api.html). +If you are an existing user of the TF OD API using TF 1.x, don’t worry, we’ve +got you covered. + +**Thanks to contributors**: Akhil Chinnakotla, Allen Lavoie, Anirudh Vegesana, +Anjali Sridhar, Austin Myers, Dan Kondratyuk, David Ross, Derek Chow, Jaeyoun +Kim, Jing Li, Jonathan Huang, Jordi Pont-Tuset, Karmel Allison, Kathy Ruan, +Kaushik Shivakumar, Lu He, Mingxing Tan, Pengchong Jin, Ronny Votel, Sara Beery, +Sergi Caelles Prat, Shan Yang, Sudheendra Vijayanarasimhan, Tina Tian, Tomer +Kaftan, Vighnesh Birodkar, Vishnu Banna, Vivek Rathod, Yanhui Liang, Yiming Shi, +Yixin Shi, Yu-hui Chen, Zhichao Lu. + +### June 26th, 2020 + +We have released SSDLite with MobileDet GPU backbone, which achieves 17% mAP +higher than the MobileNetV2 SSDLite (27.5 mAP vs 23.5 mAP) on a NVIDIA Jetson +Xavier at comparable latency (3.2ms vs 3.3ms). + +Along with the model definition, we are also releasing model checkpoints trained +on the COCO dataset. + +Thanks to contributors: Yongzhe Wang, Bo Chen, Hanxiao Liu, Le An +(NVIDIA), Yu-Te Cheng (NVIDIA), Oliver Knieps (NVIDIA), and Josh Park (NVIDIA). + +### June 17th, 2020 + +We have released [Context R-CNN](https://arxiv.org/abs/1912.03538), a model that +uses attention to incorporate contextual information images (e.g. from +temporally nearby frames taken by a static camera) in order to improve accuracy. +Importantly, these contextual images need not be labeled. + +* When applied to a challenging wildlife detection dataset + ([Snapshot Serengeti](http://lila.science/datasets/snapshot-serengeti)), + Context R-CNN with context from up to a month of images outperforms a + single-frame baseline by 17.9% mAP, and outperforms S3D (a 3d convolution + based baseline) by 11.2% mAP. +* Context R-CNN leverages temporal context from the unlabeled frames of a + novel camera deployment to improve performance at that camera, boosting + model generalizeability. + +Read about Context R-CNN on the Google AI blog +[here](https://ai.googleblog.com/2020/06/leveraging-temporal-context-for-object.html). + +We have provided code for generating data with associated context +[here](context_rcnn.md), and a sample config for a Context R-CNN model +[here](../samples/configs/context_rcnn_resnet101_snapshot_serengeti_sync.config). + +Snapshot Serengeti-trained Faster R-CNN and Context R-CNN models can be found in +the +[model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md#snapshot-serengeti-camera-trap-trained-models). + +A colab demonstrating Context R-CNN is provided +[here](../colab_tutorials/context_rcnn_tutorial.ipynb). + +Thanks to contributors: Sara Beery, Jonathan Huang, Guanhang Wu, Vivek +Rathod, Ronny Votel, Zhichao Lu, David Ross, Pietro Perona, Tanya Birch, and the +Wildlife Insights AI Team. + +### May 19th, 2020 + +We have released [MobileDets](https://arxiv.org/abs/2004.14525), a set of +high-performance models for mobile CPUs, DSPs and EdgeTPUs. + +* MobileDets outperform MobileNetV3+SSDLite by 1.7 mAP at comparable mobile + CPU inference latencies. 
MobileDets also outperform MobileNetV2+SSDLite by + 1.9 mAP on mobile CPUs, 3.7 mAP on EdgeTPUs and 3.4 mAP on DSPs while + running equally fast. MobileDets also offer up to 2x speedup over MnasFPN on + EdgeTPUs and DSPs. + +For each of the three hardware platforms we have released model definition, +model checkpoints trained on the COCO14 dataset and converted TFLite models in +fp32 and/or uint8. + +Thanks to contributors: Yunyang Xiong, Hanxiao Liu, Suyog Gupta, Berkin +Akin, Gabriel Bender, Pieter-Jan Kindermans, Mingxing Tan, Vikas Singh, Bo Chen, +Quoc Le, Zhichao Lu. + +### May 7th, 2020 + +We have released a mobile model with the +[MnasFPN head](https://arxiv.org/abs/1912.01106). + +* MnasFPN with MobileNet-V2 backbone is the most accurate (26.6 mAP at 183ms + on Pixel 1) mobile detection model we have released to date. With + depth-multiplier, MnasFPN with MobileNet-V2 backbone is 1.8 mAP higher than + MobileNet-V3-Large with SSDLite (23.8 mAP vs 22.0 mAP) at similar latency + (120ms) on Pixel 1. + +We have released model definition, model checkpoints trained on the COCO14 +dataset and a converted TFLite model. + +Thanks to contributors: Bo Chen, Golnaz Ghiasi, Hanxiao Liu, Tsung-Yi +Lin, Dmitry Kalenichenko, Hartwig Adam, Quoc Le, Zhichao Lu, Jonathan Huang, Hao +Xu. + +### Nov 13th, 2019 + +We have released MobileNetEdgeTPU SSDLite model. + +* SSDLite with MobileNetEdgeTPU backbone, which achieves 10% mAP higher than + MobileNetV2 SSDLite (24.3 mAP vs 22 mAP) on a Google Pixel4 at comparable + latency (6.6ms vs 6.8ms). + +Along with the model definition, we are also releasing model checkpoints trained +on the COCO dataset. + +Thanks to contributors: Yunyang Xiong, Bo Chen, Suyog Gupta, Hanxiao Liu, +Gabriel Bender, Mingxing Tan, Berkin Akin, Zhichao Lu, Quoc Le + +### Oct 15th, 2019 + +We have released two MobileNet V3 SSDLite models (presented in +[Searching for MobileNetV3](https://arxiv.org/abs/1905.02244)). + +* SSDLite with MobileNet-V3-Large backbone, which is 27% faster than Mobilenet + V2 SSDLite (119ms vs 162ms) on a Google Pixel phone CPU at the same mAP. +* SSDLite with MobileNet-V3-Small backbone, which is 37% faster than MnasNet + SSDLite reduced with depth-multiplier (43ms vs 68ms) at the same mAP. + +Along with the model definition, we are also releasing model checkpoints trained +on the COCO dataset. + +Thanks to contributors: Bo Chen, Zhichao Lu, Vivek Rathod, Jonathan Huang + +### July 1st, 2019 + +We have released an updated set of utils and an updated +[tutorial](challenge_evaluation.md) for all three tracks of the +[Open Images Challenge 2019](https://storage.googleapis.com/openimages/web/challenge2019.html)! + +The Instance Segmentation metric for +[Open Images V5](https://storage.googleapis.com/openimages/web/index.html) and +[Challenge 2019](https://storage.googleapis.com/openimages/web/challenge2019.html) +is part of this release. Check out +[the metric description](https://storage.googleapis.com/openimages/web/evaluation.html#instance_segmentation_eval) +on the Open Images website. 
+ +Thanks to contributors: Alina Kuznetsova, Rodrigo Benenson + +### Feb 11, 2019 + +We have released detection models trained on the Open Images Dataset V4 in our +detection model zoo, including + +* Faster R-CNN detector with Inception Resnet V2 feature extractor +* SSD detector with MobileNet V2 feature extractor +* SSD detector with ResNet 101 FPN feature extractor (aka RetinaNet-101) + +Thanks to contributors: Alina Kuznetsova, Yinxiao Li + +### Sep 17, 2018 + +We have released Faster R-CNN detectors with ResNet-50 / ResNet-101 feature +extractors trained on the +[iNaturalist Species Detection Dataset](https://github.com/visipedia/inat_comp/blob/master/2017/README.md#bounding-boxes). +The models are trained on the training split of the iNaturalist data for 4M +iterations, they achieve 55% and 58% mean AP@.5 over 2854 classes respectively. +For more details please refer to this [paper](https://arxiv.org/abs/1707.06642). + +Thanks to contributors: Chen Sun + +### July 13, 2018 + +There are many new updates in this release, extending the functionality and +capability of the API: + +* Moving from slim-based training to + [Estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)-based + training. +* Support for [RetinaNet](https://arxiv.org/abs/1708.02002), and a + [MobileNet](https://ai.googleblog.com/2017/06/mobilenets-open-source-models-for.html) + adaptation of RetinaNet. +* A novel SSD-based architecture called the + [Pooling Pyramid Network](https://arxiv.org/abs/1807.03284) (PPN). +* Releasing several [TPU](https://cloud.google.com/tpu/)-compatible models. + These can be found in the `samples/configs/` directory with a comment in the + pipeline configuration files indicating TPU compatibility. +* Support for quantized training. +* Updated documentation for new binaries, Cloud training, and + [TensorFlow Lite](https://www.tensorflow.org/mobile/tflite/). + +See also our +[expanded announcement blogpost](https://ai.googleblog.com/2018/07/accelerated-training-and-inference-with.html) +and accompanying tutorial at the +[TensorFlow blog](https://medium.com/tensorflow/training-and-serving-a-realtime-mobile-object-detector-in-30-minutes-with-cloud-tpus-b78971cf1193). + +Thanks to contributors: Sara Robinson, Aakanksha Chowdhery, Derek Chow, +Pengchong Jin, Jonathan Huang, Vivek Rathod, Zhichao Lu, Ronny Votel + +### June 25, 2018 + +Additional evaluation tools for the +[Open Images Challenge 2018](https://storage.googleapis.com/openimages/web/challenge.html) +are out. Check out our short tutorial on data preparation and running evaluation +[here](challenge_evaluation.md)! + +Thanks to contributors: Alina Kuznetsova + +### June 5, 2018 + +We have released the implementation of evaluation metrics for both tracks of the +[Open Images Challenge 2018](https://storage.googleapis.com/openimages/web/challenge.html) +as a part of the Object Detection API - see the +[evaluation protocols](evaluation_protocols.md) for more details. Additionally, +we have released a tool for hierarchical labels expansion for the Open Images +Challenge: check out +[oid_hierarchical_labels_expansion.py](../dataset_tools/oid_hierarchical_labels_expansion.py). + +Thanks to contributors: Alina Kuznetsova, Vittorio Ferrari, Jasper +Uijlings + +### April 30, 2018 + +We have released a Faster R-CNN detector with ResNet-101 feature extractor +trained on [AVA](https://research.google.com/ava/) v2.1. 
Compared with other +commonly used object detectors, it changes the action classification loss +function to per-class Sigmoid loss to handle boxes with multiple labels. The +model is trained on the training split of AVA v2.1 for 1.5M iterations, it +achieves mean AP of 11.25% over 60 classes on the validation split of AVA v2.1. +For more details please refer to this [paper](https://arxiv.org/abs/1705.08421). + +Thanks to contributors: Chen Sun, David Ross + +### April 2, 2018 + +Supercharge your mobile phones with the next generation mobile object detector! +We are adding support for MobileNet V2 with SSDLite presented in +[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381). +This model is 35% faster than Mobilenet V1 SSD on a Google Pixel phone CPU +(200ms vs. 270ms) at the same accuracy. Along with the model definition, we are +also releasing a model checkpoint trained on the COCO dataset. + +Thanks to contributors: Menglong Zhu, Mark Sandler, Zhichao Lu, Vivek +Rathod, Jonathan Huang + +### February 9, 2018 + +We now support instance segmentation!! In this API update we support a number of +instance segmentation models similar to those discussed in the +[Mask R-CNN paper](https://arxiv.org/abs/1703.06870). For further details refer +to [our slides](http://presentations.cocodataset.org/Places17-GMRI.pdf) from the +2017 Coco + Places Workshop. Refer to the section on +[Running an Instance Segmentation Model](instance_segmentation.md) for +instructions on how to configure a model that predicts masks in addition to +object bounding boxes. + +Thanks to contributors: Alireza Fathi, Zhichao Lu, Vivek Rathod, Ronny +Votel, Jonathan Huang + +### November 17, 2017 + +As a part of the Open Images V3 release we have released: + +* An implementation of the Open Images evaluation metric and the + [protocol](evaluation_protocols.md#open-images). +* Additional tools to separate inference of detection and evaluation (see + [this tutorial](oid_inference_and_evaluation.md)). +* A new detection model trained on the Open Images V2 data release (see + [Open Images model](tf1_detection_zoo.md#open-images-models)). + +See more information on the +[Open Images website](https://github.com/openimages/dataset)! + +Thanks to contributors: Stefan Popov, Alina Kuznetsova + +### November 6, 2017 + +We have re-released faster versions of our (pre-trained) models in the +model zoo. In addition to what was available +before, we are also adding Faster R-CNN models trained on COCO with Inception V2 +and Resnet-50 feature extractors, as well as a Faster R-CNN with Resnet-101 +model trained on the KITTI dataset. + +Thanks to contributors: Jonathan Huang, Vivek Rathod, Derek Chow, Tal +Remez, Chen Sun. + +### October 31, 2017 + +We have released a new state-of-the-art model for object detection using the +Faster-RCNN with the +[NASNet-A image featurization](https://arxiv.org/abs/1707.07012). This model +achieves mAP of 43.1% on the test-dev validation dataset for COCO, improving on +the best available model in the zoo by 6% in terms of absolute mAP. + +Thanks to contributors: Barret Zoph, Vijay Vasudevan, Jonathon Shlens, +Quoc Le + +### August 11, 2017 + +We have released an update to the +[Android Detect demo](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android) +which will now run models trained using the TensorFlow Object Detection API on +an Android device. 
By default, it currently runs a frozen SSD w/Mobilenet +detector trained on COCO, but we encourage you to try out other detection +models! + +Thanks to contributors: Jonathan Huang, Andrew Harp + +### June 15, 2017 + +In addition to our base TensorFlow detection model definitions, this release +includes: + +* A selection of trainable detection models, including: + * Single Shot Multibox Detector (SSD) with MobileNet, + * SSD with Inception V2, + * Region-Based Fully Convolutional Networks (R-FCN) with Resnet 101, + * Faster RCNN with Resnet 101, + * Faster RCNN with Inception Resnet v2 +* Frozen weights (trained on the COCO dataset) for each of the above models to + be used for out-of-the-box inference purposes. +* A [Jupyter notebook](../colab_tutorials/object_detection_tutorial.ipynb) for + performing out-of-the-box inference with one of our released models +* Convenient training and evaluation + [instructions](tf1_training_and_evaluation.md) for local runs and Google + Cloud. + +Thanks to contributors: Jonathan Huang, Vivek Rathod, Derek Chow, Chen +Sun, Menglong Zhu, Matthew Tang, Anoop Korattikara, Alireza Fathi, Ian Fischer, +Zbigniew Wojna, Yang Song, Sergio Guadarrama, Jasper Uijlings, Viacheslav +Kovalevskyi, Kevin Murphy diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_notebook.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_notebook.md new file mode 100644 index 0000000000000000000000000000000000000000..b92aec33aa1df12d0e29da9a50e9c8a59e5c2ff7 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_notebook.md @@ -0,0 +1,18 @@ +# Quick Start: Jupyter notebook for off-the-shelf inference + +[![TensorFlow 2.2](https://img.shields.io/badge/TensorFlow-2.2-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v2.2.0) +[![TensorFlow 1.15](https://img.shields.io/badge/TensorFlow-1.15-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0) + +If you'd like to hit the ground running and run detection on a few example +images right out of the box, we recommend trying out the Jupyter notebook demo. +To run the Jupyter notebook, run the following command from +`tensorflow/models/research/object_detection`: + +``` +# From tensorflow/models/research/object_detection +jupyter notebook +``` + +The notebook should open in your favorite web browser. Click the +[`object_detection_tutorial.ipynb`](../object_detection_tutorial.ipynb) link to +open the demo. diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_on_mobile_tensorflowlite.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_on_mobile_tensorflowlite.md new file mode 100644 index 0000000000000000000000000000000000000000..379652e34cb2241d6294679548c988e8916510bc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_on_mobile_tensorflowlite.md @@ -0,0 +1,149 @@ +# Running on mobile with TensorFlow Lite + +[![TensorFlow 1.15](https://img.shields.io/badge/TensorFlow-1.15-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0) + +In this section, we will show you how to use [TensorFlow +Lite](https://www.tensorflow.org/mobile/tflite/) to get a smaller model and +allow you take advantage of ops that have been optimized for mobile devices. 
+TensorFlow Lite is TensorFlow’s lightweight solution for mobile and embedded +devices. It enables on-device machine learning inference with low latency and a +small binary size. TensorFlow Lite uses many techniques for this such as +quantized kernels that allow smaller and faster (fixed-point math) models. + +For this section, you will need to build [TensorFlow from +source](https://www.tensorflow.org/install/install_sources) to get the +TensorFlow Lite support for the SSD model. At this time only SSD models are supported. +Models like faster_rcnn are not supported at this time. You will also need to install the +[bazel build +tool](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#bazel). + +To make these commands easier to run, let’s set up some environment variables: + +```shell +export CONFIG_FILE=PATH_TO_BE_CONFIGURED/pipeline.config +export CHECKPOINT_PATH=PATH_TO_BE_CONFIGURED/model.ckpt +export OUTPUT_DIR=/tmp/tflite +``` + +We start with a checkpoint and get a TensorFlow frozen graph with compatible ops +that we can use with TensorFlow Lite. First, you’ll need to install these +[python +libraries](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). +Then to get the frozen graph, run the export_tflite_ssd_graph.py script from the +`models/research` directory with this command: + +```shell +object_detection/export_tflite_ssd_graph.py \ +--pipeline_config_path=$CONFIG_FILE \ +--trained_checkpoint_prefix=$CHECKPOINT_PATH \ +--output_directory=$OUTPUT_DIR \ +--add_postprocessing_op=true +``` + +In the /tmp/tflite directory, you should now see two files: tflite_graph.pb and +tflite_graph.pbtxt. Note that the add_postprocessing flag enables the model to +take advantage of a custom optimized detection post-processing operation which +can be thought of as a replacement for +[tf.image.non_max_suppression](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression). +Make sure not to confuse export_tflite_ssd_graph with export_inference_graph in +the same directory. Both scripts output frozen graphs: export_tflite_ssd_graph +will output the frozen graph that we can input to TensorFlow Lite directly and +is the one we’ll be using. + +Next we’ll use TensorFlow Lite to get the optimized model by using +[TOCO](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/toco), +the TensorFlow Lite Optimizing Converter. This will convert the resulting frozen +graph (tflite_graph.pb) to the TensorFlow Lite flatbuffer format (detect.tflite) +via the following command. For a quantized model, run this from the tensorflow/ +directory: + +```shell +bazel run -c opt tensorflow/lite/toco:toco -- \ +--input_file=$OUTPUT_DIR/tflite_graph.pb \ +--output_file=$OUTPUT_DIR/detect.tflite \ +--input_shapes=1,300,300,3 \ +--input_arrays=normalized_input_image_tensor \ +--output_arrays='TFLite_Detection_PostProcess','TFLite_Detection_PostProcess:1','TFLite_Detection_PostProcess:2','TFLite_Detection_PostProcess:3' \ +--inference_type=QUANTIZED_UINT8 \ +--mean_values=128 \ +--std_values=128 \ +--change_concat_input_ranges=false \ +--allow_custom_ops +``` + +This command takes the input tensor normalized_input_image_tensor after resizing +each camera image frame to 300x300 pixels. 
The outputs of the quantized model +are named 'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1', +'TFLite_Detection_PostProcess:2', and 'TFLite_Detection_PostProcess:3' and +represent four arrays: detection_boxes, detection_classes, detection_scores, and +num_detections. The documentation for other flags used in this command is +[here](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/convert/cmdline.md). +If things ran successfully, you should now see a third file in the /tmp/tflite +directory called detect.tflite. This file contains the graph and all model +parameters and can be run via the TensorFlow Lite interpreter on the Android +device. For a floating point model, run this from the tensorflow/ directory: + +```shell +bazel run -c opt tensorflow/lite/toco:toco -- \ +--input_file=$OUTPUT_DIR/tflite_graph.pb \ +--output_file=$OUTPUT_DIR/detect.tflite \ +--input_shapes=1,300,300,3 \ +--input_arrays=normalized_input_image_tensor \ +--output_arrays='TFLite_Detection_PostProcess','TFLite_Detection_PostProcess:1','TFLite_Detection_PostProcess:2','TFLite_Detection_PostProcess:3' \ +--inference_type=FLOAT \ +--allow_custom_ops +``` + +# Running our model on Android + +To run our TensorFlow Lite model on device, we will use Android Studio to build +and run the TensorFlow Lite detection example with the new model. The example is +found in the +[TensorFlow examples repository](https://github.com/tensorflow/examples) under +`/lite/examples/object_detection`. The example can be built with +[Android Studio](https://developer.android.com/studio/index.html), and requires +the +[Android SDK with build tools](https://developer.android.com/tools/revisions/build-tools.html) +that support API >= 21. Additional details are available on the +[TensorFlow Lite example page](https://github.com/tensorflow/examples/tree/master/lite/examples/object_detection/android). + +Next we need to point the app to our new detect.tflite file and give it the +names of our new labels. Specifically, we will copy our TensorFlow Lite +flatbuffer to the app assets directory with the following command: + +```shell +mkdir $TF_EXAMPLES/lite/examples/object_detection/android/app/src/main/assets +cp /tmp/tflite/detect.tflite \ + $TF_EXAMPLES/lite/examples/object_detection/android/app/src/main/assets +``` + +You will also need to copy your new labelmap labelmap.txt to the assets +directory. + +We will now edit the gradle build file to use these assets. First, open the +`build.gradle` file +`$TF_EXAMPLES/lite/examples/object_detection/android/app/build.gradle`. Comment +out the model download script to avoid your assets being overwritten: `// apply +from:'download_model.gradle'` ``` + +If your model is named `detect.tflite`, and your labels file `labelmap.txt`, the +example will use them automatically as long as they've been properly copied into +the base assets directory. If you need to use a custom path or filename, open up +the +$TF_EXAMPLES/lite/examples/object_detection/android/app/src/main/java/org/tensorflow/demo/DetectorActivity.java +file in a text editor and find the definition of TF_OD_API_LABELS_FILE. Update +this path to point to your new label map file: +"labels_list.txt". Note that if your model is quantized, +the flag TF_OD_API_IS_QUANTIZED is set to true, and if your model is floating +point, the flag TF_OD_API_IS_QUANTIZED is set to false. 
This new section of
+DetectorActivity.java should now look as follows for a quantized model:
+
+```java
+  private static final boolean TF_OD_API_IS_QUANTIZED = true;
+  private static final String TF_OD_API_MODEL_FILE = "detect.tflite";
+  private static final String TF_OD_API_LABELS_FILE = "labels_list.txt";
+```
+
+Once you’ve copied the TensorFlow Lite model and edited the gradle build script
+to not use the downloaded assets, you can build and deploy the app using the
+usual Android Studio build process. diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_on_mobile_tf2.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_on_mobile_tf2.md new file mode 100644 index 0000000000000000000000000000000000000000..3553a16f857453635349bf2cd881ca4db65ee469 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_on_mobile_tf2.md @@ -0,0 +1,144 @@
+# Running TF2 Detection API Models on mobile
+
+[![TensorFlow 2.3](https://img.shields.io/badge/TensorFlow-2.3-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v2.3.0)
+[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB)](https://www.python.org/downloads/release/python-360/)
+
+**NOTE:** This support was added *after* TF2.3, so please use the latest nightly
+for the TensorFlow Lite Converter for this to work.
+
+[TensorFlow Lite](https://www.tensorflow.org/mobile/tflite/) (TFLite) is
+TensorFlow’s lightweight solution for mobile and embedded devices. It enables
+on-device machine learning inference with low latency and a small binary size.
+TensorFlow Lite uses many techniques for this, such as quantized kernels that
+allow smaller and faster (fixed-point math) models.
+
+This document shows how eligible models from the
+[TF2 Detection zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md)
+can be converted for inference with TFLite.
+
+For an end-to-end Python guide on how to fine-tune an SSD model for mobile
+inference, look at
+[this Colab](../colab_tutorials/eager_few_shot_od_training_tflite.ipynb).
+
+**NOTE:** TFLite currently only supports **SSD Architectures** (excluding
+EfficientDet) for boxes-based detection. Support for EfficientDet is coming
+soon.
+
+The output model has the following inputs & outputs:
+
+```
+One input:
+  image: a float32 tensor of shape [1, height, width, 3] containing the
+  *normalized* input image.
+  NOTE: See the `preprocess` function defined in the feature extractor class
+  in the object_detection/models directory.
+
+Four Outputs:
+  detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
+  locations
+  detection_classes: a float32 tensor of shape [1, num_boxes]
+  with class indices
+  detection_scores: a float32 tensor of shape [1, num_boxes]
+  with class scores
+  num_boxes: a float32 tensor of size 1 containing the number of detected boxes
+```
+
+There are two steps to TFLite conversion:
+
+### Step 1: Export TFLite inference graph
+
+This step generates an intermediate SavedModel that can be used with the
+[TFLite Converter](https://www.tensorflow.org/lite/convert) via the command
+line or the Python API.
+ +To use the script: + +```bash +# From the tensorflow/models/research/ directory +python object_detection/export_tflite_graph_tf2.py \ + --pipeline_config_path path/to/ssd_model/pipeline.config \ + --trained_checkpoint_dir path/to/ssd_model/checkpoint \ + --output_directory path/to/exported_model_directory +``` + +Use `--help` with the above script to get the full list of supported parameters. +These can fine-tune accuracy and speed for your model. + +### Step 2: Convert to TFLite + +Use the [TensorFlow Lite Converter](https://www.tensorflow.org/lite/convert) to +convert the `SavedModel` to TFLite. Note that you need to use `from_saved_model` +for TFLite conversion with the Python API. + +You can also leverage +[Post-training Quantization](https://www.tensorflow.org/lite/performance/post_training_quantization) +to +[optimize performance](https://www.tensorflow.org/lite/performance/model_optimization) +and obtain a smaller model. Note that this is only possible from the *Python +API*. Be sure to use a +[representative dataset](https://www.tensorflow.org/lite/performance/post_training_quantization#full_integer_quantization) +and set the following options on the converter: + +```python +converter.optimizations = [tf.lite.Optimize.DEFAULT] +converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8, + tf.lite.OpsSet.TFLITE_BUILTINS] +converter.representative_dataset = <...> +``` + +## Running our model on Android + +To run our TensorFlow Lite model on device, we will use Android Studio to build +and run the TensorFlow Lite detection example with the new model. The example is +found in the +[TensorFlow examples repository](https://github.com/tensorflow/examples) under +`/lite/examples/object_detection`. The example can be built with +[Android Studio](https://developer.android.com/studio/index.html), and requires +the +[Android SDK with build tools](https://developer.android.com/tools/revisions/build-tools.html) +that support API >= 21. Additional details are available on the +[TensorFlow Lite example page](https://github.com/tensorflow/examples/tree/master/lite/examples/object_detection/android). + +Next we need to point the app to our new detect.tflite file and give it the +names of our new labels. Specifically, we will copy our TensorFlow Lite +flatbuffer to the app assets directory with the following command: + +```shell +mkdir $TF_EXAMPLES/lite/examples/object_detection/android/app/src/main/assets +cp /tmp/tflite/detect.tflite \ + $TF_EXAMPLES/lite/examples/object_detection/android/app/src/main/assets +``` + +You will also need to copy your new labelmap labelmap.txt to the assets +directory. + +We will now edit the gradle build file to use these assets. First, open the +`build.gradle` file +`$TF_EXAMPLES/lite/examples/object_detection/android/app/build.gradle`. Comment +out the model download script to avoid your assets being overwritten: + +```shell +// apply from:'download_model.gradle' +``` + +If your model is named `detect.tflite`, and your labels file `labelmap.txt`, the +example will use them automatically as long as they've been properly copied into +the base assets directory. If you need to use a custom path or filename, open up +the +$TF_EXAMPLES/lite/examples/object_detection/android/app/src/main/java/org/tensorflow/demo/DetectorActivity.java +file in a text editor and find the definition of TF_OD_API_LABELS_FILE. Update +this path to point to your new label map file: "labels_list.txt". 
Note that if +your model is quantized, the flag TF_OD_API_IS_QUANTIZED is set to true, and if +your model is floating point, the flag TF_OD_API_IS_QUANTIZED is set to false. +This new section of DetectorActivity.java should now look as follows for a +quantized model: + +```java + private static final boolean TF_OD_API_IS_QUANTIZED = true; + private static final String TF_OD_API_MODEL_FILE = "detect.tflite"; + private static final String TF_OD_API_LABELS_FILE = "labels_list.txt"; +``` + +Once you’ve copied the TensorFlow Lite model and edited the gradle build script +to not use the downloaded assets, you can build and deploy the app using the +usual Android Studio build process. diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_pets.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_pets.md new file mode 100644 index 0000000000000000000000000000000000000000..7d6b7bfa7c0149a9083312849a544e1c3334cde8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/running_pets.md @@ -0,0 +1,321 @@ +# Quick Start: Distributed Training on the Oxford-IIIT Pets Dataset on Google Cloud + +[![TensorFlow 1.15](https://img.shields.io/badge/TensorFlow-1.15-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0) + +This page is a walkthrough for training an object detector using the TensorFlow +Object Detection API. In this tutorial, we'll be training on the Oxford-IIIT Pets +dataset to build a system to detect various breeds of cats and dogs. The output +of the detector will look like the following: + +![](img/oxford_pet.png) + +## Setting up a Project on Google Cloud + +To accelerate the process, we'll run training and evaluation on [Google Cloud +ML Engine](https://cloud.google.com/ml-engine/) to leverage multiple GPUs. To +begin, you will have to set up Google Cloud via the following steps (if you have +already done this, feel free to skip to the next section): + +1. [Create a GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects). +2. [Install the Google Cloud SDK](https://cloud.google.com/sdk/downloads) on +your workstation or laptop. +This will provide the tools you need to upload files to Google Cloud Storage and +start ML training jobs. +3. [Enable the ML Engine +APIs](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component&_ga=1.73374291.1570145678.1496689256). +By default, a new GCP project does not enable APIs to start ML Engine training +jobs. Use the above link to explicitly enable them. +4. [Set up a Google Cloud Storage (GCS) +bucket](https://cloud.google.com/storage/docs/creating-buckets). ML Engine +training jobs can only access files on a Google Cloud Storage bucket. In this +tutorial, we'll be required to upload our dataset and configuration to GCS. + +Please remember the name of your GCS bucket, as we will reference it multiple +times in this document. Substitute `${YOUR_GCS_BUCKET}` with the name of +your bucket in this document. For your convenience, you should define the +environment variable below: + +``` bash +export YOUR_GCS_BUCKET=${YOUR_GCS_BUCKET} +``` + +It is also possible to run locally by following +[the running locally instructions](running_locally.md). + +## Installing TensorFlow and the TensorFlow Object Detection API + +Please run through the [installation instructions](installation.md) to install +TensorFlow and all it dependencies. 
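+Before moving on, you can also confirm from Python that the API and its
+generated protos are importable; a minimal check, assuming it is run from the
+`tensorflow/models/research/` directory (or with that directory on
+`PYTHONPATH`):
+
+```python
+# Quick import check for the Object Detection API and its compiled protos.
+from object_detection.protos import pipeline_pb2  # fails if the protos were not compiled
+from object_detection.utils import label_map_util  # another commonly used module
+
+print("TensorFlow Object Detection API imports OK")
+```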
Ensure the Protobuf libraries are +compiled and the library directories are added to `PYTHONPATH`. + +## Getting the Oxford-IIIT Pets Dataset and Uploading it to Google Cloud Storage + +In order to train a detector, we require a dataset of images, bounding boxes and +classifications. For this demo, we'll use the Oxford-IIIT Pets dataset. The raw +dataset for Oxford-IIIT Pets lives +[here](http://www.robots.ox.ac.uk/~vgg/data/pets/). You will need to download +both the image dataset [`images.tar.gz`](http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz) +and the groundtruth data [`annotations.tar.gz`](http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz) +to the `tensorflow/models/research/` directory and unzip them. This may take +some time. + +``` bash +# From tensorflow/models/research/ +wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz +wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz +tar -xvf images.tar.gz +tar -xvf annotations.tar.gz +``` + +After downloading the tarballs, your `tensorflow/models/research/` directory +should appear as follows: + +```lang-none +- images.tar.gz +- annotations.tar.gz ++ images/ ++ annotations/ ++ object_detection/ +... other files and directories +``` + +The TensorFlow Object Detection API expects data to be in the TFRecord format, +so we'll now run the `create_pet_tf_record` script to convert from the raw +Oxford-IIIT Pet dataset into TFRecords. Run the following commands from the +`tensorflow/models/research/` directory: + +``` bash +# From tensorflow/models/research/ +python object_detection/dataset_tools/create_pet_tf_record.py \ + --label_map_path=object_detection/data/pet_label_map.pbtxt \ + --data_dir=`pwd` \ + --output_dir=`pwd` +``` + +Note: It is normal to see some warnings when running this script. You may ignore +them. + +Two 10-sharded TFRecord files named `pet_faces_train.record-*` and +`pet_faces_val.record-*` should be generated in the +`tensorflow/models/research/` directory. + +Now that the data has been generated, we'll need to upload it to Google Cloud +Storage so the data can be accessed by ML Engine. Run the following command to +copy the files into your GCS bucket (substituting `${YOUR_GCS_BUCKET}`): + +```bash +# From tensorflow/models/research/ +gsutil cp pet_faces_train.record-* gs://${YOUR_GCS_BUCKET}/data/ +gsutil cp pet_faces_val.record-* gs://${YOUR_GCS_BUCKET}/data/ +gsutil cp object_detection/data/pet_label_map.pbtxt gs://${YOUR_GCS_BUCKET}/data/pet_label_map.pbtxt +``` + +Please remember the path where you upload the data to, as we will need this +information when configuring the pipeline in a following step. + +## Downloading a COCO-pretrained Model for Transfer Learning + +Training a state of the art object detector from scratch can take days, even +when using multiple GPUs! In order to speed up training, we'll take an object +detector trained on a different dataset (COCO), and reuse some of it's +parameters to initialize our new model. + +Download our [COCO-pretrained Faster R-CNN with Resnet-101 +model](http://storage.googleapis.com/download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_coco_11_06_2017.tar.gz). +Unzip the contents of the folder and copy the `model.ckpt*` files into your GCS +Bucket. 
+ +``` bash +wget http://storage.googleapis.com/download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_coco_11_06_2017.tar.gz +tar -xvf faster_rcnn_resnet101_coco_11_06_2017.tar.gz +gsutil cp faster_rcnn_resnet101_coco_11_06_2017/model.ckpt.* gs://${YOUR_GCS_BUCKET}/data/ +``` + +Remember the path where you uploaded the model checkpoint to, as we will need it +in the following step. + +## Configuring the Object Detection Pipeline + +In the TensorFlow Object Detection API, the model parameters, training +parameters and eval parameters are all defined by a config file. More details +can be found [here](configuring_jobs.md). For this tutorial, we will use some +predefined templates provided with the source code. In the +`object_detection/samples/configs` folder, there are skeleton object_detection +configuration files. We will use `faster_rcnn_resnet101_pets.config` as a +starting point for configuring the pipeline. Open the file with your favourite +text editor. + +We'll need to configure some paths in order for the template to work. Search the +file for instances of `PATH_TO_BE_CONFIGURED` and replace them with the +appropriate value (typically `gs://${YOUR_GCS_BUCKET}/data/`). Afterwards +upload your edited file onto GCS, making note of the path it was uploaded to +(we'll need it when starting the training/eval jobs). + +``` bash +# From tensorflow/models/research/ + +# Edit the faster_rcnn_resnet101_pets.config template. Please note that there +# are multiple places where PATH_TO_BE_CONFIGURED needs to be set. +sed -i "s|PATH_TO_BE_CONFIGURED|"gs://${YOUR_GCS_BUCKET}"/data|g" \ + object_detection/samples/configs/faster_rcnn_resnet101_pets.config + +# Copy edited template to cloud. +gsutil cp object_detection/samples/configs/faster_rcnn_resnet101_pets.config \ + gs://${YOUR_GCS_BUCKET}/data/faster_rcnn_resnet101_pets.config +``` + +## Checking Your Google Cloud Storage Bucket + +At this point in the tutorial, you should have uploaded the training/validation +datasets (including label map), our COCO trained FasterRCNN finetune checkpoint and your job +configuration to your Google Cloud Storage Bucket. Your bucket should look like +the following: + +```lang-none ++ ${YOUR_GCS_BUCKET}/ + + data/ + - faster_rcnn_resnet101_pets.config + - model.ckpt.index + - model.ckpt.meta + - model.ckpt.data-00000-of-00001 + - pet_label_map.pbtxt + - pet_faces_train.record-* + - pet_faces_val.record-* +``` + +You can inspect your bucket using the [Google Cloud Storage +browser](https://console.cloud.google.com/storage/browser). + +## Starting Training and Evaluation Jobs on Google Cloud ML Engine + +Before we can start a job on Google Cloud ML Engine, we must: + +1. Package the TensorFlow Object Detection code. +2. Write a cluster configuration for our Google Cloud ML job. + +To package the TensorFlow Object Detection code, run the following commands from +the `tensorflow/models/research/` directory: + +```bash +# From tensorflow/models/research/ +bash object_detection/dataset_tools/create_pycocotools_package.sh /tmp/pycocotools +python setup.py sdist +(cd slim && python setup.py sdist) +``` + +This will create python packages dist/object_detection-0.1.tar.gz, +slim/dist/slim-0.1.tar.gz, and /tmp/pycocotools/pycocotools-2.0.tar.gz. + +For running the training Cloud ML job, we'll configure the cluster to use 5 +training jobs and three parameters servers. The +configuration file can be found at `object_detection/samples/cloud/cloud.yml`. 
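+Before launching the Cloud job, it can be worth sanity-checking the TFRecords
+generated earlier; a small sketch, run from `tensorflow/models/research/` (the
+patterns match the shard names used in this tutorial, and `tf.io.gfile` also
+accepts the `gs://` paths after the upload):
+
+```python
+import tensorflow as tf
+
+# Count the examples in the generated shards.
+for pattern in ["pet_faces_train.record-*", "pet_faces_val.record-*"]:
+    shards = tf.io.gfile.glob(pattern)
+    count = sum(1 for shard in shards
+                for _ in tf.compat.v1.python_io.tf_record_iterator(shard))
+    print(pattern, "->", len(shards), "shards,", count, "examples")
+```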
+ +Note: The code sample below is supported for use with 1.12 runtime version. + +To start training and evaluation, execute the following command from the +`tensorflow/models/research/` directory: + +```bash +# From tensorflow/models/research/ +gcloud ml-engine jobs submit training `whoami`_object_detection_pets_`date +%m_%d_%Y_%H_%M_%S` \ + --runtime-version 1.12 \ + --job-dir=gs://${YOUR_GCS_BUCKET}/model_dir \ + --packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz,/tmp/pycocotools/pycocotools-2.0.tar.gz \ + --module-name object_detection.model_main \ + --region us-central1 \ + --config object_detection/samples/cloud/cloud.yml \ + -- \ + --model_dir=gs://${YOUR_GCS_BUCKET}/model_dir \ + --pipeline_config_path=gs://${YOUR_GCS_BUCKET}/data/faster_rcnn_resnet101_pets.config +``` + +Users can monitor and stop training and evaluation jobs on the [ML Engine +Dashboard](https://console.cloud.google.com/mlengine/jobs). + +## Monitoring Progress with Tensorboard + +You can monitor progress of the training and eval jobs by running Tensorboard on +your local machine: + +```bash +# This command needs to be run once to allow your local machine to access your +# GCS bucket. +gcloud auth application-default login + +tensorboard --logdir=gs://${YOUR_GCS_BUCKET}/model_dir +``` + +Once Tensorboard is running, navigate to `localhost:6006` from your favourite +web browser. You should see something similar to the following: + +![](img/tensorboard.png) + +Make sure your Tensorboard version is the same minor version as your TensorFlow (1.x) + +You will also want to click on the images tab to see example detections made by +the model while it trains. After about an hour and a half of training, you can +expect to see something like this: + +![](img/tensorboard2.png) + +Note: It takes roughly 10 minutes for a job to get started on ML Engine, and +roughly an hour for the system to evaluate the validation dataset. It may take +some time to populate the dashboards. If you do not see any entries after half +an hour, check the logs from the [ML Engine +Dashboard](https://console.cloud.google.com/mlengine/jobs). Note that by default +the training jobs are configured to go for much longer than is necessary for +convergence. To save money, we recommend killing your jobs once you've seen +that they've converged. + +## Exporting the TensorFlow Graph + +After your model has been trained, you should export it to a TensorFlow graph +proto. First, you need to identify a candidate checkpoint to export. You can +search your bucket using the [Google Cloud Storage +Browser](https://console.cloud.google.com/storage/browser). The file should be +stored under `${YOUR_GCS_BUCKET}/model_dir`. The checkpoint will typically +consist of three files: + +* `model.ckpt-${CHECKPOINT_NUMBER}.data-00000-of-00001` +* `model.ckpt-${CHECKPOINT_NUMBER}.index` +* `model.ckpt-${CHECKPOINT_NUMBER}.meta` + +After you've identified a candidate checkpoint to export, run the following +command from `tensorflow/models/research/`: + +```bash +# From tensorflow/models/research/ +gsutil cp gs://${YOUR_GCS_BUCKET}/model_dir/model.ckpt-${CHECKPOINT_NUMBER}.* . 
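+# The exporter below reads the pipeline config together with the checkpoint
+# prefix copied above and writes a frozen graph plus a SavedModel under
+# --output_directory.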
+python object_detection/export_inference_graph.py \
+    --input_type image_tensor \
+    --pipeline_config_path object_detection/samples/configs/faster_rcnn_resnet101_pets.config \
+    --trained_checkpoint_prefix model.ckpt-${CHECKPOINT_NUMBER} \
+    --output_directory exported_graphs
+```
+
+Afterwards, you should see a directory named `exported_graphs` containing the
+SavedModel and frozen graph.
+
+## Configuring the Instance Segmentation Pipeline
+
+Mask prediction can be turned on for an object detection config by adding
+`predict_instance_masks: true` within the `MaskRCNNBoxPredictor`. Other
+parameters such as mask size, number of convolutions in the mask layer, and the
+convolution hyper parameters can be defined. We will use
+`mask_rcnn_resnet101_pets.config` as a starting point for configuring the
+instance segmentation pipeline. Everything above that was mentioned about object
+detection holds true for instance segmentation. Setting the training procedure
+and other details aside, an instance segmentation model is an object detection
+model with an additional head that predicts the object mask inside each
+predicted box. Please refer to the section on [Running an Instance Segmentation
+Model](instance_segmentation.md) for instructions on how to configure a model
+that predicts masks in addition to object bounding boxes.
+
+## What's Next
+
+Congratulations, you have now trained an object detector for various cats and
+dogs! There are different things you can do now:
+
+1. [Test your exported model using the provided Jupyter notebook.](running_notebook.md)
+2. [Experiment with different model configurations.](configuring_jobs.md)
+3. Train an object detector using your own data.
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf1.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf1.md
new file mode 100644
index 0000000000000000000000000000000000000000..f1577600963e1af99b6fdd192028a12622240cc2
--- /dev/null
+++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf1.md
@@ -0,0 +1,94 @@
+# Object Detection API with TensorFlow 1
+
+## Requirements
+
+[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB)](https://www.python.org/downloads/release/python-360/)
+[![TensorFlow 1.15](https://img.shields.io/badge/TensorFlow-1.15-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0)
+[![Protobuf Compiler >= 3.0](https://img.shields.io/badge/ProtoBuf%20Compiler-%3E3.0-brightgreen)](https://grpc.io/docs/protoc-installation/#install-using-a-package-manager)
+
+## Installation
+
+You can install the TensorFlow Object Detection API either with Python Package
+Installer (pip) or Docker. For local runs we recommend using Docker and for
+Google Cloud runs we recommend using pip.
+
+Clone the TensorFlow Models repository and proceed to one of the installation
+options.
+
+```bash
+git clone https://github.com/tensorflow/models.git
+```
+
+### Docker Installation
+
+```bash
+# From the root of the git repository
+docker build -f research/object_detection/dockerfiles/tf1/Dockerfile -t od .
+docker run -it od
+```
+
+### Python Package Installation
+
+```bash
+cd models/research
+# Compile protos.
+protoc object_detection/protos/*.proto --python_out=.
+# Install TensorFlow Object Detection API.
+cp object_detection/packages/tf1/setup.py .
+python -m pip install --use-feature=2020-resolver .
+```
+
+```bash
+# Test the installation.
+python object_detection/builders/model_builder_tf1_test.py +``` + +## Quick Start + +### Colabs + +* [Jupyter notebook for off-the-shelf inference](../colab_tutorials/object_detection_tutorial.ipynb) +* [Training a pet detector](running_pets.md) + +### Training and Evaluation + +To train and evaluate your models either locally or on Google Cloud see +[instructions](tf1_training_and_evaluation.md). + +## Model Zoo + +We provide a large collection of models that are trained on several datasets in +the [Model Zoo](tf1_detection_zoo.md). + +## Guides + +* + Configuring an object detection pipeline
+* Preparing inputs
+* Defining your own model architecture
+* Bringing in your own dataset
+* Supported object detection evaluation protocols
+* TPU compatible detection pipelines
+* Training and evaluation guide (CPU, GPU, or TPU)
+
+## Extras:
+
+* Exporting a trained model for inference
+* Exporting a trained model for TPU inference
+* Inference and evaluation on the Open Images dataset
+* Run an instance segmentation model
+* Run the evaluation for the Open Images Challenge 2018/2019
+* Running object detection on mobile devices with TensorFlow Lite
+* Context R-CNN documentation for data preparation, training, and export
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf1_detection_zoo.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf1_detection_zoo.md new file mode 100644 index 0000000000000000000000000000000000000000..6f002cd09bb8208e02af3d4960212470cd291555 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf1_detection_zoo.md @@ -0,0 +1,189 @@ +# TensorFlow 1 Detection Model Zoo + +[![TensorFlow 1.15](https://img.shields.io/badge/TensorFlow-1.15-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0) +[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB)](https://www.python.org/downloads/release/python-360/) + +We provide a collection of detection models pre-trained on the +[COCO dataset](http://cocodataset.org), the +[Kitti dataset](http://www.cvlibs.net/datasets/kitti/), the +[Open Images dataset](https://storage.googleapis.com/openimages/web/index.html), +the [AVA v2.1 dataset](https://research.google.com/ava/) the +[iNaturalist Species Detection Dataset](https://github.com/visipedia/inat_comp/blob/master/2017/README.md#bounding-boxes) +and the +[Snapshot Serengeti Dataset](http://lila.science/datasets/snapshot-serengeti). +These models can be useful for out-of-the-box inference if you are interested in +categories already in those datasets. They are also useful for initializing your +models when training on novel datasets. + +In the table below, we list each such pre-trained model including: + +* a model name that corresponds to a config file that was used to train this + model in the `samples/configs` directory, +* a download link to a tar.gz file containing the pre-trained model, +* model speed --- we report running time in ms per 600x600 image (including + all pre and post-processing), but please be aware that these timings depend + highly on one's specific hardware configuration (these timings were + performed using an Nvidia GeForce GTX TITAN X card) and should be treated + more as relative timings in many cases. Also note that desktop GPU timing + does not always reflect mobile run time. For example Mobilenet V2 is faster + on mobile devices than Mobilenet V1, but is slightly slower on desktop GPU. +* detector performance on subset of the COCO validation set, Open Images test + split, iNaturalist test split, or Snapshot Serengeti LILA.science test + split. as measured by the dataset-specific mAP measure. Here, higher is + better, and we only report bounding box mAP rounded to the nearest integer. +* Output types (`Boxes`, and `Masks` if applicable ) + +You can un-tar each tar.gz file via, e.g.,: + +``` +tar -xzvf ssd_mobilenet_v1_coco.tar.gz +``` + +Inside the un-tar'ed directory, you will find: + +* a graph proto (`graph.pbtxt`) +* a checkpoint (`model.ckpt.data-00000-of-00001`, `model.ckpt.index`, + `model.ckpt.meta`) +* a frozen graph proto with weights baked into the graph as constants + (`frozen_inference_graph.pb`) to be used for out of the box inference (try + this out in the Jupyter notebook!) +* a config file (`pipeline.config`) which was used to generate the graph. + These directly correspond to a config file in the + [samples/configs](https://github.com/tensorflow/models/tree/master/research/object_detection/samples/configs)) + directory but often with a modified score threshold. In the case of the + heavier Faster R-CNN models, we also provide a version of the model that + uses a highly reduced number of proposals for speed. 
+* Mobile model only: a TfLite file (`model.tflite`) that can be deployed on + mobile devices. + +Some remarks on frozen inference graphs: + +* If you try to evaluate the frozen graph, you may find performance numbers + for some of the models to be slightly lower than what we report in the below + tables. This is because we discard detections with scores below a threshold + (typically 0.3) when creating the frozen graph. This corresponds effectively + to picking a point on the precision recall curve of a detector (and + discarding the part past that point), which negatively impacts standard mAP + metrics. +* Our frozen inference graphs are generated using the + [v1.12.0](https://github.com/tensorflow/tensorflow/tree/v1.12.0) release + version of TensorFlow; this being said, each frozen inference graph can be + regenerated using your current version of TensorFlow by re-running the + [exporter](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/exporting_models.md), + pointing it at the model directory as well as the corresponding config file + in + [samples/configs](https://github.com/tensorflow/models/tree/master/research/object_detection/samples/configs). + +## COCO-trained models + +Model name | Speed (ms) | COCO mAP[^1] | Outputs +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :----------: | :-----: +[ssd_mobilenet_v1_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz) | 30 | 21 | Boxes +[ssd_mobilenet_v1_0.75_depth_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync_2018_07_03.tar.gz) | 26 | 18 | Boxes +[ssd_mobilenet_v1_quantized_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz) | 29 | 18 | Boxes +[ssd_mobilenet_v1_0.75_depth_quantized_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_0.75_depth_quantized_300x300_coco14_sync_2018_07_18.tar.gz) | 29 | 16 | Boxes +[ssd_mobilenet_v1_ppn_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco14_sync_2018_07_03.tar.gz) | 26 | 20 | Boxes +[ssd_mobilenet_v1_fpn_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz) | 56 | 32 | Boxes +[ssd_resnet_50_fpn_coco ☆](http://download.tensorflow.org/models/object_detection/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz) | 76 | 35 | Boxes +[ssd_mobilenet_v2_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_coco_2018_03_29.tar.gz) | 31 | 22 | Boxes +[ssd_mobilenet_v2_quantized_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz) | 29 | 22 | Boxes +[ssdlite_mobilenet_v2_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz) | 27 | 22 | Boxes +[ssd_inception_v2_coco](http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2018_01_28.tar.gz) | 42 | 24 | Boxes +[faster_rcnn_inception_v2_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_v2_coco_2018_01_28.tar.gz) | 58 | 28 | Boxes 
+[faster_rcnn_resnet50_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_coco_2018_01_28.tar.gz) | 89 | 30 | Boxes +[faster_rcnn_resnet50_lowproposals_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_lowproposals_coco_2018_01_28.tar.gz) | 64 | | Boxes +[rfcn_resnet101_coco](http://download.tensorflow.org/models/object_detection/rfcn_resnet101_coco_2018_01_28.tar.gz) | 92 | 30 | Boxes +[faster_rcnn_resnet101_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_coco_2018_01_28.tar.gz) | 106 | 32 | Boxes +[faster_rcnn_resnet101_lowproposals_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_lowproposals_coco_2018_01_28.tar.gz) | 82 | | Boxes +[faster_rcnn_inception_resnet_v2_atrous_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_coco_2018_01_28.tar.gz) | 620 | 37 | Boxes +[faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco_2018_01_28.tar.gz) | 241 | | Boxes +[faster_rcnn_nas](http://download.tensorflow.org/models/object_detection/faster_rcnn_nas_coco_2018_01_28.tar.gz) | 1833 | 43 | Boxes +[faster_rcnn_nas_lowproposals_coco](http://download.tensorflow.org/models/object_detection/faster_rcnn_nas_lowproposals_coco_2018_01_28.tar.gz) | 540 | | Boxes +[mask_rcnn_inception_resnet_v2_atrous_coco](http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_resnet_v2_atrous_coco_2018_01_28.tar.gz) | 771 | 36 | Masks +[mask_rcnn_inception_v2_coco](http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz) | 79 | 25 | Masks +[mask_rcnn_resnet101_atrous_coco](http://download.tensorflow.org/models/object_detection/mask_rcnn_resnet101_atrous_coco_2018_01_28.tar.gz) | 470 | 33 | Masks +[mask_rcnn_resnet50_atrous_coco](http://download.tensorflow.org/models/object_detection/mask_rcnn_resnet50_atrous_coco_2018_01_28.tar.gz) | 343 | 29 | Masks + +Note: The asterisk (☆) at the end of model name indicates that this model +supports TPU training. + +Note: If you download the tar.gz file of quantized models and un-tar, you will +get different set of files - a checkpoint, a config file and tflite frozen +graphs (txt/binary). 
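+For a quick test of one of these frozen inference graphs outside the Jupyter
+notebook, the graph can be loaded and run directly. A minimal TF1-style sketch,
+assuming `ssd_mobilenet_v1_coco_2018_01_28.tar.gz` from the table above has
+been un-tar'ed into the working directory (the tensor names are the standard
+ones baked into these exported graphs):
+
+```python
+import numpy as np
+import tensorflow.compat.v1 as tf
+
+# Load the frozen graph from an un-tar'ed zoo model (path is a placeholder).
+graph_def = tf.GraphDef()
+with tf.gfile.GFile("ssd_mobilenet_v1_coco_2018_01_28/frozen_inference_graph.pb", "rb") as f:
+    graph_def.ParseFromString(f.read())
+
+graph = tf.Graph()
+with graph.as_default():
+    tf.import_graph_def(graph_def, name="")
+
+with tf.Session(graph=graph) as sess:
+    image = np.zeros((1, 300, 300, 3), dtype=np.uint8)  # replace with a real uint8 image batch
+    boxes, scores, classes, num = sess.run(
+        ["detection_boxes:0", "detection_scores:0",
+         "detection_classes:0", "num_detections:0"],
+        feed_dict={"image_tensor:0": image})
+    print("num detections:", int(num[0]), "top score:", float(scores[0, 0]))
+```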
+ +### Mobile models + +Model name | Pixel 1 Latency (ms) | COCO mAP | Outputs +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------: | :------: | :-----: +[ssd_mobiledet_cpu_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobiledet_cpu_320x320_coco_2020_05_19.tar.gz) | 113 | 24.0 | Boxes +[ssd_mobilenet_v2_mnasfpn_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_mnasfpn_shared_box_predictor_320x320_coco_sync_2020_05_18.tar.gz) | 183 | 26.6 | Boxes +[ssd_mobilenet_v3_large_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v3_large_coco_2020_01_14.tar.gz) | 119 | 22.6 | Boxes +[ssd_mobilenet_v3_small_coco](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v3_small_coco_2020_01_14.tar.gz) | 43 | 15.4 | Boxes + +### Pixel4 Edge TPU models + +Model name | Pixel 4 Edge TPU Latency (ms) | COCO mAP (fp32/uint8) | Outputs +--------------------------------------------------------------------------------------------------------------------------------------------- | :---------------------------: | :-------------------: | :-----: +[ssd_mobiledet_edgetpu_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobiledet_edgetpu_320x320_coco_2020_05_19.tar.gz) | 6.9 | 25.9/25.6 | Boxes +[ssd_mobilenet_edgetpu_coco](https://storage.cloud.google.com/mobilenet_edgetpu/checkpoints/ssdlite_mobilenet_edgetpu_coco_quant.tar.gz) | 6.6 | -/24.3 | Boxes + +### Pixel4 DSP models + +Model name | Pixel 4 DSP Latency (ms) | COCO mAP (fp32/uint8) | Outputs +------------------------------------------------------------------------------------------------------------------------------------- | :----------------------: | :-------------------: | :-----: +[ssd_mobiledet_dsp_coco](http://download.tensorflow.org/models/object_detection/ssdlite_mobiledet_dsp_320x320_coco_2020_05_19.tar.gz) | 12.3 | 28.9/28.8 | Boxes + +## Kitti-trained models + +Model name | Speed (ms) | Pascal mAP@0.5 | Outputs +----------------------------------------------------------------------------------------------------------------------------------- | :--------: | :------------: | :-----: +[faster_rcnn_resnet101_kitti](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_kitti_2018_01_28.tar.gz) | 79 | 87 | Boxes + +## Open Images-trained models + +Model name | Speed (ms) | Open Images mAP@0.5[^2] | Outputs +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :---------------------: | :-----: +[faster_rcnn_inception_resnet_v2_atrous_oidv2](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_oid_2018_01_28.tar.gz) | 727 | 37 | Boxes +[faster_rcnn_inception_resnet_v2_atrous_lowproposals_oidv2](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_lowproposals_oid_2018_01_28.tar.gz) | 347 | | Boxes +[facessd_mobilenet_v2_quantized_open_image_v4](http://download.tensorflow.org/models/object_detection/facessd_mobilenet_v2_quantized_320x320_open_image_v4.tar.gz) [^3] | 20 | 73 (faces) | Boxes + +Model name | Speed (ms) | Open Images mAP@0.5[^4] | Outputs 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :---------------------: | :-----: +[faster_rcnn_inception_resnet_v2_atrous_oidv4](http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_oid_v4_2018_12_12.tar.gz) | 425 | 54 | Boxes +[ssd_mobilenetv2_oidv4](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_oid_v4_2018_12_12.tar.gz) | 89 | 36 | Boxes +[ssd_resnet_101_fpn_oidv4](http://download.tensorflow.org/models/object_detection/ssd_resnet101_v1_fpn_shared_box_predictor_oid_512x512_sync_2019_01_20.tar.gz) | 237 | 38 | Boxes + +## iNaturalist Species-trained models + +Model name | Speed (ms) | Pascal mAP@0.5 | Outputs +--------------------------------------------------------------------------------------------------------------------------------- | :--------: | :------------: | :-----: +[faster_rcnn_resnet101_fgvc](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_fgvc_2018_07_19.tar.gz) | 395 | 58 | Boxes +[faster_rcnn_resnet50_fgvc](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_fgvc_2018_07_19.tar.gz) | 366 | 55 | Boxes + +## AVA v2.1 trained models + +Model name | Speed (ms) | Pascal mAP@0.5 | Outputs +----------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :------------: | :-----: +[faster_rcnn_resnet101_ava_v2.1](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_ava_v2.1_2018_04_30.tar.gz) | 93 | 11 | Boxes + +## Snapshot Serengeti Camera Trap trained models + +Model name | COCO mAP@0.5 | Outputs +--------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------: | :-----: +[faster_rcnn_resnet101_snapshot_serengeti](http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_snapshot_serengeti_2020_06_10.tar.gz) | 38 | Boxes +[context_rcnn_resnet101_snapshot_serengeti](http://download.tensorflow.org/models/object_detection/context_rcnn_resnet101_snapshot_serengeti_2020_06_10.tar.gz) | 56 | Boxes + +[^1]: See [MSCOCO evaluation protocol](http://cocodataset.org/#detections-eval). + The COCO mAP numbers here are evaluated on COCO 14 minival set (note that + our split is different from COCO 17 Val). A full list of image ids used in + our split could be fould + [here](https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_minival_ids.txt). +[^2]: This is PASCAL mAP with a slightly different way of true positives + computation: see + [Open Images evaluation protocols](evaluation_protocols.md), + oid_V2_detection_metrics. +[^3]: Non-face boxes are dropped during training and non-face groundtruth boxes + are ignored when evaluating. +[^4]: This is Open Images Challenge metric: see + [Open Images evaluation protocols](evaluation_protocols.md), + oid_challenge_detection_metrics. 
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf1_training_and_evaluation.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf1_training_and_evaluation.md new file mode 100644 index 0000000000000000000000000000000000000000..76c601f1897be6f70e670130a13330a4eddb63d0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf1_training_and_evaluation.md @@ -0,0 +1,237 @@ +# Training and Evaluation with TensorFlow 1 + +[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB)](https://www.python.org/downloads/release/python-360/) +[![TensorFlow 1.15](https://img.shields.io/badge/TensorFlow-1.15-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0) + +This page walks through the steps required to train an object detection model. +It assumes the reader has completed the following prerequisites: + +1. The TensorFlow Object Detection API has been installed as documented in the + [installation instructions](tf1.md#installation). +2. A valid data set has been created. See [this page](preparing_inputs.md) for + instructions on how to generate a dataset for the PASCAL VOC challenge or + the Oxford-IIIT Pet dataset. + +## Recommended Directory Structure for Training and Evaluation + +```bash +. +├── data/ +│   ├── eval-00000-of-00001.tfrecord +│   ├── label_map.txt +│   ├── train-00000-of-00002.tfrecord +│   └── train-00001-of-00002.tfrecord +└── models/ + └── my_model_dir/ + ├── eval/ # Created by evaluation job. + ├── my_model.config + └── train/ # + └── model_ckpt-100-data@1 # Created by training job. + └── model_ckpt-100-index # + └── checkpoint # +``` + +## Writing a model configuration + +Please refer to sample [TF1 configs](../samples/configs) and +[configuring jobs](configuring_jobs.md) to create a model config. + +### Model Parameter Initialization + +While optional, it is highly recommended that users utilize classification or +object detection checkpoints. Training an object detector from scratch can take +days. To speed up the training process, it is recommended that users re-use the +feature extractor parameters from a pre-existing image classification or object +detection checkpoint. The`train_config` section in the config provides two +fields to specify pre-existing checkpoints: + +* `fine_tune_checkpoint`: a path prefix to the pre-existing checkpoint + (ie:"/usr/home/username/checkpoint/model.ckpt-#####"). + +* `fine_tune_checkpoint_type`: with value `classification` or `detection` + depending on the type. + +A list of detection checkpoints can be found [here](tf1_detection_zoo.md). + +## Local + +### Training + +A local training job can be run with the following command: + +```bash +# From the tensorflow/models/research/ directory +PIPELINE_CONFIG_PATH={path to pipeline config file} +MODEL_DIR={path to model directory} +NUM_TRAIN_STEPS=50000 +SAMPLE_1_OF_N_EVAL_EXAMPLES=1 +python object_detection/model_main.py \ + --pipeline_config_path=${PIPELINE_CONFIG_PATH} \ + --model_dir=${MODEL_DIR} \ + --num_train_steps=${NUM_TRAIN_STEPS} \ + --sample_1_of_n_eval_examples=${SAMPLE_1_OF_N_EVAL_EXAMPLES} \ + --alsologtostderr +``` + +where `${PIPELINE_CONFIG_PATH}` points to the pipeline config and `${MODEL_DIR}` +points to the directory in which training checkpoints and events will be +written. Note that this binary will interleave both training and evaluation. 
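+Before launching a job, it can save time to verify that the
+`fine_tune_checkpoint` prefix in your config actually resolves to a readable
+checkpoint, and, once training is running, that checkpoints are being written
+under the model directory. A small sketch; the zoo checkpoint directory and the
+`models/my_model_dir` layout from above are placeholders:
+
+```python
+import tensorflow as tf
+
+# 1) A fine-tune checkpoint prefix should resolve and list variables.
+ckpt_prefix = "faster_rcnn_resnet101_coco_2018_01_28/model.ckpt"
+print(len(tf.train.list_variables(ckpt_prefix)), "variables in", ckpt_prefix)
+
+# 2) While a job runs, the newest checkpoint written under the model directory:
+print(tf.train.latest_checkpoint("models/my_model_dir"))
+```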
+ +## Google Cloud AI Platform + +The TensorFlow Object Detection API supports training on Google Cloud AI +Platform. This section documents instructions on how to train and evaluate your +model using Cloud AI Platform. The reader should complete the following +prerequistes: + +1. The reader has created and configured a project on Google Cloud AI Platform. + See + [Using GPUs](https://cloud.google.com/ai-platform/training/docs/using-gpus) + and + [Using TPUs](https://cloud.google.com/ai-platform/training/docs/using-tpus) + guides. +2. The reader has a valid data set and stored it in a Google Cloud Storage + bucket. See [this page](preparing_inputs.md) for instructions on how to + generate a dataset for the PASCAL VOC challenge or the Oxford-IIIT Pet + dataset. + +Additionally, it is recommended users test their job by running training and +evaluation jobs for a few iterations [locally on their own machines](#local). + +### Training with multiple workers with single GPU + +Google Cloud ML requires a YAML configuration file for a multiworker training +job using GPUs. A sample YAML file is given below: + +``` +trainingInput: + runtimeVersion: "1.15" + scaleTier: CUSTOM + masterType: standard_gpu + workerCount: 9 + workerType: standard_gpu + parameterServerCount: 3 + parameterServerType: standard + +``` + +Please keep the following guidelines in mind when writing the YAML +configuration: + +* A job with n workers will have n + 1 training machines (n workers + 1 + master). +* The number of parameters servers used should be an odd number to prevent a + parameter server from storing only weight variables or only bias variables + (due to round robin parameter scheduling). +* The learning rate in the training config should be decreased when using a + larger number of workers. Some experimentation is required to find the + optimal learning rate. + +The YAML file should be saved on the local machine (not on GCP). Once it has +been written, a user can start a training job on Cloud ML Engine using the +following command: + +```bash +# From the tensorflow/models/research/ directory +cp object_detection/packages/tf1/setup.py . +gcloud ml-engine jobs submit training object_detection_`date +%m_%d_%Y_%H_%M_%S` \ + --runtime-version 1.15 \ + --python-version 3.6 \ + --job-dir=gs://${MODEL_DIR} \ + --package-path ./object_detection \ + --module-name object_detection.model_main \ + --region us-central1 \ + --config ${PATH_TO_LOCAL_YAML_FILE} \ + -- \ + --model_dir=gs://${MODEL_DIR} \ + --pipeline_config_path=gs://${PIPELINE_CONFIG_PATH} +``` + +Where `${PATH_TO_LOCAL_YAML_FILE}` is the local path to the YAML configuration, +`gs://${MODEL_DIR}` specifies the directory on Google Cloud Storage where the +training checkpoints and events will be written to and +`gs://${PIPELINE_CONFIG_PATH}` points to the pipeline configuration stored on +Google Cloud Storage. + +Users can monitor the progress of their training job on the +[ML Engine Dashboard](https://console.cloud.google.com/ai-platform/jobs). + +## Training with TPU + +Launching a training job with a TPU compatible pipeline config requires using a +similar command: + +```bash +# From the tensorflow/models/research/ directory +cp object_detection/packages/tf1/setup.py . 
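+# The job below runs object_detection.model_tpu_main on the BASIC_TPU scale
+# tier; no cluster YAML file is needed for TPU training (see the notes that
+# follow the command).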
+gcloud ml-engine jobs submit training `whoami`_object_detection_`date +%m_%d_%Y_%H_%M_%S` \
+    --job-dir=gs://${MODEL_DIR} \
+    --package-path ./object_detection \
+    --module-name object_detection.model_tpu_main \
+    --runtime-version 1.15 \
+    --python-version 3.6 \
+    --scale-tier BASIC_TPU \
+    --region us-central1 \
+    -- \
+    --tpu_zone us-central1 \
+    --model_dir=gs://${MODEL_DIR} \
+    --pipeline_config_path=gs://${PIPELINE_CONFIG_PATH}
+```
+
+In contrast with the GPU training command, there is no need to specify a YAML
+file, and we point to the *object_detection.model_tpu_main* binary instead of
+*object_detection.model_main*. We must also now set `scale-tier` to be
+`BASIC_TPU` and provide a `tpu_zone`. Finally, as before, `pipeline_config_path`
+points to the pipeline configuration stored on Google Cloud Storage (which must
+now describe a TPU-compatible model).
+
+## Evaluation with GPU
+
+Note: You only need to do this when training on TPU, since TPU training does
+not interleave evaluation with training the way multiworker GPU training does.
+
+Evaluation jobs run on a single machine, so it is not necessary to write a YAML
+configuration for evaluation. Run the following command to start the evaluation
+job:
+
+```bash
+# From the tensorflow/models/research/ directory
+cp object_detection/packages/tf1/setup.py .
+gcloud ml-engine jobs submit training object_detection_eval_`date +%m_%d_%Y_%H_%M_%S` \
+    --runtime-version 1.15 \
+    --python-version 3.6 \
+    --job-dir=gs://${MODEL_DIR} \
+    --package-path ./object_detection \
+    --module-name object_detection.model_main \
+    --region us-central1 \
+    --scale-tier BASIC_GPU \
+    -- \
+    --model_dir=gs://${MODEL_DIR} \
+    --pipeline_config_path=gs://${PIPELINE_CONFIG_PATH} \
+    --checkpoint_dir=gs://${MODEL_DIR}
+```
+
+Where `gs://${MODEL_DIR}` points to the directory on Google Cloud Storage where
+training checkpoints are saved (the same as for the training job) and where
+evaluation events will be written, and `gs://${PIPELINE_CONFIG_PATH}` points to
+where the pipeline configuration is stored on Google Cloud Storage.
+
+Typically one starts an evaluation job concurrently with the training job. Note
+that we do not support running evaluation on TPU, so the above command line for
+launching evaluation jobs is the same whether you are training on GPU or TPU.
+
+## Running Tensorboard
+
+Progress for training and eval jobs can be inspected using Tensorboard. If using
+the recommended directory structure, Tensorboard can be run using the following
+command:
+
+```bash
+tensorboard --logdir=${MODEL_DIR}
+```
+
+where `${MODEL_DIR}` points to the directory that contains the train and eval
+directories. Please note it may take Tensorboard a couple minutes to populate
+with data.
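+If you prefer to spot-check metrics without opening the Tensorboard UI, the
+eval event files written under the recommended directory structure can also be
+read directly; a small sketch, assuming the `models/my_model_dir/eval` layout
+shown earlier:
+
+```python
+import glob
+import tensorflow as tf
+
+# Print scalar summaries (e.g. mAP) from the eval event files.
+for event_file in glob.glob("models/my_model_dir/eval/events.out.tfevents.*"):
+    for event in tf.compat.v1.train.summary_iterator(event_file):
+        for value in event.summary.value:
+            if "mAP" in value.tag:
+                print(event.step, value.tag, value.simple_value)
+```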
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2.md new file mode 100644 index 0000000000000000000000000000000000000000..d45d157f3b94cff36a3a76fd18a9fe7b4f7a2d9c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2.md @@ -0,0 +1,87 @@ +# Object Detection API with TensorFlow 2 + +## Requirements + +[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB)](https://www.python.org/downloads/release/python-360/) +[![TensorFlow 2.2](https://img.shields.io/badge/TensorFlow-2.2-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v2.2.0) +[![Protobuf Compiler >= 3.0](https://img.shields.io/badge/ProtoBuf%20Compiler-%3E3.0-brightgreen)](https://grpc.io/docs/protoc-installation/#install-using-a-package-manager) + +## Installation + +You can install the TensorFlow Object Detection API either with Python Package +Installer (pip) or Docker. For local runs we recommend using Docker and for +Google Cloud runs we recommend using pip. + +Clone the TensorFlow Models repository and proceed to one of the installation +options. + +```bash +git clone https://github.com/tensorflow/models.git +``` + +### Docker Installation + +```bash +# From the root of the git repository +docker build -f research/object_detection/dockerfiles/tf2/Dockerfile -t od . +docker run -it od +``` + +### Python Package Installation + +```bash +cd models/research +# Compile protos. +protoc object_detection/protos/*.proto --python_out=. +# Install TensorFlow Object Detection API. +cp object_detection/packages/tf2/setup.py . +python -m pip install --use-feature=2020-resolver . +``` + +```bash +# Test the installation. +python object_detection/builders/model_builder_tf2_test.py +``` + +## Quick Start + +### Colabs + + + +* Training - + [Fine-tune a pre-trained detector in eager mode on custom data](../colab_tutorials/eager_few_shot_od_training_tf2_colab.ipynb) + +* Inference - + [Run inference with models from the zoo](../colab_tutorials/inference_tf2_colab.ipynb) + +* Few Shot Learning for Mobile Inference - + [Fine-tune a pre-trained detector for use with TensorFlow Lite](../colab_tutorials/eager_few_shot_od_training_tflite.ipynb) + + + +## Training and Evaluation + +To train and evaluate your models either locally or on Google Cloud see +[instructions](tf2_training_and_evaluation.md). + +## Model Zoo + +We provide a large collection of models that are trained on COCO 2017 in the +[Model Zoo](tf2_detection_zoo.md). + +## Guides + +* + Configuring an object detection pipeline
+* Preparing inputs
+* Defining your own model architecture
+* Bringing in your own dataset
+* Supported object detection evaluation protocols
+* TPU compatible detection pipelines
+* Training and evaluation guide (CPU, GPU, or TPU)
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2_classification_zoo.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2_classification_zoo.md new file mode 100644 index 0000000000000000000000000000000000000000..23c629ac0e9296b10d069159aed3b26bb2cb54e5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2_classification_zoo.md @@ -0,0 +1,25 @@ +# TensorFlow 2 Classification Model Zoo + +[![TensorFlow 2.2](https://img.shields.io/badge/TensorFlow-2.2-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v2.2.0) +[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB)](https://www.python.org/downloads/release/python-360/) + +We provide a collection of classification models pre-trained on the +[Imagenet](http://www.image-net.org). These can be used to initilize detection +model parameters. + +Model name | +---------- | +[EfficientNet B0](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b0.tar.gz) | +[EfficientNet B1](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b1.tar.gz) | +[EfficientNet B2](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b2.tar.gz) | +[EfficientNet B3](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b3.tar.gz) | +[EfficientNet B4](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b4.tar.gz) | +[EfficientNet B5](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b5.tar.gz) | +[EfficientNet B6](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b6.tar.gz) | +[EfficientNet B7](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b7.tar.gz) | +[Resnet V1 50](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/resnet50_v1.tar.gz) | +[Resnet V1 101](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/resnet101_v1.tar.gz) | +[Resnet V1 152](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/resnet152_v1.tar.gz) | +[Inception Resnet V2](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/inception_resnet_v2.tar.gz) | +[MobileNet V1](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/mobilnet_v1.tar.gz) | +[MobileNet V2](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/mobilnet_v2.tar.gz) | diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2_detection_zoo.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2_detection_zoo.md new file mode 100644 index 0000000000000000000000000000000000000000..f4c3a393a2668a871b71c16384c6d8a96288e31f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2_detection_zoo.md @@ -0,0 +1,67 @@ +# TensorFlow 2 Detection Model Zoo + +[![TensorFlow 2.2](https://img.shields.io/badge/TensorFlow-2.2-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v2.2.0) +[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB)](https://www.python.org/downloads/release/python-360/) + + + +We provide a collection of detection models 
pre-trained on the +[COCO 2017 dataset](http://cocodataset.org). These models can be useful for +out-of-the-box inference if you are interested in categories already in those +datasets. You can try it in our inference +[colab](../colab_tutorials/inference_tf2_colab.ipynb) + +They are also useful for initializing your models when training on novel +datasets. You can try this out on our few-shot training +[colab](../colab_tutorials/eager_few_shot_od_training_tf2_colab.ipynb). + +Please look at [this guide](running_on_mobile_tf2.md) for mobile inference. + + + +Finally, if you would like to train these models from scratch, you can find the +model configs in this [directory](../configs/tf2) (also in the linked +`tar.gz`s). + +Model name | Speed (ms) | COCO mAP | Outputs +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :----------: | :-----: +[CenterNet HourGlass104 512x512](http://download.tensorflow.org/models/object_detection/tf2/20200713/centernet_hg104_512x512_coco17_tpu-8.tar.gz) | 70 | 41.9 | Boxes +[CenterNet HourGlass104 Keypoints 512x512](http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_hg104_512x512_kpts_coco17_tpu-32.tar.gz) | 76 | 40.0/61.4 | Boxes/Keypoints +[CenterNet HourGlass104 1024x1024](http://download.tensorflow.org/models/object_detection/tf2/20200713/centernet_hg104_1024x1024_coco17_tpu-32.tar.gz) | 197 | 44.5 | Boxes +[CenterNet HourGlass104 Keypoints 1024x1024](http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_hg104_1024x1024_kpts_coco17_tpu-32.tar.gz) | 211 | 42.8/64.5 | Boxes/Keypoints +[CenterNet Resnet50 V1 FPN 512x512](http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_resnet50_v1_fpn_512x512_coco17_tpu-8.tar.gz) | 27 | 31.2 | Boxes +[CenterNet Resnet50 V1 FPN Keypoints 512x512](http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8.tar.gz) | 30 | 29.3/50.7 | Boxes/Keypoints +[CenterNet Resnet101 V1 FPN 512x512](http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_resnet101_v1_fpn_512x512_coco17_tpu-8.tar.gz) | 34 | 34.2 | Boxes +[CenterNet Resnet50 V2 512x512](http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_resnet50_v2_512x512_coco17_tpu-8.tar.gz) | 27 | 29.5 | Boxes +[CenterNet Resnet50 V2 Keypoints 512x512](http://download.tensorflow.org/models/object_detection/tf2/20200711/centernet_resnet50_v2_512x512_kpts_coco17_tpu-8.tar.gz) | 30 | 27.6/48.2 | Boxes/Keypoints +[EfficientDet D0 512x512](http://download.tensorflow.org/models/object_detection/tf2/20200711/efficientdet_d0_coco17_tpu-32.tar.gz) | 39 | 33.6 | Boxes +[EfficientDet D1 640x640](http://download.tensorflow.org/models/object_detection/tf2/20200711/efficientdet_d1_coco17_tpu-32.tar.gz) | 54 | 38.4 | Boxes +[EfficientDet D2 768x768](http://download.tensorflow.org/models/object_detection/tf2/20200711/efficientdet_d2_coco17_tpu-32.tar.gz) | 67 | 41.8 | Boxes +[EfficientDet D3 896x896](http://download.tensorflow.org/models/object_detection/tf2/20200711/efficientdet_d3_coco17_tpu-32.tar.gz) | 95 | 45.4 | Boxes +[EfficientDet D4 1024x1024](http://download.tensorflow.org/models/object_detection/tf2/20200711/efficientdet_d4_coco17_tpu-32.tar.gz) | 133 | 48.5 | Boxes +[EfficientDet D5 
1280x1280](http://download.tensorflow.org/models/object_detection/tf2/20200711/efficientdet_d5_coco17_tpu-32.tar.gz) | 222 | 49.7 | Boxes +[EfficientDet D6 1280x1280](http://download.tensorflow.org/models/object_detection/tf2/20200711/efficientdet_d6_coco17_tpu-32.tar.gz) | 268 | 50.5 | Boxes +[EfficientDet D7 1536x1536](http://download.tensorflow.org/models/object_detection/tf2/20200711/efficientdet_d7_coco17_tpu-32.tar.gz) | 325 | 51.2 | Boxes +[SSD MobileNet v2 320x320](http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_320x320_coco17_tpu-8.tar.gz) |19 | 20.2 | Boxes +[SSD MobileNet V1 FPN 640x640](http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8.tar.gz) | 48 | 29.1 | Boxes +[SSD MobileNet V2 FPNLite 320x320](http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz) | 22 | 22.2 | Boxes +[SSD MobileNet V2 FPNLite 640x640](http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz) | 39 | 28.2 | Boxes +[SSD ResNet50 V1 FPN 640x640 (RetinaNet50)](http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz) | 46 | 34.3 | Boxes +[SSD ResNet50 V1 FPN 1024x1024 (RetinaNet50)](http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8.tar.gz) | 87 | 38.3 | Boxes +[SSD ResNet101 V1 FPN 640x640 (RetinaNet101)](http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet101_v1_fpn_640x640_coco17_tpu-8.tar.gz) | 57 | 35.6 | Boxes +[SSD ResNet101 V1 FPN 1024x1024 (RetinaNet101)](http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8.tar.gz) | 104 | 39.5 | Boxes +[SSD ResNet152 V1 FPN 640x640 (RetinaNet152)](http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet152_v1_fpn_640x640_coco17_tpu-8.tar.gz) | 80 | 35.4 | Boxes +[SSD ResNet152 V1 FPN 1024x1024 (RetinaNet152)](http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8.tar.gz) | 111 | 39.6 | Boxes +[Faster R-CNN ResNet50 V1 640x640](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_resnet50_v1_640x640_coco17_tpu-8.tar.gz) | 53 | 29.3 | Boxes +[Faster R-CNN ResNet50 V1 1024x1024](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_resnet50_v1_1024x1024_coco17_tpu-8.tar.gz) | 65 | 31.0 | Boxes +[Faster R-CNN ResNet50 V1 800x1333](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_resnet50_v1_800x1333_coco17_gpu-8.tar.gz) | 65 | 31.6 | Boxes +[Faster R-CNN ResNet101 V1 640x640](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_resnet101_v1_640x640_coco17_tpu-8.tar.gz) | 55 | 31.8 | Boxes +[Faster R-CNN ResNet101 V1 1024x1024](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_resnet101_v1_1024x1024_coco17_tpu-8.tar.gz) | 72 | 37.1 | Boxes +[Faster R-CNN ResNet101 V1 800x1333](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_resnet101_v1_800x1333_coco17_gpu-8.tar.gz) | 77 | 36.6 | Boxes +[Faster R-CNN ResNet152 V1 640x640](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_resnet152_v1_640x640_coco17_tpu-8.tar.gz) | 64 | 32.4 | Boxes +[Faster R-CNN ResNet152 V1 
1024x1024](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_resnet152_v1_1024x1024_coco17_tpu-8.tar.gz) | 85 | 37.6 | Boxes +[Faster R-CNN ResNet152 V1 800x1333](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_resnet152_v1_800x1333_coco17_gpu-8.tar.gz) | 101 | 37.4 | Boxes +[Faster R-CNN Inception ResNet V2 640x640](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_inception_resnet_v2_640x640_coco17_tpu-8.tar.gz) | 206 | 37.7 | Boxes +[Faster R-CNN Inception ResNet V2 1024x1024](http://download.tensorflow.org/models/object_detection/tf2/20200711/faster_rcnn_inception_resnet_v2_1024x1024_coco17_tpu-8.tar.gz) | 236 | 38.7 | Boxes +[Mask R-CNN Inception ResNet V2 1024x1024](http://download.tensorflow.org/models/object_detection/tf2/20200711/mask_rcnn_inception_resnet_v2_1024x1024_coco17_gpu-8.tar.gz) | 301 | 39.0/34.6 | Boxes/Masks +[ExtremeNet](http://download.tensorflow.org/models/object_detection/tf2/20200711/extremenet.tar.gz) | -- | -- | Boxes diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2_training_and_evaluation.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2_training_and_evaluation.md new file mode 100644 index 0000000000000000000000000000000000000000..8d05a04f8dbf6d4e7e152ded8d7b936d8a70f3ad --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tf2_training_and_evaluation.md @@ -0,0 +1,285 @@ +# Training and Evaluation with TensorFlow 2 + +[![TensorFlow 2.2](https://img.shields.io/badge/TensorFlow-2.2-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v2.2.0) +[![Python 3.6](https://img.shields.io/badge/Python-3.6-3776AB)](https://www.python.org/downloads/release/python-360/) + +This page walks through the steps required to train an object detection model. +It assumes the reader has completed the following prerequisites: + +1. The TensorFlow Object Detection API has been installed as documented in the + [installation instructions](tf2.md#installation). +2. A valid data set has been created. See [this page](preparing_inputs.md) for + instructions on how to generate a dataset for the PASCAL VOC challenge or + the Oxford-IIIT Pet dataset. + +## Recommended Directory Structure for Training and Evaluation + +```bash +. +├── data/ +│   ├── eval-00000-of-00001.tfrecord +│   ├── label_map.txt +│   ├── train-00000-of-00002.tfrecord +│   └── train-00001-of-00002.tfrecord +└── models/ + └── my_model_dir/ + ├── eval/ # Created by evaluation job. + ├── my_model.config + └── model_ckpt-100-data@1 # + └── model_ckpt-100-index # Created by training job. + └── checkpoint # +``` + +## Writing a model configuration + +Please refer to sample [TF2 configs](../configs/tf2) and +[configuring jobs](configuring_jobs.md) to create a model config. + +### Model Parameter Initialization + +While optional, it is highly recommended that users utilize classification or +object detection checkpoints. Training an object detector from scratch can take +days. To speed up the training process, it is recommended that users re-use the +feature extractor parameters from a pre-existing image classification or object +detection checkpoint. The `train_config` section in the config provides two +fields to specify pre-existing checkpoints: + +* `fine_tune_checkpoint`: a path prefix to the pre-existing checkpoint + (ie:"/usr/home/username/checkpoint/model.ckpt-#####"). 
+ +* `fine_tune_checkpoint_type`: with value `classification` or `detection` + depending on the type. + +A list of classification checkpoints can be found +[here](tf2_classification_zoo.md). + +A list of detection checkpoints can be found [here](tf2_detection_zoo.md). + +## Local + +### Training + +A local training job can be run with the following command: + +```bash +# From the tensorflow/models/research/ directory +PIPELINE_CONFIG_PATH={path to pipeline config file} +MODEL_DIR={path to model directory} +python object_detection/model_main_tf2.py \ + --pipeline_config_path=${PIPELINE_CONFIG_PATH} \ + --model_dir=${MODEL_DIR} \ + --alsologtostderr +``` + +where `${PIPELINE_CONFIG_PATH}` points to the pipeline config and `${MODEL_DIR}` +points to the directory in which training checkpoints and events will be +written. + +### Evaluation + +A local evaluation job can be run with the following command: + +```bash +# From the tensorflow/models/research/ directory +PIPELINE_CONFIG_PATH={path to pipeline config file} +MODEL_DIR={path to model directory} +CHECKPOINT_DIR=${MODEL_DIR} +python object_detection/model_main_tf2.py \ + --pipeline_config_path=${PIPELINE_CONFIG_PATH} \ + --model_dir=${MODEL_DIR} \ + --checkpoint_dir=${CHECKPOINT_DIR} \ + --alsologtostderr +``` + +where `${CHECKPOINT_DIR}` points to the directory with checkpoints produced by +the training job. Evaluation events are written to `${MODEL_DIR}/eval`. + +## Google Cloud VM + +The TensorFlow Object Detection API supports training on Google Cloud with Deep +Learning GPU VMs and TPU VMs. This section documents instructions on how to +train and evaluate your model on them. The reader should complete the following +prerequisites: + +1. The reader has created and configured a GPU VM or TPU VM on Google Cloud with + TensorFlow >= 2.2.0. See + [TPU quickstart](https://cloud.google.com/tpu/docs/quickstart) and + [GPU quickstart](https://cloud.google.com/ai-platform/deep-learning-vm/docs/tensorflow_start_instance#with-one-or-more-gpus). + +2. The reader has installed the TensorFlow Object Detection API as documented + in the [installation instructions](tf2.md#installation) on the VM. + +3. The reader has a valid data set and stored it in a Google Cloud Storage + bucket or locally on the VM. See [this page](preparing_inputs.md) for + instructions on how to generate a dataset for the PASCAL VOC challenge or + the Oxford-IIIT Pet dataset. + +Additionally, it is recommended that users test their job by running training and +evaluation jobs for a few iterations [locally on their own machines](#local). + +### Training + +Training on GPU or TPU VMs is similar to local training. It can be launched +using the following command: + +```bash +# From the tensorflow/models/research/ directory +USE_TPU=true +TPU_NAME="MY_TPU_NAME" +PIPELINE_CONFIG_PATH={path to pipeline config file} +MODEL_DIR={path to model directory} +python object_detection/model_main_tf2.py \ + --pipeline_config_path=${PIPELINE_CONFIG_PATH} \ + --model_dir=${MODEL_DIR} \ + --use_tpu=${USE_TPU} \ # (optional) only required for TPU training. + --tpu_name=${TPU_NAME} \ # (optional) only required for TPU training. + --alsologtostderr +``` + +where `${PIPELINE_CONFIG_PATH}` points to the pipeline config and `${MODEL_DIR}` +points to the root directory for the files produced. Training checkpoints and +events are written to `${MODEL_DIR}`. Note that the paths can be either local +paths or paths to a GCS bucket. + +### Evaluation + +Evaluation is only supported on GPU.
Similar to local evaluation, it can be +launched using the following command: + +```bash +# From the tensorflow/models/research/ directory +PIPELINE_CONFIG_PATH={path to pipeline config file} +MODEL_DIR={path to model directory} +CHECKPOINT_DIR=${MODEL_DIR} +python object_detection/model_main_tf2.py \ + --pipeline_config_path=${PIPELINE_CONFIG_PATH} \ + --model_dir=${MODEL_DIR} \ + --checkpoint_dir=${CHECKPOINT_DIR} \ + --alsologtostderr +``` + +where `${CHECKPOINT_DIR}` points to the directory with checkpoints produced by +the training job. Evaluation events are written to `${MODEL_DIR}/eval`. Note +that the paths can be either local paths or paths to a GCS bucket. + +## Google Cloud AI Platform + +The TensorFlow Object Detection API also supports training on Google +Cloud AI Platform. This section documents instructions on how to train and +evaluate your model using Cloud ML. The reader should complete the following +prerequisites: + +1. The reader has created and configured a project on Google Cloud AI Platform. + See the + [Using GPUs](https://cloud.google.com/ai-platform/training/docs/using-gpus) + and + [Using TPUs](https://cloud.google.com/ai-platform/training/docs/using-tpus) + guides. +2. The reader has a valid data set and stored it in a Google Cloud Storage + bucket. See [this page](preparing_inputs.md) for instructions on how to + generate a dataset for the PASCAL VOC challenge or the Oxford-IIIT Pet + dataset. + +Additionally, it is recommended that users test their job by running training and +evaluation jobs for a few iterations [locally on their own machines](#local). + +### Training with multiple GPUs + +A user can start a training job on Cloud AI Platform using the following +command: + +```bash +# From the tensorflow/models/research/ directory +cp object_detection/packages/tf2/setup.py . +gcloud ai-platform jobs submit training object_detection_`date +%m_%d_%Y_%H_%M_%S` \ + --runtime-version 2.1 \ + --python-version 3.6 \ + --job-dir=gs://${MODEL_DIR} \ + --package-path ./object_detection \ + --module-name object_detection.model_main_tf2 \ + --region us-central1 \ + --master-machine-type n1-highcpu-16 \ + --master-accelerator count=8,type=nvidia-tesla-v100 \ + -- \ + --model_dir=gs://${MODEL_DIR} \ + --pipeline_config_path=gs://${PIPELINE_CONFIG_PATH} +``` + +Where `gs://${MODEL_DIR}` specifies the directory on Google Cloud Storage where +the training checkpoints and events will be written, and +`gs://${PIPELINE_CONFIG_PATH}` points to the pipeline configuration stored on +Google Cloud Storage. + +Users can monitor the progress of their training job on the +[ML Engine Dashboard](https://console.cloud.google.com/ai-platform/jobs). + +### Training with TPU + +Launching a training job with a TPU compatible pipeline config requires using a +similar command: + +```bash +# From the tensorflow/models/research/ directory +cp object_detection/packages/tf2/setup.py . +gcloud ai-platform jobs submit training `whoami`_object_detection_`date +%m_%d_%Y_%H_%M_%S` \ + --job-dir=gs://${MODEL_DIR} \ + --package-path ./object_detection \ + --module-name object_detection.model_main_tf2 \ + --runtime-version 2.1 \ + --python-version 3.6 \ + --scale-tier BASIC_TPU \ + --region us-central1 \ + -- \ + --use_tpu true \ + --model_dir=gs://${MODEL_DIR} \ + --pipeline_config_path=gs://${PIPELINE_CONFIG_PATH} +``` + +As before, `pipeline_config_path` points to the pipeline configuration stored on +Google Cloud Storage (but it must now be a TPU compatible config).
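
Both the GPU VM and AI Platform workflows above assume that the pipeline config already points at the correct Cloud Storage locations for the fine-tune checkpoint, label map, and TFRecords. The sketch below shows one way this could be done programmatically with the `pipeline_pb2.TrainEvalPipelineConfig` proto before copying the config to `gs://${PIPELINE_CONFIG_PATH}`. This is only a sketch: the helper name `rewrite_pipeline_paths` is made up for illustration, and the field names should be verified against the `pipeline.proto` shipped with your installed version of the API.

```python
# Sketch only: rewrite a few pipeline config paths before uploading the config.
# Assumes the TF Object Detection API is installed; field names follow
# pipeline.proto but should be double-checked against your installed version.
import tensorflow as tf
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2


def rewrite_pipeline_paths(config_path, out_path, checkpoint, label_map,
                           train_record, eval_record):
    # Load the existing pipeline config (a text-format TrainEvalPipelineConfig).
    pipeline = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.io.gfile.GFile(config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline)

    # Point the config at the checkpoint and data locations for this run.
    pipeline.train_config.fine_tune_checkpoint = checkpoint
    pipeline.train_input_reader.label_map_path = label_map
    pipeline.train_input_reader.tf_record_input_reader.input_path[:] = [train_record]
    pipeline.eval_input_reader[0].label_map_path = label_map
    pipeline.eval_input_reader[0].tf_record_input_reader.input_path[:] = [eval_record]

    # Write the updated config back out; GFile accepts local and gs:// paths.
    with tf.io.gfile.GFile(out_path, 'w') as f:
        f.write(text_format.MessageToString(pipeline))
```

After writing the updated config locally, it can be copied to the bucket (for example with `gsutil cp`) before submitting the training or evaluation job.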
+ +### Evaluating with GPU + +Evaluation jobs run on a single machine. Run the following command to start the +evaluation job: + +```bash +# From the tensorflow/models/research/ directory +cp object_detection/packages/tf2/setup.py . +gcloud ai-platform jobs submit training object_detection_eval_`date +%m_%d_%Y_%H_%M_%S` \ + --runtime-version 2.1 \ + --python-version 3.6 \ + --job-dir=gs://${MODEL_DIR} \ + --package-path ./object_detection \ + --module-name object_detection.model_main_tf2 \ + --region us-central1 \ + --scale-tier BASIC_GPU \ + -- \ + --model_dir=gs://${MODEL_DIR} \ + --pipeline_config_path=gs://${PIPELINE_CONFIG_PATH} \ + --checkpoint_dir=gs://${MODEL_DIR} +``` + +where `gs://${MODEL_DIR}` points to the directory on Google Cloud Storage where +training checkpoints are saved and `gs://{PIPELINE_CONFIG_PATH}` points to where +the model configuration file stored on Google Cloud Storage. Evaluation events +are written to `gs://${MODEL_DIR}/eval` + +Typically one starts an evaluation job concurrently with the training job. Note +that we do not support running evaluation on TPU. + +## Running Tensorboard + +Progress for training and eval jobs can be inspected using Tensorboard. If using +the recommended directory structure, Tensorboard can be run using the following +command: + +```bash +tensorboard --logdir=${MODEL_DIR} +``` + +where `${MODEL_DIR}` points to the directory that contains the train and eval +directories. Please note it may take Tensorboard a couple minutes to populate +with data. diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tpu_compatibility.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tpu_compatibility.md new file mode 100644 index 0000000000000000000000000000000000000000..411f1c55cf55140d49c66250e6273e8f6b12d50b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tpu_compatibility.md @@ -0,0 +1,196 @@ +# TPU compatible detection pipelines + +[TOC] + +The TensorFlow Object Detection API supports TPU training for some models. To +make models TPU compatible you need to make a few tweaks to the model config as +mentioned below. We also provide several sample configs that you can use as a +template. + +## TPU compatibility + +### Static shaped tensors + +TPU training currently requires all tensors in the TensorFlow Graph to have +static shapes. However, most of the sample configs in Object Detection API have +a few different tensors that are dynamically shaped. Fortunately, we provide +simple alternatives in the model configuration that modifies these tensors to +have static shape: + +* **Image tensors with static shape** - This can be achieved either by using a + `fixed_shape_resizer` that resizes images to a fixed spatial shape or by + setting `pad_to_max_dimension: true` in `keep_aspect_ratio_resizer` which + pads the resized images with zeros to the bottom and right. Padded image + tensors are correctly handled internally within the model. + + ``` + image_resizer { + fixed_shape_resizer { + height: 640 + width: 640 + } + } + ``` + + or + + ``` + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 640 + max_dimension: 640 + pad_to_max_dimension: true + } + } + ``` + +* **Groundtruth tensors with static shape** - Images in a typical detection + dataset have variable number of groundtruth boxes and associated classes. 
+ Setting `max_number_of_boxes` to a large enough number in `train_config` + pads the groundtruth tensors with zeros to a static shape. Padded + groundtruth tensors are correctly handled internally within the model. + + ``` + train_config: { + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" + batch_size: 64 + max_number_of_boxes: 200 + unpad_groundtruth_tensors: false + } + ``` + +### TPU friendly ops + +Although TPU supports a vast number of tensorflow ops, a few used in the +TensorFlow Object Detection API are unsupported. We list such ops below and +recommend compatible substitutes. + +* **Anchor sampling** - Typically we use hard example mining in standard SSD + pipeliens to balance positive and negative anchors that contribute to the + loss. Hard Example mining uses non max suppression as a subroutine and since + non max suppression is not currently supported on TPUs we cannot use hard + example mining. Fortunately, we provide an implementation of focal loss that + can be used instead of hard example mining. Remove `hard_example_miner` from + the config and substitute `weighted_sigmoid` classification loss with + `weighted_sigmoid_focal` loss. + + ``` + loss { + classification_loss { + weighted_sigmoid_focal { + alpha: 0.25 + gamma: 2.0 + } + } + localization_loss { + weighted_smooth_l1 { + } + } + classification_weight: 1.0 + localization_weight: 1.0 + } + ``` + +* **Target Matching** - Object detection API provides two choices for matcher + used in target assignment: `argmax_matcher` and `bipartite_matcher`. + Bipartite matcher is not currently supported on TPU, therefore we must + modify the configs to use `argmax_matcher`. Additionally, set + `use_matmul_gather: true` for efficiency on TPU. + + ``` + matcher { + argmax_matcher { + matched_threshold: 0.5 + unmatched_threshold: 0.5 + ignore_thresholds: false + negatives_lower_than_unmatched: true + force_match_for_each_row: true + use_matmul_gather: true + } + } + ``` + +### TPU training hyperparameters + +Object Detection training on TPU uses synchronous SGD. On a typical cloud TPU +with 8 cores we recommend batch sizes that are 8x large when compared to a GPU +config that uses asynchronous SGD. We also use fewer training steps (~ 1/100 x) +due to the large batch size. This necessitates careful tuning of some other +training parameters as listed below. + +* **Batch size** - Use the largest batch size that can fit on cloud TPU. + + ``` + train_config { + batch_size: 1024 + } + ``` + +* **Training steps** - Typically only 10s of thousands. + + ``` + train_config { + num_steps: 25000 + } + ``` + +* **Batch norm decay** - Use smaller decay constants (0.97 or 0.997) since we + take fewer training steps. + + ``` + batch_norm { + scale: true, + decay: 0.97, + epsilon: 0.001, + } + ``` + +* **Learning rate** - Use large learning rate with warmup. Scale learning rate + linearly with batch size. See `cosine_decay_learning_rate` or + `manual_step_learning_rate` for examples. + + ``` + learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: .04 + total_steps: 25000 + warmup_learning_rate: .013333 + warmup_steps: 2000 + } + } + ``` + + or + + ``` + learning_rate: { + manual_step_learning_rate { + warmup: true + initial_learning_rate: .01333 + schedule { + step: 2000 + learning_rate: 0.04 + } + schedule { + step: 15000 + learning_rate: 0.004 + } + } + } + ``` + +## Example TPU compatible configs + +We provide example config files that you can use to train your own models on TPU + +* ssd_mobilenet_v1_300x300
+* ssd_mobilenet_v1_ppn_300x300
+* ssd_mobilenet_v1_fpn_640x640 + (mobilenet based retinanet)
+* ssd_resnet50_v1_fpn_640x640 + (retinanet)
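
Before committing to one of the sample configs above, it can help to sanity-check the hyperparameter rules from the previous section, in particular the linear scaling of the learning rate with batch size and the warmup-then-cosine-decay schedule. The snippet below is a rough, standalone sketch of those rules and is not part of the Object Detection API; the helper names are made up, and the cosine curve only approximates the shape of `cosine_decay_learning_rate`.

```python
# Sketch: sanity-check "scale learning rate linearly with batch size" and the
# warmup + cosine decay schedule used in the sample TPU configs above.
# These helpers are illustrative only, not part of the Object Detection API.
import math


def scaled_learning_rate(base_lr, base_batch_size, tpu_batch_size):
    """Linear scaling rule: the learning rate grows in proportion to batch size."""
    return base_lr * tpu_batch_size / base_batch_size


def cosine_lr_with_warmup(step, lr_base, total_steps, warmup_lr, warmup_steps):
    """Rough shape of a cosine decay schedule with linear warmup."""
    if step < warmup_steps:
        return warmup_lr + (lr_base - warmup_lr) * step / warmup_steps
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    return 0.5 * lr_base * (1.0 + math.cos(math.pi * progress))


if __name__ == '__main__':
    # A config tuned for batch size 128 at lr 0.005, scaled to a TPU batch of 1024:
    print(scaled_learning_rate(0.005, 128, 1024))                     # 0.04, as in the sample config
    print(cosine_lr_with_warmup(0, 0.04, 25000, 0.013333, 2000))      # warmup start: 0.013333
    print(cosine_lr_with_warmup(25000, 0.04, 25000, 0.013333, 2000))  # decays to ~0 at the end
```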
+ +## Supported Meta architectures + +Currently, `SSDMetaArch` models are supported on TPUs. `FasterRCNNMetaArch` is +going to be supported soon. diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tpu_exporters.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tpu_exporters.md new file mode 100644 index 0000000000000000000000000000000000000000..4cc3395aea676e8e05fec9a5b86790edc7a9e36a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/tpu_exporters.md @@ -0,0 +1,37 @@ +# Object Detection TPU Inference Exporter + +[![TensorFlow 1.15](https://img.shields.io/badge/TensorFlow-1.15-FF6F00?logo=tensorflow)](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0) + +This package contains SavedModel Exporter for TPU Inference of object detection +models. + +## Usage + +This Exporter is intended for users who have trained models with CPUs / GPUs, +but would like to use them for inference on TPU without changing their code or +re-training their models. + +Users are assumed to have: + ++ `PIPELINE_CONFIG`: A pipeline_pb2.TrainEvalPipelineConfig config file; ++ `CHECKPOINT`: A model checkpoint trained on any device; + +and need to correctly set: + ++ `EXPORT_DIR`: Path to export SavedModel; ++ `INPUT_PLACEHOLDER`: Name of input placeholder in model's signature_def_map; ++ `INPUT_TYPE`: Type of input node, which can be one of 'image_tensor', + 'encoded_image_string_tensor', or 'tf_example'; ++ `USE_BFLOAT16`: Whether to use bfloat16 instead of float32 on TPU. + +The model can be exported with: + +``` +python object_detection/tpu_exporters/export_saved_model_tpu.py \ + --pipeline_config_file= \ + --ckpt_path= \ + --export_dir= \ + --input_placeholder_name= \ + --input_type= \ + --use_bfloat16= +``` diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/using_your_own_dataset.md b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/using_your_own_dataset.md new file mode 100644 index 0000000000000000000000000000000000000000..6192af2dda1320bc7c0961ee7f2c9dc9972148d1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/g3doc/using_your_own_dataset.md @@ -0,0 +1,209 @@ +# Preparing Inputs + +[TOC] + +To use your own dataset in TensorFlow Object Detection API, you must convert it +into the [TFRecord file format](https://www.tensorflow.org/api_guides/python/python_io#tfrecords_format_details). +This document outlines how to write a script to generate the TFRecord file. + +## Label Maps + +Each dataset is required to have a label map associated with it. This label map +defines a mapping from string class names to integer class Ids. The label map +should be a `StringIntLabelMap` text protobuf. Sample label maps can be found in +object_detection/data. Label maps should always start from id 1. + +## Dataset Requirements + +For every example in your dataset, you should have the following information: + +1. An RGB image for the dataset encoded as jpeg or png. +2. A list of bounding boxes for the image. Each bounding box should contain: + 1. A bounding box coordinates (with origin in top left corner) defined by 4 + floating point numbers [ymin, xmin, ymax, xmax]. Note that we store the + _normalized_ coordinates (x / width, y / height) in the TFRecord dataset. + 2. The class of the object in the bounding box. 
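
Because the TFRecord fields store normalized coordinates, a common first step is converting the absolute pixel boxes produced by annotation tools into `[ymin, xmin, ymax, xmax]` values in the `[0, 1]` range. The helper below is a minimal sketch (the function name is made up for illustration) and uses the box from the cat example that follows.

```python
# Sketch: convert absolute pixel corners to the normalized coordinates stored
# in the TFRecord (x / width, y / height). Helper name is illustrative only.
def normalize_box(xmin, ymin, xmax, ymax, image_width, image_height):
    """Converts absolute pixel corners to normalized (ymin, xmin, ymax, xmax)."""
    return (ymin / image_height, xmin / image_width,
            ymax / image_height, xmax / image_width)


# The cat box from the example below: corners (322, 174) and (1062, 761) in a
# 1200x1032 image come out to roughly (0.169, 0.268, 0.737, 0.885).
print(normalize_box(322.0, 174.0, 1062.0, 761.0, 1200.0, 1032.0))
```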
+ +## Example Image + +Consider the following image: + +![Example Image](img/example_cat.jpg "Example Image") + +with the following label map: + +``` +item { + id: 1 + name: 'Cat' +} + + +item { + id: 2 + name: 'Dog' +} +``` + +We can generate a tf.Example proto for this image using the following code: + +```python +import tensorflow as tf + +from object_detection.utils import dataset_util + + +def create_cat_tf_example(encoded_cat_image_data): + """Creates a tf.Example proto from sample cat image. + + Args: + encoded_cat_image_data: The jpg encoded data of the cat image. + + Returns: + example: The created tf.Example. + """ + + height = 1032 # Image height in pixels. + width = 1200 # Image width in pixels. + filename = b'example_cat.jpg' + image_format = b'jpg' + + xmins = [322.0 / 1200.0] + xmaxs = [1062.0 / 1200.0] + ymins = [174.0 / 1032.0] + ymaxs = [761.0 / 1032.0] + classes_text = [b'Cat'] + classes = [1] + + tf_example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature(filename), + 'image/source_id': dataset_util.bytes_feature(filename), + 'image/encoded': dataset_util.bytes_feature(encoded_cat_image_data), + 'image/format': dataset_util.bytes_feature(image_format), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), + 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), + 'image/object/class/label': dataset_util.int64_list_feature(classes), + })) + return tf_example +``` + +## Conversion Script Outline {#conversion-script-outline} + +A typical conversion script will look like the following: + +```python + +import tensorflow as tf + +from object_detection.utils import dataset_util + + +flags = tf.app.flags +flags.DEFINE_string('output_path', '', 'Path to output TFRecord') +FLAGS = flags.FLAGS + + +def create_tf_example(example): + # TODO(user): Populate the following variables from your example. + height = None # Image height + width = None # Image width + filename = None # Filename of the image.
Empty if image is not from file + encoded_image_data = None # Encoded image bytes + image_format = None # b'jpeg' or b'png' + + xmins = [] # List of normalized left x coordinates in bounding box (1 per box) + xmaxs = [] # List of normalized right x coordinates in bounding box + # (1 per box) + ymins = [] # List of normalized top y coordinates in bounding box (1 per box) + ymaxs = [] # List of normalized bottom y coordinates in bounding box + # (1 per box) + classes_text = [] # List of string class name of bounding box (1 per box) + classes = [] # List of integer class id of bounding box (1 per box) + + tf_example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature(filename), + 'image/source_id': dataset_util.bytes_feature(filename), + 'image/encoded': dataset_util.bytes_feature(encoded_image_data), + 'image/format': dataset_util.bytes_feature(image_format), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), + 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), + 'image/object/class/label': dataset_util.int64_list_feature(classes), + })) + return tf_example + + +def main(_): + writer = tf.python_io.TFRecordWriter(FLAGS.output_path) + + # TODO(user): Write code to read in your dataset into the examples variable. + + for example in examples: + tf_example = create_tf_example(example) + writer.write(tf_example.SerializeToString()) + + writer.close() + + +if __name__ == '__main__': + tf.app.run() + +``` + +Note: You may notice additional fields in some other datasets. They are +currently unused by the API and are optional. + +Note: Please refer to the section on [Running an Instance Segmentation +Model](instance_segmentation.md) for instructions on how to configure a model +that predicts masks in addition to object bounding boxes. + +## Sharding datasets + +When you have more than a few thousand examples, it is beneficial to shard your +dataset into multiple files: + +* The tf.data.Dataset API can read input examples in parallel, improving + throughput. +* The tf.data.Dataset API can shuffle the examples better with sharded files, + which slightly improves model performance. + +Instead of writing all tf.Example protos to a single file as shown in the +[conversion script outline](#conversion-script-outline), use the snippet below. + +```python +import contextlib2 +from object_detection.dataset_tools import tf_record_creation_util + +num_shards = 10 +output_filebase = '/path/to/train_dataset.record' + +with contextlib2.ExitStack() as tf_record_close_stack: + output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( + tf_record_close_stack, output_filebase, num_shards) + for index, example in enumerate(examples): + tf_example = create_tf_example(example) + output_shard_index = index % num_shards + output_tfrecords[output_shard_index].write(tf_example.SerializeToString()) +``` + +This will produce the following output files: + +```bash +/path/to/train_dataset.record-00000-of-00010 +/path/to/train_dataset.record-00001-of-00010 +... +/path/to/train_dataset.record-00009-of-00010 +``` + +which can then be used in the config file as below.
+ +```bash +tf_record_input_reader { + input_path: "/path/to/train_dataset.record-?????-of-00010" +} +``` diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/generate_tfrecord.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/generate_tfrecord.py new file mode 100644 index 0000000000000000000000000000000000000000..caad456d8bd86613a9aa74ccb6e934862c8e584a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/generate_tfrecord.py @@ -0,0 +1,168 @@ +""" Sample TensorFlow XML-to-TFRecord converter + +usage: generate_tfrecord.py [-h] [-x XML_DIR] [-l LABELS_PATH] [-o OUTPUT_PATH] [-i IMAGE_DIR] [-c CSV_PATH] + +optional arguments: + -h, --help show this help message and exit + -x XML_DIR, --xml_dir XML_DIR + Path to the folder where the input .xml files are stored. + -l LABELS_PATH, --labels_path LABELS_PATH + Path to the labels (.pbtxt) file. + -o OUTPUT_PATH, --output_path OUTPUT_PATH + Path of output TFRecord (.record) file. + -i IMAGE_DIR, --image_dir IMAGE_DIR + Path to the folder where the input image files are stored. Defaults to the same directory as XML_DIR. + -c CSV_PATH, --csv_path CSV_PATH + Path of output .csv file. If none provided, then no file will be written. +""" + +import os +import glob +import pandas as pd +import io +import xml.etree.ElementTree as ET +import argparse + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1) +import tensorflow.compat.v1 as tf +from PIL import Image +from object_detection.utils import dataset_util, label_map_util +from collections import namedtuple + +# Initiate argument parser +parser = argparse.ArgumentParser( + description="Sample TensorFlow XML-to-TFRecord converter") +parser.add_argument("-x", + "--xml_dir", + help="Path to the folder where the input .xml files are stored.", + type=str) +parser.add_argument("-l", + "--labels_path", + help="Path to the labels (.pbtxt) file.", type=str) +parser.add_argument("-o", + "--output_path", + help="Path of output TFRecord (.record) file.", type=str) +parser.add_argument("-i", + "--image_dir", + help="Path to the folder where the input image files are stored. " + "Defaults to the same directory as XML_DIR.", + type=str, default=None) +parser.add_argument("-c", + "--csv_path", + help="Path of output .csv file. If none provided, then no file will be " + "written.", + type=str, default=None) + +args = parser.parse_args() + +if args.image_dir is None: + args.image_dir = args.xml_dir + +label_map = label_map_util.load_labelmap(args.labels_path) +label_map_dict = label_map_util.get_label_map_dict(label_map) + + +def xml_to_csv(path): + """Iterates through all .xml files (generated by labelImg) in a given directory and combines + them in a single Pandas dataframe. 
+ + Parameters: + ---------- + path : str + The path containing the .xml files + Returns + ------- + Pandas DataFrame + The produced dataframe + """ + + xml_list = [] + for xml_file in glob.glob(path + '/*.xml'): + tree = ET.parse(xml_file) + root = tree.getroot() + for member in root.findall('object'): + value = (root.find('filename').text, + int(root.find('size')[0].text), + int(root.find('size')[1].text), + member[0].text, + int(member[4][0].text), + int(member[4][1].text), + int(member[4][2].text), + int(member[4][3].text) + ) + xml_list.append(value) + column_name = ['filename', 'width', 'height', + 'class', 'xmin', 'ymin', 'xmax', 'ymax'] + xml_df = pd.DataFrame(xml_list, columns=column_name) + return xml_df + + +def class_text_to_int(row_label): + return label_map_dict[row_label] + + +def split(df, group): + data = namedtuple('data', ['filename', 'object']) + gb = df.groupby(group) + return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)] + + +def create_tf_example(group, path): + with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid: + encoded_jpg = fid.read() + encoded_jpg_io = io.BytesIO(encoded_jpg) + image = Image.open(encoded_jpg_io) + width, height = image.size + + filename = group.filename.encode('utf8') + image_format = b'jpg' + xmins = [] + xmaxs = [] + ymins = [] + ymaxs = [] + classes_text = [] + classes = [] + + for index, row in group.object.iterrows(): + xmins.append(row['xmin'] / width) + xmaxs.append(row['xmax'] / width) + ymins.append(row['ymin'] / height) + ymaxs.append(row['ymax'] / height) + classes_text.append(row['class'].encode('utf8')) + classes.append(class_text_to_int(row['class'])) + + tf_example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': dataset_util.int64_feature(height), + 'image/width': dataset_util.int64_feature(width), + 'image/filename': dataset_util.bytes_feature(filename), + 'image/source_id': dataset_util.bytes_feature(filename), + 'image/encoded': dataset_util.bytes_feature(encoded_jpg), + 'image/format': dataset_util.bytes_feature(image_format), + 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), + 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), + 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), + 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), + 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), + 'image/object/class/label': dataset_util.int64_list_feature(classes), + })) + return tf_example + + +def main(_): + + writer = tf.python_io.TFRecordWriter(args.output_path) + path = os.path.join(args.image_dir) + examples = xml_to_csv(args.xml_dir) + grouped = split(examples, 'filename') + for group in grouped: + tf_example = create_tf_example(group, path) + writer.write(tf_example.SerializeToString()) + writer.close() + print('Successfully created the TFRecord file: {}'.format(args.output_path)) + if args.csv_path is not None: + examples.to_csv(args.csv_path, index=None) + print('Successfully created the CSV file: {}'.format(args.csv_path)) + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1000.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1000.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0f015cc0eccfe6a4287be7904e412f7a499a783 Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1000.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1000.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1000.xml new file mode 100644 index 0000000000000000000000000000000000000000..5a5d2b7fa7690219d1ff79af5be77aa3cc0e595e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1000.xml @@ -0,0 +1,86 @@ + + test + frame1000.jpg + /home/job/workspace/virtuallab/test/frame1000.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 2 + 166 + 108 + 381 + + + + corobot + Unspecified + 0 + 0 + + 202 + 64 + 282 + 312 + + + + corobot + Unspecified + 0 + 0 + + 187 + 416 + 270 + 566 + + + + corobot + Unspecified + 0 + 0 + + 438 + 77 + 541 + 298 + + + + corobot + Unspecified + 0 + 0 + + 430 + 491 + 523 + 653 + + + + myrobot + Unspecified + 0 + 0 + + 270 + 235 + 350 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1035.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1035.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f683cb8d0b28593eb6a48da23a5f04fae0618dc3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1035.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1035.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1035.xml new file mode 100644 index 0000000000000000000000000000000000000000..24d6da5d6bd36c82cbc16a80ef55e8b92662174e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1035.xml @@ -0,0 +1,86 @@ + + test + frame1035.jpg + /home/job/workspace/virtuallab/test/frame1035.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 272 + 232 + 352 + 442 + + + + corobot + Unspecified + 1 + 0 + + 1 + 166 + 106 + 379 + + + + corobot + Unspecified + 0 + 0 + + 171 + 54 + 257 + 296 + + + + corobot + Unspecified + 0 + 0 + + 453 + 95 + 547 + 320 + + + + corobot + Unspecified + 0 + 0 + + 200 + 375 + 282 + 540 + + + + corobot + Unspecified + 0 + 0 + + 407 + 487 + 504 + 645 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1040.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1040.jpg new file mode 100644 index 0000000000000000000000000000000000000000..217a501ae9025ceab7ec302eea14a303a5f611e7 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1040.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1040.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1040.xml new file mode 100644 index 0000000000000000000000000000000000000000..0082530ab8fb600a4d681e86b4483213b5e96e1d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1040.xml @@ -0,0 +1,86 @@ + + test + frame1040.jpg + /home/job/workspace/virtuallab/test/frame1040.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 7 + 182 + 121 + 392 + + + + corobot + Unspecified + 0 + 0 + + 162 + 59 + 257 + 303 + + + + corobot + 
Unspecified + 0 + 0 + + 201 + 372 + 285 + 542 + + + + corobot + Unspecified + 0 + 0 + + 445 + 96 + 548 + 323 + + + + corobot + Unspecified + 0 + 0 + + 399 + 495 + 494 + 651 + + + + myrobot + Unspecified + 0 + 0 + + 268 + 235 + 353 + 447 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1045.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1045.jpg new file mode 100644 index 0000000000000000000000000000000000000000..214fb2fb5f449ad74ba4a9bfe33e46eab8e36730 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1045.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1045.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1045.xml new file mode 100644 index 0000000000000000000000000000000000000000..d544748f42bdb4046c9a97661f277639ba6515a0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1045.xml @@ -0,0 +1,86 @@ + + test + frame1045.jpg + /home/job/workspace/virtuallab/test/frame1045.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 267 + 232 + 354 + 448 + + + + corobot + Unspecified + 0 + 0 + + 45 + 218 + 148 + 421 + + + + corobot + Unspecified + 0 + 0 + + 126 + 86 + 219 + 322 + + + + corobot + Unspecified + 0 + 0 + + 163 + 331 + 251 + 509 + + + + corobot + Unspecified + 0 + 0 + + 414 + 129 + 516 + 351 + + + + corobot + Unspecified + 0 + 0 + + 376 + 516 + 471 + 668 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1050.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1050.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87eda87832f86d200eddedd477a644d70ea71af2 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1050.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1050.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1050.xml new file mode 100644 index 0000000000000000000000000000000000000000..26e2486b67a2f97cea112db9b9f77d5ff20ba835 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame1050.xml @@ -0,0 +1,86 @@ + + test + frame1050.jpg + /home/job/workspace/virtuallab/test/frame1050.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 130 + 87 + 226 + 320 + + + + corobot + Unspecified + 0 + 0 + + 32 + 208 + 143 + 412 + + + + corobot + Unspecified + 0 + 0 + + 429 + 121 + 520 + 346 + + + + corobot + Unspecified + 0 + 0 + + 388 + 513 + 479 + 662 + + + + corobot + Unspecified + 0 + 0 + + 159 + 329 + 251 + 515 + + + + myrobot + Unspecified + 0 + 0 + + 273 + 229 + 354 + 450 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2025.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2025.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f9ce7d96e2dd17fc60d7c3d3ed23a1c659d9c99 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2025.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2025.xml 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2025.xml new file mode 100644 index 0000000000000000000000000000000000000000..35b9c31f9e31eab83cc1228dd0657ee257d3c161 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2025.xml @@ -0,0 +1,86 @@ + + test + frame2025.jpg + /home/job/workspace/virtuallab/test/frame2025.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 162 + 53 + 254 + 298 + + + + corobot + Unspecified + 0 + 0 + + 497 + 160 + 609 + 373 + + + + corobot + Unspecified + 0 + 0 + + 89 + 416 + 194 + 571 + + + + corobot + Unspecified + 0 + 0 + + 344 + 317 + 438 + 512 + + + + corobot + Unspecified + 0 + 0 + + 247 + 478 + 342 + 635 + + + + myrobot + Unspecified + 0 + 0 + + 265 + 253 + 348 + 448 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2035.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2035.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7aee262f311ba71ef4534c4e1944c206fa715d4b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2035.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2035.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2035.xml new file mode 100644 index 0000000000000000000000000000000000000000..6c0eb3ca7528461e9dfdc36a46fccb91ef595f5f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2035.xml @@ -0,0 +1,86 @@ + + test + frame2035.jpg + /home/job/workspace/virtuallab/test/frame2035.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 113 + 50 + 211 + 286 + + + + corobot + Unspecified + 0 + 0 + + 80 + 352 + 178 + 524 + + + + corobot + Unspecified + 0 + 0 + + 194 + 514 + 280 + 661 + + + + corobot + Unspecified + 0 + 0 + + 382 + 315 + 473 + 500 + + + + corobot + Unspecified + 0 + 0 + + 489 + 113 + 598 + 322 + + + + myrobot + Unspecified + 0 + 0 + + 268 + 263 + 341 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2040.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2040.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b5be62c39de7329fa3117b59039c07f19b1327d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2040.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2040.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2040.xml new file mode 100644 index 0000000000000000000000000000000000000000..cd503ef7e3c446215454b6c00c38f9efbcb5f71f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2040.xml @@ -0,0 +1,86 @@ + + test + frame2040.jpg + /home/job/workspace/virtuallab/test/frame2040.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 110 + 44 + 210 + 285 + + + + corobot + Unspecified + 0 + 0 + + 79 + 347 + 180 + 531 + + + + corobot + Unspecified + 0 + 0 + + 382 + 309 + 480 + 500 + + + + corobot + Unspecified + 0 + 0 + + 202 + 498 + 295 + 651 + + + + corobot + Unspecified + 0 + 0 + + 495 + 109 + 604 + 330 + + + + myrobot + Unspecified 
+ 0 + 0 + + 267 + 256 + 341 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2045.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2045.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a4b2c76c76ddd55f8cd2ed6f7ad0b8f9d149107 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2045.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2045.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2045.xml new file mode 100644 index 0000000000000000000000000000000000000000..985cb4137d556d434676ffa3425084c8d9b0824e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2045.xml @@ -0,0 +1,86 @@ + + test + frame2045.jpg + /home/job/workspace/virtuallab/test/frame2045.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 146 + 85 + 240 + 310 + + + + corobot + Unspecified + 0 + 0 + + 110 + 389 + 207 + 559 + + + + corobot + Unspecified + 0 + 0 + + 238 + 465 + 332 + 621 + + + + corobot + Unspecified + 0 + 0 + + 351 + 350 + 442 + 532 + + + + corobot + Unspecified + 0 + 0 + + 514 + 137 + 630 + 354 + + + + myrobot + Unspecified + 0 + 0 + + 264 + 256 + 344 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2050.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2050.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de598946977e105ce799518a3ac2997ebf64a1c4 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2050.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2050.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2050.xml new file mode 100644 index 0000000000000000000000000000000000000000..b0fb30906338570e493e144cd7d284a0c233534b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2050.xml @@ -0,0 +1,86 @@ + + test + frame2050.jpg + /home/job/workspace/virtuallab/test/frame2050.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 142 + 72 + 237 + 309 + + + + corobot + Unspecified + 0 + 0 + + 109 + 382 + 208 + 558 + + + + corobot + Unspecified + 0 + 0 + + 226 + 475 + 321 + 632 + + + + corobot + Unspecified + 0 + 0 + + 348 + 354 + 442 + 535 + + + + corobot + Unspecified + 0 + 0 + + 510 + 121 + 629 + 347 + + + + myrobot + Unspecified + 0 + 0 + + 265 + 257 + 346 + 443 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2051.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2051.jpg new file mode 100644 index 0000000000000000000000000000000000000000..136b51a6ae6d4cdd628fe532fb648388f7d3de43 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2051.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2051.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2051.xml new file mode 100644 index 0000000000000000000000000000000000000000..12d3d2fa3fa99594a8f33a6126b271205dfe4b5d 
--- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame2051.xml @@ -0,0 +1,86 @@ + + test + frame2051.jpg + /home/job/workspace/virtuallab/test/frame2051.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 144 + 72 + 235 + 309 + + + + corobot + Unspecified + 0 + 0 + + 107 + 380 + 207 + 552 + + + + corobot + Unspecified + 0 + 0 + + 353 + 350 + 448 + 527 + + + + corobot + Unspecified + 0 + 0 + + 222 + 476 + 317 + 635 + + + + corobot + Unspecified + 0 + 0 + + 510 + 117 + 623 + 343 + + + + myrobot + Unspecified + 0 + 0 + + 265 + 259 + 347 + 448 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3035.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3035.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33e5f36edff96900fda462b89cd40166ede22c0f Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3035.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3035.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3035.xml new file mode 100644 index 0000000000000000000000000000000000000000..e269bd46c1ab49bde08182496a29db2b937d270e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3035.xml @@ -0,0 +1,86 @@ + + test + frame3035.jpg + /home/job/workspace/virtuallab/test/frame3035.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 13 + 396 + 126 + 556 + + + + corobot + Unspecified + 0 + 0 + + 414 + 443 + 503 + 598 + + + + corobot + Unspecified + 0 + 0 + + 142 + 166 + 234 + 381 + + + + corobot + Unspecified + 0 + 0 + + 440 + 149 + 536 + 358 + + + + corobot + Unspecified + 0 + 0 + + 338 + 2 + 418 + 224 + + + + myrobot + Unspecified + 0 + 0 + + 275 + 238 + 344 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3040.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3040.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59ffd7d485b8aec96c668ea72a9581c41ab95542 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3040.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3040.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3040.xml new file mode 100644 index 0000000000000000000000000000000000000000..a63e1f2967df2d800c54d1cbcd65024a8915b4bf --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3040.xml @@ -0,0 +1,86 @@ + + test + frame3040.jpg + /home/job/workspace/virtuallab/test/frame3040.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 12 + 391 + 121 + 552 + + + + corobot + Unspecified + 0 + 0 + + 412 + 446 + 503 + 598 + + + + corobot + Unspecified + 0 + 0 + + 440 + 147 + 531 + 357 + + + + corobot + Unspecified + 0 + 0 + + 137 + 173 + 227 + 382 + + + + corobot + Unspecified + 1 + 0 + + 326 + 1 + 405 + 232 + + + + myrobot + Unspecified + 0 + 0 + + 274 + 246 + 344 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3045.jpg 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3045.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a99f3d141f6b4211994468a6e0e20e75d33522a4 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3045.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3045.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3045.xml new file mode 100644 index 0000000000000000000000000000000000000000..37bb435ac8a8a01132264f8bef25db0669e00f8e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3045.xml @@ -0,0 +1,86 @@ + + test + frame3045.jpg + /home/job/workspace/virtuallab/test/frame3045.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 1 + 359 + 100 + 530 + + + + corobot + Unspecified + 0 + 0 + + 386 + 409 + 470 + 570 + + + + corobot + Unspecified + 0 + 0 + + 413 + 102 + 498 + 325 + + + + corobot + Unspecified + 0 + 0 + + 181 + 135 + 265 + 351 + + + + corobot + Unspecified + 0 + 0 + + 292 + 21 + 367 + 259 + + + + myrobot + Unspecified + 0 + 0 + + 274 + 264 + 346 + 444 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3050.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3050.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40da0e5369dea09290ad68396765fc34917d6924 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3050.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3050.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3050.xml new file mode 100644 index 0000000000000000000000000000000000000000..2424874db643512ae4a35439bc0710a6dd82f40d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame3050.xml @@ -0,0 +1,86 @@ + + test + frame3050.jpg + /home/job/workspace/virtuallab/test/frame3050.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 2 + 366 + 102 + 534 + + + + corobot + Unspecified + 0 + 0 + + 388 + 414 + 470 + 570 + + + + corobot + Unspecified + 0 + 0 + + 184 + 132 + 268 + 347 + + + + corobot + Unspecified + 0 + 0 + + 410 + 103 + 498 + 324 + + + + corobot + Unspecified + 0 + 0 + + 304 + 18 + 380 + 256 + + + + myrobot + Unspecified + 0 + 0 + + 276 + 259 + 346 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4025.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4025.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09e105fb0d4bf7c6aadbc30f9b55231b6ae9ce99 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4025.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4025.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4025.xml new file mode 100644 index 0000000000000000000000000000000000000000..6f1208c433e75b474056e247dea893f4dc12cc27 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4025.xml @@ -0,0 +1,86 @@ + + test + 
frame4025.jpg + /home/job/workspace/virtuallab/test/frame4025.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 266 + 235 + 351 + 452 + + + + corobot + Unspecified + 0 + 0 + + 139 + 4 + 236 + 247 + + + + corobot + Unspecified + 0 + 0 + + 75 + 400 + 180 + 556 + + + + corobot + Unspecified + 0 + 0 + + 348 + 158 + 429 + 360 + + + + corobot + Unspecified + 0 + 0 + + 381 + 408 + 471 + 588 + + + + corobot + Unspecified + 0 + 0 + + 497 + 296 + 610 + 480 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4035.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4035.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2206a52f42fa31da5b45d8d3dcb2e0718a70cfb9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4035.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4035.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4035.xml new file mode 100644 index 0000000000000000000000000000000000000000..64f197790d8a3aee00d26bd212d465e305a5202d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4035.xml @@ -0,0 +1,86 @@ + + test + frame4035.jpg + /home/job/workspace/virtuallab/test/frame4035.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 134 + 1 + 225 + 235 + + + + corobot + Unspecified + 0 + 0 + + 89 + 373 + 188 + 547 + + + + corobot + Unspecified + 0 + 0 + + 352 + 152 + 428 + 363 + + + + corobot + Unspecified + 0 + 0 + + 502 + 309 + 601 + 481 + + + + corobot + Unspecified + 0 + 0 + + 386 + 412 + 479 + 581 + + + + myrobot + Unspecified + 0 + 0 + + 266 + 237 + 347 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4040.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4040.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54aebe2fa82f92d7e6bb4259ad87f7e0f3582716 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4040.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4040.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4040.xml new file mode 100644 index 0000000000000000000000000000000000000000..438725263cc247b7d9d6cb6d5f58ac427e2c9382 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4040.xml @@ -0,0 +1,86 @@ + + test + frame4040.jpg + /home/job/workspace/virtuallab/test/frame4040.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 169 + 1 + 256 + 219 + + + + corobot + Unspecified + 0 + 0 + + 109 + 357 + 206 + 535 + + + + corobot + Unspecified + 0 + 0 + + 368 + 164 + 446 + 375 + + + + corobot + Unspecified + 0 + 0 + + 476 + 289 + 573 + 428 + + + + corobot + Unspecified + 0 + 0 + + 412 + 436 + 498 + 555 + + + + myrobot + Unspecified + 0 + 0 + + 270 + 240 + 342 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4045.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4045.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..dc41eed455529419b20c189215776c4c759754da Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4045.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4045.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4045.xml new file mode 100644 index 0000000000000000000000000000000000000000..a3e00a71bf398cd60e168e0086c2894a3aea6aaf --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4045.xml @@ -0,0 +1,86 @@ + + test + frame4045.jpg + /home/job/workspace/virtuallab/test/frame4045.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 134 + 1 + 224 + 232 + + + + corobot + Unspecified + 0 + 0 + + 79 + 386 + 177 + 558 + + + + corobot + Unspecified + 0 + 0 + + 357 + 146 + 437 + 358 + + + + corobot + Unspecified + 0 + 0 + + 384 + 414 + 469 + 581 + + + + corobot + Unspecified + 0 + 0 + + 493 + 299 + 592 + 480 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 232 + 348 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4050.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4050.jpg new file mode 100644 index 0000000000000000000000000000000000000000..471ba812f5d622f405385ab7ce77a6d181ddeee2 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4050.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4050.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4050.xml new file mode 100644 index 0000000000000000000000000000000000000000..a68cb3273f093d8ea70334c1bae80a336b2356dc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/test/frame4050.xml @@ -0,0 +1,86 @@ + + test + frame4050.jpg + /home/job/workspace/virtuallab/test/frame4050.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 102 + 14 + 193 + 251 + + + + corobot + Unspecified + 0 + 0 + + 54 + 404 + 153 + 568 + + + + corobot + Unspecified + 0 + 0 + + 344 + 122 + 418 + 337 + + + + corobot + Unspecified + 0 + 0 + + 364 + 432 + 445 + 596 + + + + corobot + Unspecified + 0 + 0 + + 511 + 330 + 618 + 501 + + + + myrobot + Unspecified + 0 + 0 + + 270 + 235 + 342 + 442 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1024.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1024.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ffcc8b9a018cf0115664b9ea7b1d87342b66f70d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1024.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1024.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1024.xml new file mode 100644 index 0000000000000000000000000000000000000000..d118de0c9bf916353cb2817c9dea29806ec82023 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1024.xml @@ -0,0 +1,86 @@ + + test + frame1024.jpg + /home/job/workspace/virtuallab/test/frame1024.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + 
Unspecified + 0 + 0 + + 7 + 197 + 123 + 408 + + + + corobot + Unspecified + 0 + 0 + + 162 + 63 + 246 + 307 + + + + corobot + Unspecified + 0 + 0 + + 187 + 373 + 273 + 535 + + + + corobot + Unspecified + 0 + 0 + + 441 + 123 + 542 + 332 + + + + corobot + Unspecified + 0 + 0 + + 394 + 491 + 481 + 652 + + + + myrobot + Unspecified + 0 + 0 + + 268 + 226 + 350 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1025.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1025.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6adfa401f7a3f2a5884fdb0b2f02d791d12ccac2 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1025.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1025.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1025.xml new file mode 100644 index 0000000000000000000000000000000000000000..4c2614ac248f39254b1fbfd2d3db279a55a49c24 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1025.xml @@ -0,0 +1,86 @@ + + test + frame1025.jpg + /home/job/workspace/virtuallab/test/frame1025.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 118 + 64 + 214 + 312 + + + + corobot + Unspecified + 0 + 0 + + 30 + 232 + 139 + 429 + + + + corobot + Unspecified + 0 + 0 + + 187 + 341 + 274 + 508 + + + + corobot + Unspecified + 0 + 0 + + 440 + 159 + 543 + 362 + + + + corobot + Unspecified + 0 + 0 + + 368 + 491 + 452 + 652 + + + + myrobot + Unspecified + 0 + 0 + + 273 + 231 + 353 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1026.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1026.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bdad646d3906246ceea2471d1c599b877948cc33 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1026.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1026.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1026.xml new file mode 100644 index 0000000000000000000000000000000000000000..2fc4602bd2a227deaae59f5813082cdd0cdda5fc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1026.xml @@ -0,0 +1,86 @@ + + test + frame1026.jpg + /home/job/workspace/virtuallab/test/frame1026.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 2 + 174 + 110 + 383 + + + + corobot + Unspecified + 0 + 0 + + 192 + 63 + 276 + 310 + + + + corobot + Unspecified + 0 + 0 + + 444 + 87 + 547 + 308 + + + + corobot + Unspecified + 0 + 0 + + 191 + 411 + 275 + 564 + + + + corobot + Unspecified + 0 + 0 + + 436 + 490 + 529 + 652 + + + + myrobot + Unspecified + 0 + 0 + + 274 + 236 + 350 + 444 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1027.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1027.jpg new file mode 100644 index 0000000000000000000000000000000000000000..681e8735a28c119a7e9af35ed617935a878bb28b Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1027.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1027.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1027.xml new file mode 100644 index 0000000000000000000000000000000000000000..0c55ef3fdc36715063a6a2874e4041435f5971a2 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1027.xml @@ -0,0 +1,86 @@ + + test + frame1027.jpg + /home/job/workspace/virtuallab/test/frame1027.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 139 + 70 + 231 + 315 + + + + corobot + Unspecified + 0 + 0 + + 23 + 214 + 131 + 418 + + + + corobot + Unspecified + 0 + 0 + + 184 + 351 + 269 + 520 + + + + corobot + Unspecified + 0 + 0 + + 436 + 134 + 531 + 349 + + + + corobot + Unspecified + 0 + 0 + + 391 + 495 + 478 + 654 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 232 + 353 + 449 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1028.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1028.jpg new file mode 100644 index 0000000000000000000000000000000000000000..154060ff5d3cf2af96eb34c8db487ebd65513369 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1028.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1028.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1028.xml new file mode 100644 index 0000000000000000000000000000000000000000..effb90621fa7bdd711dafab596575257c1ee847c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1028.xml @@ -0,0 +1,86 @@ + + test + frame1028.jpg + /home/job/workspace/virtuallab/test/frame1028.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 25 + 220 + 134 + 420 + + + + corobot + Unspecified + 0 + 0 + + 130 + 73 + 221 + 317 + + + + corobot + Unspecified + 0 + 0 + + 180 + 330 + 266 + 503 + + + + corobot + Unspecified + 0 + 0 + + 434 + 145 + 531 + 350 + + + + corobot + Unspecified + 0 + 0 + + 378 + 494 + 469 + 655 + + + + myrobot + Unspecified + 0 + 0 + + 271 + 230 + 353 + 447 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1029.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1029.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a633919c1039d38aadc24ac30ce84b97540470b0 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1029.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1029.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1029.xml new file mode 100644 index 0000000000000000000000000000000000000000..976b8fe2be9aa35c6d00d9bc23365e8237d43398 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1029.xml @@ -0,0 +1,86 @@ + + test + frame1029.jpg + /home/job/workspace/virtuallab/test/frame1029.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 271 + 232 + 351 + 446 + + + + corobot + Unspecified + 1 + 0 + + 1 + 161 + 106 + 
379 + + + + corobot + Unspecified + 0 + 0 + + 181 + 53 + 270 + 298 + + + + corobot + Unspecified + 0 + 0 + + 452 + 90 + 554 + 304 + + + + corobot + Unspecified + 0 + 0 + + 200 + 391 + 282 + 550 + + + + corobot + Unspecified + 0 + 0 + + 433 + 481 + 528 + 642 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1030.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1030.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e840bd4af401dc52abd8d70b246460d781653758 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1030.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1030.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1030.xml new file mode 100644 index 0000000000000000000000000000000000000000..bbae6792036be62f94bd8db924f21edd959c4839 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1030.xml @@ -0,0 +1,86 @@ + + test + frame1030.jpg + /home/job/workspace/virtuallab/test/frame1030.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 22 + 210 + 131 + 413 + + + + corobot + Unspecified + 0 + 0 + + 121 + 76 + 219 + 323 + + + + corobot + Unspecified + 0 + 0 + + 424 + 132 + 517 + 346 + + + + corobot + Unspecified + 0 + 0 + + 175 + 331 + 263 + 508 + + + + corobot + Unspecified + 0 + 0 + + 377 + 507 + 470 + 659 + + + + myrobot + Unspecified + 0 + 0 + + 270 + 228 + 352 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1031.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1031.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06e7f341e1a96233e747f562e71e2f2c5dbc003f Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1031.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1031.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1031.xml new file mode 100644 index 0000000000000000000000000000000000000000..506d6462714b803f4b6915e040a8f6b0be37aaf0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1031.xml @@ -0,0 +1,86 @@ + + test + frame1031.jpg + /home/job/workspace/virtuallab/test/frame1031.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 271 + 228 + 352 + 450 + + + + corobot + Unspecified + 0 + 0 + + 24 + 209 + 131 + 414 + + + + corobot + Unspecified + 0 + 0 + + 125 + 76 + 221 + 316 + + + + corobot + Unspecified + 0 + 0 + + 425 + 136 + 521 + 350 + + + + corobot + Unspecified + 0 + 0 + + 180 + 341 + 266 + 515 + + + + corobot + Unspecified + 0 + 0 + + 376 + 506 + 472 + 660 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1032.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1032.jpg new file mode 100644 index 0000000000000000000000000000000000000000..685503e8d41922fb76125f2f7940a0718a974ada Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1032.jpg differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1032.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1032.xml new file mode 100644 index 0000000000000000000000000000000000000000..86ee8b2b8d1222be8fa22791a9d75a776417afbc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1032.xml @@ -0,0 +1,86 @@ + + test + frame1032.jpg + /home/job/workspace/virtuallab/test/frame1032.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 167 + 56 + 259 + 302 + + + + corobot + Unspecified + 1 + 0 + + 1 + 171 + 107 + 385 + + + + corobot + Unspecified + 0 + 0 + + 451 + 97 + 548 + 320 + + + + corobot + Unspecified + 0 + 0 + + 205 + 387 + 287 + 549 + + + + corobot + Unspecified + 0 + 0 + + 417 + 481 + 514 + 642 + + + + myrobot + Unspecified + 0 + 0 + + 271 + 231 + 351 + 448 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1033.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1033.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7074d28a53a1bd433f72d68198d30810cd7e6712 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1033.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1033.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1033.xml new file mode 100644 index 0000000000000000000000000000000000000000..ac7ce19b777a58b4a55f61c2e11739887ee46c19 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1033.xml @@ -0,0 +1,86 @@ + + test + frame1033.jpg + /home/job/workspace/virtuallab/test/frame1033.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 270 + 233 + 352 + 449 + + + + corobot + Unspecified + 0 + 0 + + 33 + 221 + 139 + 420 + + + + corobot + Unspecified + 0 + 0 + + 108 + 88 + 207 + 328 + + + + corobot + Unspecified + 0 + 0 + + 413 + 146 + 503 + 359 + + + + corobot + Unspecified + 0 + 0 + + 170 + 326 + 255 + 502 + + + + corobot + Unspecified + 0 + 0 + + 367 + 516 + 453 + 668 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1034.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1034.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e365c7a7bbac1c16e029b8f88e3396287e0fb695 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1034.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1034.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1034.xml new file mode 100644 index 0000000000000000000000000000000000000000..9e766552369776c955fd756717a941a907228d7f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1034.xml @@ -0,0 +1,86 @@ + + test + frame1034.jpg + /home/job/workspace/virtuallab/test/frame1034.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 9 + 191 + 123 + 403 + + + + corobot + Unspecified + 0 + 0 + + 141 + 75 + 232 + 313 + + + + corobot + Unspecified + 0 + 0 + + 433 + 119 + 523 + 341 + + + + corobot + Unspecified + 0 + 0 + + 186 + 350 
+ 270 + 522 + + + + corobot + Unspecified + 0 + 0 + + 394 + 500 + 485 + 654 + + + + myrobot + Unspecified + 0 + 0 + + 270 + 235 + 351 + 444 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1036.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62a26ad22a0ea185b5ed5d407dcaef6f4daae66e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1036.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1036.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1036.xml new file mode 100644 index 0000000000000000000000000000000000000000..d835a60e0b55f4e7cf1f6439c7774f113d203a89 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1036.xml @@ -0,0 +1,86 @@ + + test + frame1036.jpg + /home/job/workspace/virtuallab/test/frame1036.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 35 + 217 + 143 + 419 + + + + corobot + Unspecified + 0 + 0 + + 112 + 87 + 211 + 323 + + + + corobot + Unspecified + 0 + 0 + + 412 + 144 + 505 + 354 + + + + corobot + Unspecified + 0 + 0 + + 165 + 322 + 251 + 497 + + + + corobot + Unspecified + 0 + 0 + + 356 + 525 + 444 + 673 + + + + myrobot + Unspecified + 0 + 0 + + 271 + 232 + 350 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1037.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1037.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4f42d300ae99411088a50edf1878419e31635cf Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1037.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1037.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1037.xml new file mode 100644 index 0000000000000000000000000000000000000000..ba14b6d2309195c29867e31b32398269ff2a9bc9 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1037.xml @@ -0,0 +1,86 @@ + + test + frame1037.jpg + /home/job/workspace/virtuallab/test/frame1037.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 269 + 235 + 354 + 446 + + + + corobot + Unspecified + 0 + 0 + + 2 + 183 + 116 + 393 + + + + corobot + Unspecified + 0 + 0 + + 152 + 68 + 246 + 302 + + + + corobot + Unspecified + 0 + 0 + + 441 + 105 + 539 + 334 + + + + corobot + Unspecified + 0 + 0 + + 193 + 364 + 280 + 536 + + + + corobot + Unspecified + 0 + 0 + + 390 + 501 + 485 + 653 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1038.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1038.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17c0b5e89e5b85f69e8a0fc57f5c14600d383271 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1038.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1038.xml 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1038.xml new file mode 100644 index 0000000000000000000000000000000000000000..9ce4bd265f2944f9b6e15e46959e547f9515ce05 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1038.xml @@ -0,0 +1,86 @@ + + test + frame1038.jpg + /home/job/workspace/virtuallab/test/frame1038.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 5 + 173 + 116 + 388 + + + + corobot + Unspecified + 0 + 0 + + 158 + 60 + 251 + 301 + + + + corobot + Unspecified + 0 + 0 + + 444 + 100 + 540 + 328 + + + + corobot + Unspecified + 0 + 0 + + 203 + 374 + 287 + 541 + + + + corobot + Unspecified + 0 + 0 + + 396 + 497 + 494 + 651 + + + + myrobot + Unspecified + 0 + 0 + + 270 + 233 + 354 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1039.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1039.jpg new file mode 100644 index 0000000000000000000000000000000000000000..affd0f26054d7fb6f3201734313bc11cbb32aef1 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1039.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1039.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1039.xml new file mode 100644 index 0000000000000000000000000000000000000000..d5729d1d48d2550357a7ce8c062f0a1fdd944086 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1039.xml @@ -0,0 +1,86 @@ + + test + frame1039.jpg + /home/job/workspace/virtuallab/test/frame1039.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 269 + 232 + 353 + 448 + + + + corobot + Unspecified + 0 + 0 + + 44 + 218 + 146 + 422 + + + + corobot + Unspecified + 0 + 0 + + 116 + 86 + 212 + 323 + + + + corobot + Unspecified + 0 + 0 + + 171 + 328 + 257 + 510 + + + + corobot + Unspecified + 0 + 0 + + 413 + 136 + 508 + 355 + + + + corobot + Unspecified + 0 + 0 + + 357 + 524 + 449 + 676 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1041.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1041.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0216364487c18d00d077f644e95991a6687b7f32 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1041.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1041.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1041.xml new file mode 100644 index 0000000000000000000000000000000000000000..e43ec75b5dce29f88a567574ee2dfc55f9c427b9 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1041.xml @@ -0,0 +1,86 @@ + + test + frame1041.jpg + /home/job/workspace/virtuallab/test/frame1041.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 267 + 233 + 355 + 446 + + + + corobot + Unspecified + 0 + 0 + + 12 + 185 + 125 + 396 + + + + corobot + Unspecified + 0 + 0 + + 161 + 61 + 254 + 300 + + + + corobot + Unspecified + 0 + 0 + + 449 + 100 + 545 + 328 + + + + corobot + Unspecified + 0 + 0 + + 194 + 365 + 281 + 536 + + + + corobot + 
Unspecified + 0 + 0 + + 401 + 491 + 498 + 650 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1042.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1042.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87344e54efc78bbed0f2309cc69d6a15f790c28f Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1042.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1042.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1042.xml new file mode 100644 index 0000000000000000000000000000000000000000..3e6605b03b83736ee39b892cbfe457e723497740 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1042.xml @@ -0,0 +1,86 @@ + + test + frame1042.jpg + /home/job/workspace/virtuallab/test/frame1042.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 41 + 215 + 147 + 421 + + + + corobot + Unspecified + 0 + 0 + + 121 + 85 + 228 + 319 + + + + corobot + Unspecified + 0 + 0 + + 161 + 327 + 253 + 505 + + + + corobot + Unspecified + 0 + 0 + + 419 + 131 + 512 + 351 + + + + corobot + Unspecified + 0 + 0 + + 370 + 518 + 461 + 669 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 233 + 352 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1043.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0b54f58f48e4e9e9956ef0aa3a8058cbdfe3be1 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1043.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1043.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1043.xml new file mode 100644 index 0000000000000000000000000000000000000000..09658186734305ab7cd961f118a73020723413bc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1043.xml @@ -0,0 +1,86 @@ + + test + frame1043.jpg + /home/job/workspace/virtuallab/test/frame1043.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 269 + 232 + 354 + 446 + + + + corobot + Unspecified + 0 + 0 + + 4 + 172 + 116 + 384 + + + + corobot + Unspecified + 0 + 0 + + 179 + 52 + 267 + 292 + + + + corobot + Unspecified + 0 + 0 + + 459 + 86 + 558 + 317 + + + + corobot + Unspecified + 0 + 0 + + 196 + 370 + 282 + 541 + + + + corobot + Unspecified + 0 + 0 + + 413 + 483 + 511 + 640 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1044.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1044.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac21f1c70a0f8a176b7f7bac4e20076ceffd7f0c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1044.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1044.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1044.xml new file mode 100644 index 
0000000000000000000000000000000000000000..838f36b61e1f3c4d494d83fa7ed775d71d9967f5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1044.xml @@ -0,0 +1,86 @@ + + test + frame1044.jpg + /home/job/workspace/virtuallab/test/frame1044.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 151 + 60 + 246 + 305 + + + + corobot + Unspecified + 0 + 0 + + 27 + 189 + 129 + 403 + + + + corobot + Unspecified + 0 + 0 + + 444 + 103 + 539 + 332 + + + + corobot + Unspecified + 0 + 0 + + 182 + 352 + 268 + 528 + + + + corobot + Unspecified + 0 + 0 + + 393 + 501 + 489 + 655 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 232 + 354 + 447 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1046.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1046.jpg new file mode 100644 index 0000000000000000000000000000000000000000..877a05f6175edb482897b9643aa520da28ae46c5 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1046.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1046.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1046.xml new file mode 100644 index 0000000000000000000000000000000000000000..d82e9f9268f411e1dcfe044ab5848651ee379d48 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1046.xml @@ -0,0 +1,86 @@ + + test + frame1046.jpg + /home/job/workspace/virtuallab/test/frame1046.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 1 + 169 + 119 + 381 + + + + corobot + Unspecified + 0 + 0 + + 176 + 52 + 269 + 296 + + + + corobot + Unspecified + 0 + 0 + + 462 + 86 + 562 + 317 + + + + corobot + Unspecified + 0 + 0 + + 203 + 379 + 289 + 550 + + + + corobot + Unspecified + 0 + 0 + + 424 + 478 + 524 + 634 + + + + myrobot + Unspecified + 0 + 0 + + 271 + 234 + 353 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1047.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1047.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2cc6e46ab16bf4c1bfa262d0a5792be9dd2a5ce Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1047.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1047.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1047.xml new file mode 100644 index 0000000000000000000000000000000000000000..82b4ac42f37e925c0bac5d8a52ed724fdff315bc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1047.xml @@ -0,0 +1,86 @@ + + test + frame1047.jpg + /home/job/workspace/virtuallab/test/frame1047.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 271 + 228 + 355 + 445 + + + + corobot + Unspecified + 0 + 0 + + 34 + 205 + 138 + 409 + + + + corobot + Unspecified + 0 + 0 + + 140 + 74 + 239 + 312 + + + + corobot + Unspecified + 0 + 0 + + 433 + 114 + 529 + 341 + + + + corobot + Unspecified + 0 + 0 + + 170 + 343 + 263 + 521 + + + + corobot + Unspecified + 0 + 0 + + 394 + 502 + 491 + 654 + + + diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1048.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1048.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c058eb37016114a21ad112ba9180b6340f14675 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1048.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1048.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1048.xml new file mode 100644 index 0000000000000000000000000000000000000000..411c0259f2526fa0b2f79b0c48a3ba1b013a5a66 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1048.xml @@ -0,0 +1,86 @@ + + test + frame1048.jpg + /home/job/workspace/virtuallab/test/frame1048.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 38 + 214 + 141 + 416 + + + + corobot + Unspecified + 0 + 0 + + 130 + 82 + 230 + 315 + + + + corobot + Unspecified + 0 + 0 + + 162 + 329 + 251 + 510 + + + + corobot + Unspecified + 0 + 0 + + 430 + 121 + 520 + 341 + + + + corobot + Unspecified + 0 + 0 + + 387 + 509 + 480 + 659 + + + + myrobot + Unspecified + 0 + 0 + + 272 + 233 + 354 + 444 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1049.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1049.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f7179749cef0248f5879aa4c6c497bf33e3cc719 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1049.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1049.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1049.xml new file mode 100644 index 0000000000000000000000000000000000000000..4b91ec60cae0ec7c0271ee3827f655df65ecee7a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1049.xml @@ -0,0 +1,86 @@ + + test + frame1049.jpg + /home/job/workspace/virtuallab/test/frame1049.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 271 + 233 + 356 + 448 + + + + corobot + Unspecified + 1 + 0 + + 1 + 162 + 117 + 379 + + + + corobot + Unspecified + 0 + 0 + + 174 + 53 + 266 + 295 + + + + corobot + Unspecified + 0 + 0 + + 464 + 88 + 562 + 316 + + + + corobot + Unspecified + 0 + 0 + + 198 + 373 + 284 + 545 + + + + corobot + Unspecified + 0 + 0 + + 425 + 476 + 523 + 634 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1051.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1051.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f79632e8e3bbf3c0e04a8add43939dbc9674ede1 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1051.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1051.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1051.xml new file mode 100644 index 0000000000000000000000000000000000000000..b203b1c1e4526de82965b012468814397712782c --- /dev/null +++ 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame1051.xml @@ -0,0 +1,86 @@ + + test + frame1051.jpg + /home/job/workspace/virtuallab/test/frame1051.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 272 + 231 + 354 + 444 + + + + corobot + Unspecified + 0 + 0 + + 31 + 200 + 139 + 405 + + + + corobot + Unspecified + 0 + 0 + + 131 + 82 + 231 + 315 + + + + corobot + Unspecified + 0 + 0 + + 429 + 123 + 523 + 354 + + + + corobot + Unspecified + 0 + 0 + + 167 + 335 + 256 + 516 + + + + corobot + Unspecified + 0 + 0 + + 386 + 514 + 483 + 664 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2000.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2000.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3839e5fa90d41ab0e48409564902aead9075528e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2000.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2000.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2000.xml new file mode 100644 index 0000000000000000000000000000000000000000..91c4acf761c0b1c7fdd63643d5023a2ad43f7d4f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2000.xml @@ -0,0 +1,86 @@ + + test + frame2000.jpg + /home/job/workspace/virtuallab/test/frame2000.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 79 + 51 + 179 + 300 + + + + corobot + Unspecified + 0 + 0 + + 497 + 87 + 607 + 308 + + + + corobot + Unspecified + 0 + 0 + + 85 + 329 + 193 + 500 + + + + corobot + Unspecified + 0 + 0 + + 413 + 318 + 503 + 514 + + + + corobot + Unspecified + 0 + 0 + + 185 + 513 + 275 + 668 + + + + myrobot + Unspecified + 0 + 0 + + 266 + 254 + 342 + 449 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2024.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2024.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a247a4fc846af0a54096d43db41681014834b4f0 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2024.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2024.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2024.xml new file mode 100644 index 0000000000000000000000000000000000000000..c0b672cba80360b61bd9dfba214a08e1c9ce6014 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2024.xml @@ -0,0 +1,86 @@ + + test + frame2024.jpg + /home/job/workspace/virtuallab/test/frame2024.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 265 + 250 + 341 + 449 + + + + corobot + Unspecified + 0 + 0 + + 127 + 52 + 222 + 301 + + + + corobot + Unspecified + 0 + 0 + + 493 + 140 + 610 + 346 + + + + corobot + Unspecified + 0 + 0 + + 88 + 379 + 193 + 538 + + + + corobot + Unspecified + 0 + 0 + + 225 + 494 + 314 + 646 + + + + corobot + Unspecified + 0 + 0 + + 367 + 318 + 455 + 516 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2026.jpg 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2026.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba1dfa28b94b2df96c0b46e790c6fa5a9abf2240 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2026.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2026.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2026.xml new file mode 100644 index 0000000000000000000000000000000000000000..e4dbf0a03b04111a7487fb8a3c3690ff99c29b4b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2026.xml @@ -0,0 +1,86 @@ + + test + frame2026.jpg + /home/job/workspace/virtuallab/test/frame2026.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 264 + 251 + 345 + 450 + + + + corobot + Unspecified + 0 + 0 + + 82 + 41 + 191 + 298 + + + + corobot + Unspecified + 0 + 0 + + 86 + 335 + 189 + 514 + + + + corobot + Unspecified + 0 + 0 + + 196 + 508 + 290 + 662 + + + + corobot + Unspecified + 0 + 0 + + 404 + 310 + 503 + 511 + + + + corobot + Unspecified + 0 + 0 + + 495 + 87 + 607 + 315 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2027.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2027.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c0b4a2b440c34318c282435b6560f28e1867a49 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2027.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2027.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2027.xml new file mode 100644 index 0000000000000000000000000000000000000000..803d49cd0e77a22c80e0b6a10b1416bc5c26e761 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2027.xml @@ -0,0 +1,86 @@ + + test + frame2027.jpg + /home/job/workspace/virtuallab/test/frame2027.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 135 + 54 + 235 + 308 + + + + corobot + Unspecified + 0 + 0 + + 96 + 388 + 196 + 555 + + + + corobot + Unspecified + 0 + 0 + + 497 + 140 + 612 + 349 + + + + corobot + Unspecified + 0 + 0 + + 236 + 485 + 322 + 641 + + + + corobot + Unspecified + 0 + 0 + + 357 + 326 + 446 + 518 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 253 + 343 + 448 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2028.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2028.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eeb3aa1a1e66540625e4f321702de5ad667348ab Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2028.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2028.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2028.xml new file mode 100644 index 0000000000000000000000000000000000000000..bc4541256cf1f9f24dfd08986ca4782eafd88a88 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2028.xml @@ -0,0 +1,86 @@ + + 
test + frame2028.jpg + /home/job/workspace/virtuallab/test/frame2028.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 270 + 254 + 339 + 446 + + + + corobot + Unspecified + 0 + 0 + + 343 + 329 + 428 + 518 + + + + corobot + Unspecified + 0 + 0 + + 498 + 147 + 611 + 355 + + + + corobot + Unspecified + 0 + 0 + + 94 + 396 + 199 + 559 + + + + corobot + Unspecified + 0 + 0 + + 239 + 482 + 327 + 637 + + + + corobot + Unspecified + 0 + 0 + + 148 + 65 + 240 + 309 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2029.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2029.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56b0897bc3cd039ad9b1ec31c9649e048824ab0b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2029.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2029.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2029.xml new file mode 100644 index 0000000000000000000000000000000000000000..c7179967bcd6904db5115539cf5ce26f7db95ba5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2029.xml @@ -0,0 +1,86 @@ + + test + frame2029.jpg + /home/job/workspace/virtuallab/test/frame2029.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 97 + 39 + 197 + 294 + + + + corobot + Unspecified + 0 + 0 + + 485 + 86 + 592 + 311 + + + + corobot + Unspecified + 0 + 0 + + 78 + 342 + 182 + 516 + + + + corobot + Unspecified + 0 + 0 + + 392 + 309 + 485 + 508 + + + + corobot + Unspecified + 0 + 0 + + 194 + 514 + 281 + 664 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 251 + 342 + 448 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2030.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2030.jpg new file mode 100644 index 0000000000000000000000000000000000000000..befd964d7c1a94488588f6c96f101935cf896d40 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2030.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2030.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2030.xml new file mode 100644 index 0000000000000000000000000000000000000000..b7c2893ecf193817bb2ea5ec2cb56190af165656 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2030.xml @@ -0,0 +1,86 @@ + + test + frame2030.jpg + /home/job/workspace/virtuallab/test/frame2030.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 270 + 251 + 341 + 446 + + + + corobot + Unspecified + 0 + 0 + + 150 + 74 + 241 + 315 + + + + corobot + Unspecified + 0 + 0 + + 509 + 141 + 618 + 362 + + + + corobot + Unspecified + 0 + 0 + + 109 + 401 + 204 + 560 + + + + corobot + Unspecified + 0 + 0 + + 341 + 337 + 427 + 522 + + + + corobot + Unspecified + 0 + 0 + + 235 + 483 + 323 + 637 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2031.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2031.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b7895781d88aff2e53c28be5fb04d9365eaf4125 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2031.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2031.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2031.xml new file mode 100644 index 0000000000000000000000000000000000000000..ebe401a175b0c42bcd2a2d2641e4aed8a7c9afc1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2031.xml @@ -0,0 +1,86 @@ + + test + frame2031.jpg + /home/job/workspace/virtuallab/test/frame2031.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 142 + 78 + 235 + 314 + + + + corobot + Unspecified + 0 + 0 + + 507 + 144 + 620 + 356 + + + + corobot + Unspecified + 0 + 0 + + 104 + 396 + 200 + 559 + + + + corobot + Unspecified + 0 + 0 + + 348 + 328 + 439 + 519 + + + + corobot + Unspecified + 0 + 0 + + 230 + 487 + 316 + 643 + + + + myrobot + Unspecified + 0 + 0 + + 266 + 259 + 340 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2032.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2032.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd0bb234e59e2f4e0b1a14f67c0170596fa16d17 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2032.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2032.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2032.xml new file mode 100644 index 0000000000000000000000000000000000000000..3591df044b078591f9d6a5a16b18a403e6cb3aad --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2032.xml @@ -0,0 +1,86 @@ + + test + frame2032.jpg + /home/job/workspace/virtuallab/test/frame2032.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + myrobot + Unspecified + 0 + 0 + + 268 + 261 + 339 + 446 + + + + corobot + Unspecified + 0 + 0 + + 112 + 45 + 205 + 284 + + + + corobot + Unspecified + 0 + 0 + + 485 + 96 + 592 + 325 + + + + corobot + Unspecified + 0 + 0 + + 80 + 355 + 181 + 525 + + + + corobot + Unspecified + 0 + 0 + + 391 + 308 + 483 + 499 + + + + corobot + Unspecified + 0 + 0 + + 195 + 514 + 288 + 664 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2033.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2033.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7014d5c3c552b588d74db17e668f7313833710c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2033.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2033.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2033.xml new file mode 100644 index 0000000000000000000000000000000000000000..661269d007e32ba97ea9c7a8dca711122b39aa74 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2033.xml @@ -0,0 +1,86 @@ + + test + frame2033.jpg + /home/job/workspace/virtuallab/test/frame2033.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + 
corobot + Unspecified + 0 + 0 + + 159 + 88 + 249 + 322 + + + + corobot + Unspecified + 0 + 0 + + 116 + 409 + 210 + 570 + + + + corobot + Unspecified + 0 + 0 + + 246 + 473 + 333 + 630 + + + + corobot + Unspecified + 0 + 0 + + 341 + 344 + 426 + 527 + + + + corobot + Unspecified + 0 + 0 + + 514 + 155 + 628 + 361 + + + + myrobot + Unspecified + 0 + 0 + + 268 + 259 + 337 + 442 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2034.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2034.jpg new file mode 100644 index 0000000000000000000000000000000000000000..330465cc4d637bc787ec19d227982eec758a3895 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2034.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2034.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2034.xml new file mode 100644 index 0000000000000000000000000000000000000000..8affc6d8a4422c03c24ca5e8b161dec0e56ec3a7 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2034.xml @@ -0,0 +1,86 @@ + + test + frame2034.jpg + /home/job/workspace/virtuallab/test/frame2034.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 135 + 67 + 226 + 304 + + + + corobot + Unspecified + 0 + 0 + + 94 + 380 + 194 + 549 + + + + corobot + Unspecified + 0 + 0 + + 361 + 329 + 453 + 514 + + + + corobot + Unspecified + 0 + 0 + + 217 + 496 + 306 + 650 + + + + corobot + Unspecified + 0 + 0 + + 502 + 122 + 609 + 341 + + + + myrobot + Unspecified + 0 + 0 + + 270 + 255 + 340 + 444 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2036.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f39d21e1a39f77d87163aae5d8ad30906f14d999 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2036.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2036.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2036.xml new file mode 100644 index 0000000000000000000000000000000000000000..87056ef44445868b9d2ffbb4faa25056c59d58cf --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2036.xml @@ -0,0 +1,86 @@ + + test + frame2036.jpg + /home/job/workspace/virtuallab/test/frame2036.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 155 + 95 + 251 + 320 + + + + corobot + Unspecified + 0 + 0 + + 115 + 403 + 213 + 568 + + + + corobot + Unspecified + 0 + 0 + + 243 + 475 + 328 + 628 + + + + corobot + Unspecified + 0 + 0 + + 339 + 349 + 429 + 530 + + + + corobot + Unspecified + 0 + 0 + + 519 + 164 + 637 + 371 + + + + myrobot + Unspecified + 0 + 0 + + 270 + 258 + 343 + 443 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2037.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2037.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1577406edefacf8944b9fb5cef3a52f54814fac6 Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2037.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2037.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2037.xml new file mode 100644 index 0000000000000000000000000000000000000000..ab4fc93b3f25ad3fda5f60d12bdae253289e1464 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2037.xml @@ -0,0 +1,86 @@ + + test + frame2037.jpg + /home/job/workspace/virtuallab/test/frame2037.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 121 + 56 + 222 + 293 + + + + corobot + Unspecified + 0 + 0 + + 85 + 364 + 190 + 537 + + + + corobot + Unspecified + 0 + 0 + + 373 + 318 + 467 + 510 + + + + corobot + Unspecified + 0 + 0 + + 200 + 505 + 298 + 651 + + + + corobot + Unspecified + 0 + 0 + + 503 + 123 + 615 + 344 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 257 + 345 + 449 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2038.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2038.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0e271791debbc66aea442bfd15aa3ec2f3d1f57 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2038.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2038.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2038.xml new file mode 100644 index 0000000000000000000000000000000000000000..19bfce7f1c89fd9196b00e29f4d974b30e061a3a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2038.xml @@ -0,0 +1,86 @@ + + test + frame2038.jpg + /home/job/workspace/virtuallab/test/frame2038.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 117 + 52 + 216 + 292 + + + + corobot + Unspecified + 0 + 0 + + 81 + 359 + 186 + 533 + + + + corobot + Unspecified + 0 + 0 + + 202 + 508 + 289 + 655 + + + + corobot + Unspecified + 0 + 0 + + 385 + 312 + 479 + 501 + + + + corobot + Unspecified + 0 + 0 + + 498 + 118 + 604 + 337 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 257 + 345 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2039.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2039.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c6a4d877862c17a959064d9e87290fab0d3a177 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2039.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2039.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2039.xml new file mode 100644 index 0000000000000000000000000000000000000000..0bbf9eec249febc8b35c17bf392b8b27328c5e40 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2039.xml @@ -0,0 +1,86 @@ + + test + frame2039.jpg + /home/job/workspace/virtuallab/test/frame2039.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 152 + 91 + 243 + 321 + + + + corobot + Unspecified + 0 + 0 + + 115 + 400 + 211 + 
563 + + + + corobot + Unspecified + 0 + 0 + + 247 + 467 + 331 + 630 + + + + corobot + Unspecified + 0 + 0 + + 349 + 348 + 437 + 531 + + + + corobot + Unspecified + 0 + 0 + + 524 + 157 + 641 + 368 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 255 + 342 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2041.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2041.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3fc8c59ae01c16f3e05258717a7c0d0963f2c4f Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2041.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2041.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2041.xml new file mode 100644 index 0000000000000000000000000000000000000000..1aee2d6c88bb70c3d2cd5f08f0811b9cd73da6b8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2041.xml @@ -0,0 +1,86 @@ + + test + frame2041.jpg + /home/job/workspace/virtuallab/test/frame2041.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 118 + 52 + 211 + 288 + + + + corobot + Unspecified + 0 + 0 + + 82 + 358 + 184 + 532 + + + + corobot + Unspecified + 0 + 0 + + 211 + 492 + 302 + 647 + + + + corobot + Unspecified + 0 + 0 + + 380 + 323 + 467 + 507 + + + + corobot + Unspecified + 0 + 0 + + 498 + 109 + 609 + 336 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 255 + 343 + 447 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2042.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2042.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73acf40a87d7b5d8eb1a3041326927f2fc324dc3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2042.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2042.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2042.xml new file mode 100644 index 0000000000000000000000000000000000000000..bb98762b4a13684f5866f3598376074042f23bb3 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2042.xml @@ -0,0 +1,86 @@ + + test + frame2042.jpg + /home/job/workspace/virtuallab/test/frame2042.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 144 + 82 + 238 + 311 + + + + corobot + Unspecified + 0 + 0 + + 518 + 141 + 635 + 359 + + + + corobot + Unspecified + 0 + 0 + + 107 + 389 + 207 + 556 + + + + corobot + Unspecified + 0 + 0 + + 346 + 356 + 435 + 532 + + + + corobot + Unspecified + 0 + 0 + + 242 + 469 + 328 + 626 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 255 + 344 + 444 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2043.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42de38a7c475527b82d8c26c53992a328ea3a478 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2043.jpg differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2043.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2043.xml new file mode 100644 index 0000000000000000000000000000000000000000..d7426f541b5b9b3d85050a81815eca0bf64cb5d7 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2043.xml @@ -0,0 +1,86 @@ + + test + frame2043.jpg + /home/job/workspace/virtuallab/test/frame2043.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 102 + 39 + 200 + 271 + + + + corobot + Unspecified + 0 + 0 + + 485 + 95 + 591 + 316 + + + + corobot + Unspecified + 0 + 0 + + 71 + 336 + 176 + 517 + + + + corobot + Unspecified + 0 + 0 + + 385 + 317 + 478 + 500 + + + + corobot + Unspecified + 0 + 0 + + 192 + 509 + 290 + 659 + + + + myrobot + Unspecified + 0 + 0 + + 264 + 255 + 344 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2044.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2044.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aab4d066fd8c628e7833eaa7192d8e1211e7b021 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2044.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2044.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2044.xml new file mode 100644 index 0000000000000000000000000000000000000000..5bf6e5d0534022f244fee11f26ef9cd53d6bb562 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2044.xml @@ -0,0 +1,86 @@ + + test + frame2044.jpg + /home/job/workspace/virtuallab/test/frame2044.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 124 + 52 + 217 + 296 + + + + corobot + Unspecified + 0 + 0 + + 91 + 365 + 190 + 536 + + + + corobot + Unspecified + 0 + 0 + + 212 + 490 + 307 + 641 + + + + corobot + Unspecified + 0 + 0 + + 368 + 334 + 460 + 522 + + + + corobot + Unspecified + 0 + 0 + + 501 + 119 + 616 + 340 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 255 + 344 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2046.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2046.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef345f6bb62ffe9516953ce559a847fe0f98a653 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2046.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2046.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2046.xml new file mode 100644 index 0000000000000000000000000000000000000000..eda74077a93ec211e71beb50b7d08eb80bb4e1c8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2046.xml @@ -0,0 +1,86 @@ + + test + frame2046.jpg + /home/job/workspace/virtuallab/test/frame2046.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 102 + 34 + 203 + 274 + + + + corobot + Unspecified + 0 + 0 + + 480 + 83 + 587 + 310 + + + + corobot + Unspecified + 0 + 0 + + 71 + 332 + 177 + 518 + + + + corobot + Unspecified + 0 + 0 + + 394 + 
306 + 484 + 497 + + + + corobot + Unspecified + 0 + 0 + + 189 + 502 + 286 + 656 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 255 + 342 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2047.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2047.jpg new file mode 100644 index 0000000000000000000000000000000000000000..648daaf9bac0092f1cab9de519618f7ea5892ff2 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2047.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2047.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2047.xml new file mode 100644 index 0000000000000000000000000000000000000000..4fa750607aa1a511daaf0c27a42ab27d32595b62 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2047.xml @@ -0,0 +1,86 @@ + + test + frame2047.jpg + /home/job/workspace/virtuallab/test/frame2047.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 133 + 65 + 228 + 305 + + + + corobot + Unspecified + 0 + 0 + + 101 + 377 + 203 + 549 + + + + corobot + Unspecified + 0 + 0 + + 505 + 118 + 617 + 339 + + + + corobot + Unspecified + 0 + 0 + + 233 + 473 + 321 + 631 + + + + corobot + Unspecified + 0 + 0 + + 357 + 342 + 448 + 527 + + + + myrobot + Unspecified + 0 + 0 + + 265 + 255 + 343 + 450 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2048.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2048.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cab76e10a3d7a5e7b05dfd7f22e82c05b34f3194 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2048.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2048.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2048.xml new file mode 100644 index 0000000000000000000000000000000000000000..4a94d173a522065f673d56c23e65ece71d460ac8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2048.xml @@ -0,0 +1,86 @@ + + test + frame2048.jpg + /home/job/workspace/virtuallab/test/frame2048.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 139 + 70 + 234 + 305 + + + + corobot + Unspecified + 0 + 0 + + 507 + 124 + 620 + 343 + + + + corobot + Unspecified + 0 + 0 + + 105 + 382 + 205 + 551 + + + + corobot + Unspecified + 0 + 0 + + 349 + 354 + 438 + 538 + + + + corobot + Unspecified + 0 + 0 + + 233 + 470 + 321 + 628 + + + + myrobot + Unspecified + 0 + 0 + + 268 + 253 + 344 + 444 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2049.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2049.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a2ee4985da0fe3a61b362f7f95fb393c3516bb2 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2049.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2049.xml 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2049.xml new file mode 100644 index 0000000000000000000000000000000000000000..c19914978bfa5eebeaa416a5833cc0bf28f33e8e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame2049.xml @@ -0,0 +1,86 @@ + + test + frame2049.jpg + /home/job/workspace/virtuallab/test/frame2049.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 105 + 32 + 203 + 274 + + + + corobot + Unspecified + 0 + 0 + + 480 + 83 + 588 + 313 + + + + corobot + Unspecified + 0 + 0 + + 74 + 339 + 179 + 517 + + + + corobot + Unspecified + 0 + 0 + + 385 + 316 + 477 + 505 + + + + corobot + Unspecified + 0 + 0 + + 185 + 506 + 280 + 657 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 250 + 345 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3000.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3000.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6805699a9b71359ee337ca5cd6e53df8c1d08899 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3000.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3000.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3000.xml new file mode 100644 index 0000000000000000000000000000000000000000..4b54742af96d820e2c7bf2c5513975f3d16e966a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3000.xml @@ -0,0 +1,86 @@ + + test + frame3000.jpg + /home/job/workspace/virtuallab/test/frame3000.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 344 + 1 + 428 + 234 + + + + corobot + Unspecified + 0 + 0 + + 112 + 152 + 215 + 384 + + + + corobot + Unspecified + 1 + 0 + + 1 + 433 + 125 + 582 + + + + corobot + Unspecified + 0 + 0 + + 428 + 193 + 526 + 394 + + + + corobot + Unspecified + 0 + 0 + + 440 + 422 + 542 + 600 + + + + myrobot + Unspecified + 0 + 0 + + 273 + 246 + 355 + 444 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3024.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3024.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f88becd061318addc614230c1b9a58b0744809d9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3024.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3024.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3024.xml new file mode 100644 index 0000000000000000000000000000000000000000..e3b1b2a7482f80bbdabbc552e46408ada9c5e9e9 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3024.xml @@ -0,0 +1,86 @@ + + test + frame3024.jpg + /home/job/workspace/virtuallab/test/frame3024.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 160 + 152 + 248 + 385 + + + + corobot + Unspecified + 1 + 0 + + 1 + 383 + 124 + 542 + + + + corobot + Unspecified + 1 + 0 + + 312 + 1 + 388 + 242 + + + + corobot + Unspecified + 0 + 0 + + 424 + 141 + 520 + 353 + + + + corobot + Unspecified + 0 + 0 + + 399 + 424 + 489 + 596 + + + + myrobot + 
Unspecified + 0 + 0 + + 275 + 250 + 344 + 448 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3025.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3025.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ade2c64fe95d60c6c35962c5ebcbb63e4a5bddc Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3025.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3025.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3025.xml new file mode 100644 index 0000000000000000000000000000000000000000..a856987c74dcc6eb233c7310a4a69cfdf6b3967b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3025.xml @@ -0,0 +1,86 @@ + + test + frame3025.jpg + /home/job/workspace/virtuallab/test/frame3025.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 3 + 360 + 123 + 529 + + + + corobot + Unspecified + 0 + 0 + + 365 + 422 + 452 + 596 + + + + corobot + Unspecified + 0 + 0 + + 423 + 107 + 525 + 326 + + + + corobot + Unspecified + 0 + 0 + + 279 + 5 + 363 + 257 + + + + corobot + Unspecified + 0 + 0 + + 189 + 150 + 271 + 379 + + + + myrobot + Unspecified + 0 + 0 + + 275 + 259 + 345 + 447 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3026.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3026.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2895d87ed63945a9ce2eabc8448937a4a9252b8d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3026.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3026.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3026.xml new file mode 100644 index 0000000000000000000000000000000000000000..bd2582ebcbdf0ca731e1095877816a489d90eab9 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3026.xml @@ -0,0 +1,86 @@ + + test + frame3026.jpg + /home/job/workspace/virtuallab/test/frame3026.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 3 + 431 + 123 + 586 + + + + corobot + Unspecified + 0 + 0 + + 436 + 423 + 526 + 597 + + + + corobot + Unspecified + 0 + 0 + + 429 + 179 + 526 + 387 + + + + corobot + Unspecified + 0 + 0 + + 118 + 154 + 208 + 384 + + + + corobot + Unspecified + 1 + 0 + + 334 + 1 + 416 + 228 + + + + myrobot + Unspecified + 0 + 0 + + 268 + 230 + 345 + 443 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3027.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3027.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7e6ae514c25c6aa2a85dbdd44c279af2573523c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3027.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3027.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3027.xml new file mode 100644 index 
0000000000000000000000000000000000000000..8309ddd6863f7cc10fc861802e004f78197afa24 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3027.xml @@ -0,0 +1,86 @@ + + test + frame3027.jpg + /home/job/workspace/virtuallab/test/frame3027.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 2 + 384 + 121 + 548 + + + + corobot + Unspecified + 0 + 0 + + 389 + 417 + 478 + 590 + + + + corobot + Unspecified + 0 + 0 + + 172 + 148 + 262 + 377 + + + + corobot + Unspecified + 0 + 0 + + 419 + 129 + 516 + 341 + + + + corobot + Unspecified + 0 + 0 + + 297 + 4 + 380 + 249 + + + + myrobot + Unspecified + 0 + 0 + + 275 + 253 + 344 + 450 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3028.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3028.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2911234c2339fe527a4c909e282a4cf17bf6925 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3028.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3028.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3028.xml new file mode 100644 index 0000000000000000000000000000000000000000..832000923cd73c95c52ff2e77355e3999d3f45fc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3028.xml @@ -0,0 +1,86 @@ + + test + frame3028.jpg + /home/job/workspace/virtuallab/test/frame3028.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 3 + 368 + 121 + 533 + + + + corobot + Unspecified + 0 + 0 + + 380 + 413 + 471 + 588 + + + + corobot + Unspecified + 0 + 0 + + 189 + 147 + 275 + 369 + + + + corobot + Unspecified + 0 + 0 + + 422 + 102 + 513 + 330 + + + + corobot + Unspecified + 0 + 0 + + 293 + 5 + 371 + 249 + + + + myrobot + Unspecified + 0 + 0 + + 280 + 254 + 345 + 443 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3029.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3029.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61a0f74659ee947e24f1fb32beb548e924b4c683 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3029.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3029.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3029.xml new file mode 100644 index 0000000000000000000000000000000000000000..394e4138064486fbab11760d224d7e31638a3876 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3029.xml @@ -0,0 +1,86 @@ + + test + frame3029.jpg + /home/job/workspace/virtuallab/test/frame3029.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 22 + 423 + 134 + 583 + + + + corobot + Unspecified + 0 + 0 + + 426 + 437 + 521 + 605 + + + + corobot + Unspecified + 0 + 0 + + 134 + 161 + 225 + 392 + + + + corobot + Unspecified + 1 + 0 + + 339 + 1 + 423 + 227 + + + + corobot + Unspecified + 0 + 0 + + 437 + 167 + 537 + 371 + + + + myrobot + Unspecified + 0 + 0 + + 274 + 235 + 344 + 444 + + + diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3030.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3030.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a65c1bc16cd725031775b68080aed243577826f Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3030.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3030.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3030.xml new file mode 100644 index 0000000000000000000000000000000000000000..56ad7b534cec5ed8f455903ed9d98bed2509c29a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3030.xml @@ -0,0 +1,86 @@ + + test + frame3030.jpg + /home/job/workspace/virtuallab/test/frame3030.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 1 + 360 + 110 + 532 + + + + corobot + Unspecified + 0 + 0 + + 378 + 405 + 467 + 575 + + + + corobot + Unspecified + 0 + 0 + + 179 + 147 + 276 + 370 + + + + corobot + Unspecified + 0 + 0 + + 414 + 103 + 505 + 323 + + + + corobot + Unspecified + 0 + 0 + + 298 + 7 + 376 + 250 + + + + myrobot + Unspecified + 0 + 0 + + 279 + 257 + 345 + 441 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3031.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3031.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dfcaf77ed03cccd588dccaa4f09ab872a5cf3b10 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3031.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3031.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3031.xml new file mode 100644 index 0000000000000000000000000000000000000000..28538a07e0e1afbe5cf270006b0358105622a400 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3031.xml @@ -0,0 +1,86 @@ + + test + frame3031.jpg + /home/job/workspace/virtuallab/test/frame3031.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 1 + 363 + 112 + 532 + + + + corobot + Unspecified + 0 + 0 + + 384 + 415 + 473 + 576 + + + + corobot + Unspecified + 0 + 0 + + 174 + 148 + 265 + 368 + + + + corobot + Unspecified + 0 + 0 + + 417 + 109 + 509 + 327 + + + + corobot + Unspecified + 0 + 0 + + 304 + 2 + 382 + 247 + + + + myrobot + Unspecified + 0 + 0 + + 276 + 254 + 345 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3032.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3032.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc305e3c39ef1c73ee4c0cb1b4af16f7ee7d9fe3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3032.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3032.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3032.xml new file mode 100644 index 0000000000000000000000000000000000000000..716399965090b51e28dc2d0a7777e8bf1d54c4f6 --- /dev/null +++ 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3032.xml @@ -0,0 +1,86 @@ + + test + frame3032.jpg + /home/job/workspace/virtuallab/test/frame3032.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 17 + 405 + 129 + 574 + + + + corobot + Unspecified + 0 + 0 + + 414 + 438 + 504 + 599 + + + + corobot + Unspecified + 0 + 0 + + 133 + 172 + 225 + 386 + + + + corobot + Unspecified + 0 + 0 + + 436 + 147 + 530 + 359 + + + + corobot + Unspecified + 0 + 0 + + 335 + 3 + 417 + 225 + + + + myrobot + Unspecified + 0 + 0 + + 274 + 231 + 345 + 443 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3033.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3033.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16fb826619ca971854a1f548bb2a8741aa6eb041 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3033.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3033.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3033.xml new file mode 100644 index 0000000000000000000000000000000000000000..314e504ed34c27747f2144ec5a1bdd6c2bbd3bfa --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3033.xml @@ -0,0 +1,86 @@ + + test + frame3033.jpg + /home/job/workspace/virtuallab/test/frame3033.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 1 + 350 + 102 + 521 + + + + corobot + Unspecified + 0 + 0 + + 371 + 395 + 457 + 562 + + + + corobot + Unspecified + 0 + 0 + + 407 + 88 + 496 + 312 + + + + corobot + Unspecified + 0 + 0 + + 188 + 145 + 274 + 359 + + + + corobot + Unspecified + 0 + 0 + + 288 + 15 + 364 + 255 + + + + myrobot + Unspecified + 0 + 0 + + 277 + 260 + 344 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3034.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3034.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2628ed2dc9d16f3931139d555fb5c9ef59a00234 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3034.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3034.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3034.xml new file mode 100644 index 0000000000000000000000000000000000000000..50c4010f1ae446f68b816015c55c55d477f5de51 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3034.xml @@ -0,0 +1,86 @@ + + test + frame3034.jpg + /home/job/workspace/virtuallab/test/frame3034.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 4 + 378 + 116 + 550 + + + + corobot + Unspecified + 0 + 0 + + 393 + 420 + 480 + 580 + + + + corobot + Unspecified + 0 + 0 + + 163 + 152 + 253 + 373 + + + + corobot + Unspecified + 0 + 0 + + 425 + 119 + 514 + 337 + + + + corobot + Unspecified + 0 + 0 + + 316 + 3 + 394 + 241 + + + + myrobot + Unspecified + 0 + 0 + + 272 + 246 + 345 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3036.jpg 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5e0bf17dbeb9116209e82c64f1adbdcd216f7b0 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3036.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3036.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3036.xml new file mode 100644 index 0000000000000000000000000000000000000000..080bc46c8c5f438f493259e46e8d4d3d0f731341 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3036.xml @@ -0,0 +1,86 @@ + + test + frame3036.jpg + /home/job/workspace/virtuallab/test/frame3036.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 1 + 337 + 94 + 510 + + + + corobot + Unspecified + 0 + 0 + + 375 + 396 + 459 + 562 + + + + corobot + Unspecified + 0 + 0 + + 408 + 91 + 496 + 318 + + + + corobot + Unspecified + 0 + 0 + + 186 + 132 + 276 + 352 + + + + corobot + Unspecified + 0 + 0 + + 294 + 14 + 370 + 255 + + + + myrobot + Unspecified + 0 + 0 + + 276 + 263 + 344 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3037.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3037.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50a2e8050ed67804647f9513a69fe47f53a262ea Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3037.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3037.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3037.xml new file mode 100644 index 0000000000000000000000000000000000000000..bba80af69bfc761afbae51efc1320a1937dac341 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3037.xml @@ -0,0 +1,86 @@ + + test + frame3037.jpg + /home/job/workspace/virtuallab/test/frame3037.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 3 + 375 + 116 + 543 + + + + corobot + Unspecified + 0 + 0 + + 405 + 431 + 494 + 591 + + + + corobot + Unspecified + 0 + 0 + + 150 + 162 + 243 + 378 + + + + corobot + Unspecified + 0 + 0 + + 432 + 137 + 521 + 346 + + + + corobot + Unspecified + 1 + 0 + + 326 + 1 + 403 + 231 + + + + myrobot + Unspecified + 0 + 0 + + 275 + 242 + 344 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3038.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3038.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9843ccb1b0cc4f0f839a60d04873bd71cd1b0e25 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3038.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3038.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3038.xml new file mode 100644 index 0000000000000000000000000000000000000000..a518756a3e8c38cd01c65a58f9a65276baf6882d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3038.xml @@ -0,0 +1,86 @@ + + 
test + frame3038.jpg + /home/job/workspace/virtuallab/test/frame3038.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 7 + 383 + 118 + 551 + + + + corobot + Unspecified + 0 + 0 + + 411 + 439 + 496 + 591 + + + + corobot + Unspecified + 0 + 0 + + 139 + 171 + 230 + 383 + + + + corobot + Unspecified + 0 + 0 + + 435 + 145 + 527 + 351 + + + + corobot + Unspecified + 0 + 0 + + 331 + 2 + 411 + 230 + + + + myrobot + Unspecified + 0 + 0 + + 274 + 243 + 346 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3039.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3039.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f453c44614edf89a430927f57eb61e7744542933 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3039.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3039.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3039.xml new file mode 100644 index 0000000000000000000000000000000000000000..cdfd6b29e52067bf4a51eab47a9023d336ed83e3 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3039.xml @@ -0,0 +1,86 @@ + + test + frame3039.jpg + /home/job/workspace/virtuallab/test/frame3039.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 1 + 339 + 94 + 514 + + + + corobot + Unspecified + 0 + 0 + + 379 + 402 + 461 + 564 + + + + corobot + Unspecified + 0 + 0 + + 408 + 100 + 497 + 317 + + + + corobot + Unspecified + 0 + 0 + + 179 + 144 + 267 + 355 + + + + corobot + Unspecified + 0 + 0 + + 289 + 18 + 367 + 257 + + + + myrobot + Unspecified + 0 + 0 + + 273 + 262 + 344 + 444 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3041.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3041.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78af0370576d2a7537dd71c702e3051c174ee9eb Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3041.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3041.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3041.xml new file mode 100644 index 0000000000000000000000000000000000000000..42648b664fe4c50059de83a78978005fc7ef0440 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3041.xml @@ -0,0 +1,86 @@ + + test + frame3041.jpg + /home/job/workspace/virtuallab/test/frame3041.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 8 + 385 + 118 + 553 + + + + corobot + Unspecified + 0 + 0 + + 411 + 436 + 494 + 594 + + + + corobot + Unspecified + 0 + 0 + + 151 + 161 + 237 + 376 + + + + corobot + Unspecified + 0 + 0 + + 435 + 146 + 525 + 351 + + + + corobot + Unspecified + 0 + 0 + + 320 + 4 + 398 + 241 + + + + myrobot + Unspecified + 0 + 0 + + 276 + 250 + 344 + 442 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3042.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3042.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..934c5c0fbe54eb8f493e56cceb22c9fe0e319d4f Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3042.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3042.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3042.xml new file mode 100644 index 0000000000000000000000000000000000000000..7df123bd250402b22e969c59f56d2869db7fe639 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3042.xml @@ -0,0 +1,86 @@ + + test + frame3042.jpg + /home/job/workspace/virtuallab/test/frame3042.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 1 + 351 + 96 + 527 + + + + corobot + Unspecified + 0 + 0 + + 385 + 410 + 470 + 570 + + + + corobot + Unspecified + 0 + 0 + + 417 + 109 + 501 + 323 + + + + corobot + Unspecified + 0 + 0 + + 184 + 133 + 269 + 350 + + + + corobot + Unspecified + 0 + 0 + + 293 + 21 + 368 + 260 + + + + myrobot + Unspecified + 0 + 0 + + 276 + 263 + 344 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3043.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..417ceb46ddc76b51aec6e2b47e9dc0b2f85c7ec9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3043.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3043.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3043.xml new file mode 100644 index 0000000000000000000000000000000000000000..aa82f35ee57fb4328d15bd079ae12c97d4b10d22 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3043.xml @@ -0,0 +1,86 @@ + + test + frame3043.jpg + /home/job/workspace/virtuallab/test/frame3043.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 22 + 404 + 130 + 566 + + + + corobot + Unspecified + 0 + 0 + + 424 + 455 + 514 + 610 + + + + corobot + Unspecified + 0 + 0 + + 447 + 159 + 542 + 366 + + + + corobot + Unspecified + 0 + 0 + + 138 + 168 + 230 + 380 + + + + corobot + Unspecified + 1 + 0 + + 339 + 1 + 419 + 231 + + + + myrobot + Unspecified + 0 + 0 + + 275 + 241 + 346 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3044.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3044.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b178a1474d43016c3664ff475b608f9a5453b1d7 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3044.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3044.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3044.xml new file mode 100644 index 0000000000000000000000000000000000000000..29458ec02e66626296145c3a09a318bf4d9f766a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3044.xml @@ -0,0 +1,86 @@ + + test + frame3044.jpg + /home/job/workspace/virtuallab/test/frame3044.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + 
Unspecified + 0 + 0 + + 5 + 380 + 113 + 545 + + + + corobot + Unspecified + 0 + 0 + + 406 + 434 + 493 + 591 + + + + corobot + Unspecified + 0 + 0 + + 430 + 133 + 520 + 346 + + + + corobot + Unspecified + 0 + 0 + + 160 + 154 + 245 + 368 + + + + corobot + Unspecified + 0 + 0 + + 316 + 5 + 394 + 241 + + + + myrobot + Unspecified + 0 + 0 + + 275 + 251 + 345 + 441 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3046.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3046.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b0e5f79993e53d6b079f1ff957466f1a6232694 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3046.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3046.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3046.xml new file mode 100644 index 0000000000000000000000000000000000000000..4698f10b09aa038a8bc7590ed629ee421fcdb288 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3046.xml @@ -0,0 +1,86 @@ + + test + frame3046.jpg + /home/job/workspace/virtuallab/test/frame3046.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 27 + 414 + 135 + 572 + + + + corobot + Unspecified + 0 + 0 + + 426 + 460 + 514 + 612 + + + + corobot + Unspecified + 0 + 0 + + 448 + 153 + 538 + 367 + + + + corobot + Unspecified + 0 + 0 + + 132 + 171 + 221 + 382 + + + + corobot + Unspecified + 0 + 0 + + 339 + 2 + 419 + 234 + + + + myrobot + Unspecified + 0 + 0 + + 276 + 250 + 344 + 443 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3047.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3047.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3cbd6b49943be0d42e26a56bbb2c8d75fac34bda Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3047.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3047.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3047.xml new file mode 100644 index 0000000000000000000000000000000000000000..c511f4e701295a40f52f3d629834fba872183fbc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3047.xml @@ -0,0 +1,86 @@ + + test + frame3047.jpg + /home/job/workspace/virtuallab/test/frame3047.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 2 + 378 + 108 + 542 + + + + corobot + Unspecified + 0 + 0 + + 393 + 425 + 479 + 580 + + + + corobot + Unspecified + 0 + 0 + + 421 + 119 + 507 + 337 + + + + corobot + Unspecified + 0 + 0 + + 171 + 146 + 257 + 353 + + + + corobot + Unspecified + 0 + 0 + + 303 + 15 + 380 + 256 + + + + myrobot + Unspecified + 0 + 0 + + 276 + 259 + 344 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3048.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3048.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6471067a6bb3c97a152daaa1fc7bdcba606190a Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3048.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3048.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3048.xml new file mode 100644 index 0000000000000000000000000000000000000000..7e2cafe4561023a785233c78f01621c7f802e635 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3048.xml @@ -0,0 +1,86 @@ + + test + frame3048.jpg + /home/job/workspace/virtuallab/test/frame3048.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 2 + 373 + 107 + 540 + + + + corobot + Unspecified + 0 + 0 + + 394 + 419 + 476 + 578 + + + + corobot + Unspecified + 0 + 0 + + 417 + 114 + 504 + 333 + + + + corobot + Unspecified + 0 + 0 + + 181 + 135 + 267 + 350 + + + + corobot + Unspecified + 0 + 0 + + 299 + 19 + 378 + 257 + + + + myrobot + Unspecified + 0 + 0 + + 275 + 263 + 344 + 443 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3049.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3049.jpg new file mode 100644 index 0000000000000000000000000000000000000000..57e95450e0ee80b6b5eea3604b84ee35378e1711 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3049.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3049.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3049.xml new file mode 100644 index 0000000000000000000000000000000000000000..ac6de09125ed65293ec5a6edd5bd7d7de9606567 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3049.xml @@ -0,0 +1,86 @@ + + test + frame3049.jpg + /home/job/workspace/virtuallab/test/frame3049.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 28 + 413 + 134 + 573 + + + + corobot + Unspecified + 0 + 0 + + 424 + 461 + 511 + 608 + + + + corobot + Unspecified + 0 + 0 + + 444 + 153 + 534 + 363 + + + + corobot + Unspecified + 0 + 0 + + 139 + 164 + 230 + 378 + + + + corobot + Unspecified + 0 + 0 + + 344 + 2 + 422 + 230 + + + + myrobot + Unspecified + 0 + 0 + + 275 + 246 + 345 + 449 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3051.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3051.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ffbf69f6de302845af20c3b2378e92fdae16914 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3051.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3051.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3051.xml new file mode 100644 index 0000000000000000000000000000000000000000..7397661e0878363320a48bac4f3383d9ef17c721 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame3051.xml @@ -0,0 +1,86 @@ + + test + frame3051.jpg + /home/job/workspace/virtuallab/test/frame3051.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 2 + 372 + 105 + 539 + + + + corobot + Unspecified + 0 + 0 + + 392 + 417 + 473 + 576 
+ + + + corobot + Unspecified + 0 + 0 + + 415 + 113 + 502 + 329 + + + + corobot + Unspecified + 0 + 0 + + 170 + 145 + 260 + 355 + + + + corobot + Unspecified + 0 + 0 + + 313 + 10 + 388 + 252 + + + + myrobot + Unspecified + 0 + 0 + + 274 + 258 + 344 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4000.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4000.jpg new file mode 100644 index 0000000000000000000000000000000000000000..840a44a718ce866f96b126da73fc74feedde1bdd Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4000.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4000.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4000.xml new file mode 100644 index 0000000000000000000000000000000000000000..da62cd5d777e3a3dfd22bd9032d990b9800a3f09 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4000.xml @@ -0,0 +1,86 @@ + + test + frame4000.jpg + /home/job/workspace/virtuallab/test/frame4000.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 94 + 1 + 191 + 246 + + + + corobot + Unspecified + 0 + 0 + + 351 + 111 + 425 + 322 + + + + corobot + Unspecified + 0 + 0 + + 75 + 446 + 186 + 597 + + + + corobot + Unspecified + 0 + 0 + + 339 + 410 + 416 + 587 + + + + corobot + Unspecified + 0 + 0 + + 517 + 334 + 635 + 516 + + + + myrobot + Unspecified + 0 + 0 + + 264 + 228 + 344 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4026.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4026.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f1b4951c3462634ec5ffd293143dc84c492bd42 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4026.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4026.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4026.xml new file mode 100644 index 0000000000000000000000000000000000000000..7ff554aca9d69a3aff893fe0b8c71464f0b70df8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4026.xml @@ -0,0 +1,86 @@ + + test + frame4026.jpg + /home/job/workspace/virtuallab/test/frame4026.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 162 + 1 + 256 + 246 + + + + corobot + Unspecified + 0 + 0 + + 71 + 361 + 181 + 523 + + + + corobot + Unspecified + 0 + 0 + + 349 + 187 + 428 + 382 + + + + corobot + Unspecified + 0 + 0 + + 484 + 267 + 590 + 453 + + + + corobot + Unspecified + 0 + 0 + + 411 + 445 + 503 + 583 + + + + myrobot + Unspecified + 0 + 0 + + 265 + 241 + 349 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4027.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4027.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cda7085f18d61c1dbb131cdbd752ab84b0ae1ef Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4027.jpg differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4027.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4027.xml new file mode 100644 index 0000000000000000000000000000000000000000..d87d36b2261dc223805e57d5ef2fbca8959c1de0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4027.xml @@ -0,0 +1,86 @@ + + test + frame4027.jpg + /home/job/workspace/virtuallab/test/frame4027.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 67 + 437 + 176 + 588 + + + + corobot + Unspecified + 0 + 0 + + 347 + 407 + 427 + 585 + + + + corobot + Unspecified + 0 + 0 + + 517 + 319 + 626 + 496 + + + + corobot + Unspecified + 0 + 0 + + 87 + 4 + 182 + 243 + + + + corobot + Unspecified + 0 + 0 + + 347 + 112 + 421 + 322 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 235 + 344 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4028.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4028.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52a9c005c579fb46812f449cb8b6589702c41537 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4028.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4028.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4028.xml new file mode 100644 index 0000000000000000000000000000000000000000..ea0533713971304f81c2764930fa69c4afb11cd6 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4028.xml @@ -0,0 +1,86 @@ + + test + frame4028.jpg + /home/job/workspace/virtuallab/test/frame4028.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 80 + 379 + 185 + 538 + + + + corobot + Unspecified + 0 + 0 + + 145 + 2 + 233 + 242 + + + + corobot + Unspecified + 0 + 0 + + 398 + 437 + 484 + 578 + + + + corobot + Unspecified + 0 + 0 + + 492 + 278 + 595 + 464 + + + + corobot + Unspecified + 0 + 0 + + 355 + 170 + 436 + 370 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 233 + 346 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4029.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4029.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c592587873705a558f375f68216bc06a883a6836 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4029.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4029.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4029.xml new file mode 100644 index 0000000000000000000000000000000000000000..e1d0afd7d2717d17ecf9ef95d002e6edcd8f1b02 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4029.xml @@ -0,0 +1,86 @@ + + test + frame4029.jpg + /home/job/workspace/virtuallab/test/frame4029.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 86 + 370 + 186 + 537 + + + + corobot + Unspecified + 0 + 0 + + 152 + 4 + 237 + 241 + + + + corobot + Unspecified + 0 + 0 + + 357 + 181 + 438 + 379 + + + + corobot + Unspecified + 0 + 0 + + 496 + 278 
+ 589 + 454 + + + + corobot + Unspecified + 0 + 0 + + 392 + 404 + 488 + 580 + + + + myrobot + Unspecified + 0 + 0 + + 271 + 234 + 345 + 448 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4030.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4030.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0f7ad1fd52313074928f1b8dae3a976a3d608a7 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4030.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4030.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4030.xml new file mode 100644 index 0000000000000000000000000000000000000000..d50169c46e1ffda990a3c1204627eb08276ff320 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4030.xml @@ -0,0 +1,86 @@ + + test + frame4030.jpg + /home/job/workspace/virtuallab/test/frame4030.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 57 + 422 + 167 + 572 + + + + corobot + Unspecified + 0 + 0 + + 354 + 425 + 438 + 590 + + + + corobot + Unspecified + 0 + 0 + + 521 + 324 + 629 + 502 + + + + corobot + Unspecified + 0 + 0 + + 94 + 5 + 191 + 252 + + + + corobot + Unspecified + 0 + 0 + + 343 + 121 + 419 + 326 + + + + myrobot + Unspecified + 0 + 0 + + 270 + 236 + 341 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4031.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4031.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9909f9a53250b02d051d777e416d881f564d673e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4031.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4031.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4031.xml new file mode 100644 index 0000000000000000000000000000000000000000..5711948a9bcf65598a8bde0c6c993862278c33db --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4031.xml @@ -0,0 +1,86 @@ + + test + frame4031.jpg + /home/job/workspace/virtuallab/test/frame4031.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 153 + 1 + 243 + 234 + + + + corobot + Unspecified + 0 + 0 + + 100 + 361 + 198 + 531 + + + + corobot + Unspecified + 0 + 0 + + 362 + 175 + 442 + 385 + + + + corobot + Unspecified + 0 + 0 + + 492 + 285 + 593 + 460 + + + + corobot + Unspecified + 0 + 0 + + 408 + 400 + 495 + 562 + + + + myrobot + Unspecified + 0 + 0 + + 266 + 233 + 345 + 447 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4032.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4032.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d71a675402fdccd6ff56e4c7722e1e035831e4c4 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4032.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4032.xml 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4032.xml new file mode 100644 index 0000000000000000000000000000000000000000..52a3a7e58235082f98c06e133476f3259372247b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4032.xml @@ -0,0 +1,86 @@ + + test + frame4032.jpg + /home/job/workspace/virtuallab/test/frame4032.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 145 + 1 + 238 + 234 + + + + corobot + Unspecified + 0 + 0 + + 91 + 372 + 192 + 541 + + + + corobot + Unspecified + 0 + 0 + + 355 + 163 + 434 + 371 + + + + corobot + Unspecified + 0 + 0 + + 494 + 296 + 595 + 474 + + + + corobot + Unspecified + 0 + 0 + + 397 + 402 + 490 + 577 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 237 + 347 + 449 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4033.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4033.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e76ed36aaf7558cd90ba60f7964f4ab4d919662 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4033.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4033.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4033.xml new file mode 100644 index 0000000000000000000000000000000000000000..751de792a7e0d25547f33b6f96c5cd6a6258f668 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4033.xml @@ -0,0 +1,86 @@ + + test + frame4033.jpg + /home/job/workspace/virtuallab/test/frame4033.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 106 + 1 + 203 + 249 + + + + corobot + Unspecified + 0 + 0 + + 67 + 402 + 169 + 565 + + + + corobot + Unspecified + 0 + 0 + + 335 + 124 + 412 + 319 + + + + corobot + Unspecified + 0 + 0 + + 362 + 424 + 454 + 594 + + + + corobot + Unspecified + 0 + 0 + + 513 + 323 + 625 + 502 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 288 + 342 + 450 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4034.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4034.jpg new file mode 100644 index 0000000000000000000000000000000000000000..466819565d4a0e504ca029236a6fb12fd7474eb0 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4034.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4034.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4034.xml new file mode 100644 index 0000000000000000000000000000000000000000..bc4855ac4d6f71d58edc23f09b6d06c87f5fc002 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4034.xml @@ -0,0 +1,86 @@ + + test + frame4034.jpg + /home/job/workspace/virtuallab/test/frame4034.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 162 + 1 + 255 + 226 + + + + corobot + Unspecified + 0 + 0 + + 112 + 350 + 207 + 527 + + + + corobot + Unspecified + 0 + 0 + + 361 + 173 + 449 + 382 + + + + corobot + Unspecified + 0 + 0 + + 488 + 273 + 584 + 428 + + + + corobot + Unspecified + 0 + 0 + + 417 + 423 + 500 + 560 + + + + myrobot + 
Unspecified + 0 + 0 + + 270 + 241 + 346 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4036.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..652e8ae49f8600ae0f9d481aeb69512cb59c0bbc Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4036.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4036.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4036.xml new file mode 100644 index 0000000000000000000000000000000000000000..acda46a37445bf49c58f1b7e81c0d6e6485cc070 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4036.xml @@ -0,0 +1,86 @@ + + test + frame4036.jpg + /home/job/workspace/virtuallab/test/frame4036.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 117 + 1 + 212 + 248 + + + + corobot + Unspecified + 0 + 0 + + 67 + 402 + 166 + 559 + + + + corobot + Unspecified + 0 + 0 + + 342 + 131 + 414 + 336 + + + + corobot + Unspecified + 0 + 0 + + 365 + 431 + 455 + 591 + + + + corobot + Unspecified + 0 + 0 + + 517 + 329 + 623 + 502 + + + + myrobot + Unspecified + 0 + 0 + + 264 + 240 + 346 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4037.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4037.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f3d54b0847064f095ad6d4fa2c17d8d6129f14e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4037.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4037.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4037.xml new file mode 100644 index 0000000000000000000000000000000000000000..33adea9cf9d89125df0c2a6774c454338015c71f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4037.xml @@ -0,0 +1,86 @@ + + test + frame4037.jpg + /home/job/workspace/virtuallab/test/frame4037.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 172 + 1 + 260 + 216 + + + + corobot + Unspecified + 0 + 0 + + 370 + 174 + 449 + 376 + + + + corobot + Unspecified + 0 + 0 + + 114 + 356 + 207 + 528 + + + + corobot + Unspecified + 0 + 0 + + 484 + 277 + 581 + 422 + + + + corobot + Unspecified + 0 + 0 + + 417 + 424 + 504 + 562 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 237 + 348 + 436 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4038.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4038.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a98108d8b05824bef1f358bd53a4cc749528a232 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4038.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4038.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4038.xml new file mode 100644 index 
0000000000000000000000000000000000000000..1baddd52bc061091e7a57877f56b36c0cde66f48 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4038.xml @@ -0,0 +1,86 @@ + + test + frame4038.jpg + /home/job/workspace/virtuallab/test/frame4038.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 130 + 2 + 221 + 241 + + + + corobot + Unspecified + 0 + 0 + + 73 + 389 + 174 + 555 + + + + corobot + Unspecified + 0 + 0 + + 342 + 135 + 418 + 340 + + + + corobot + Unspecified + 0 + 0 + + 376 + 424 + 463 + 587 + + + + corobot + Unspecified + 0 + 0 + + 509 + 318 + 617 + 495 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 240 + 342 + 445 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4039.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4039.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4543fef4f1e227ace07eb86b141cea0c043641aa Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4039.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4039.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4039.xml new file mode 100644 index 0000000000000000000000000000000000000000..2128b752d3c3f9e4573b79bc8a0f3832402f42c9 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4039.xml @@ -0,0 +1,86 @@ + + test + frame4039.jpg + /home/job/workspace/virtuallab/test/frame4039.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 129 + 1 + 219 + 242 + + + + corobot + Unspecified + 0 + 0 + + 76 + 391 + 171 + 555 + + + + corobot + Unspecified + 0 + 0 + + 345 + 124 + 419 + 322 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 242 + 343 + 445 + + + + corobot + Unspecified + 0 + 0 + + 374 + 424 + 458 + 584 + + + + corobot + Unspecified + 0 + 0 + + 511 + 319 + 617 + 496 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4041.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4041.jpg new file mode 100644 index 0000000000000000000000000000000000000000..19bb8c1bddc63006eadc3d8d3b5858752ed68e48 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4041.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4041.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4041.xml new file mode 100644 index 0000000000000000000000000000000000000000..7b581d54feab34456e0e3e4d514ec46675a8bcb8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4041.xml @@ -0,0 +1,86 @@ + + test + frame4041.jpg + /home/job/workspace/virtuallab/test/frame4041.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 120 + 1 + 212 + 246 + + + + corobot + Unspecified + 0 + 0 + + 336 + 132 + 411 + 300 + + + + corobot + Unspecified + 0 + 0 + + 67 + 398 + 163 + 564 + + + + corobot + Unspecified + 0 + 0 + + 365 + 432 + 454 + 592 + + + + corobot + Unspecified + 0 + 0 + + 505 + 323 + 606 + 491 + + + + myrobot + Unspecified + 0 + 0 + + 261 + 285 + 344 + 439 + + + diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4042.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4042.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89fd757b5df292f1964cc806202c38c8eb3acf9d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4042.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4042.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4042.xml new file mode 100644 index 0000000000000000000000000000000000000000..552fa5639c66c1a5522079b366f1c0b5679dce3a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4042.xml @@ -0,0 +1,86 @@ + + test + frame4042.jpg + /home/job/workspace/virtuallab/test/frame4042.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 126 + 2 + 213 + 243 + + + + corobot + Unspecified + 0 + 0 + + 346 + 137 + 422 + 341 + + + + corobot + Unspecified + 0 + 0 + + 71 + 392 + 171 + 557 + + + + corobot + Unspecified + 0 + 0 + + 372 + 422 + 458 + 586 + + + + corobot + Unspecified + 0 + 0 + + 500 + 306 + 595 + 483 + + + + myrobot + Unspecified + 0 + 0 + + 262 + 240 + 344 + 441 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4043.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7f77f3b9ce9f7ae15f6c334320d7626cd63b0a4 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4043.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4043.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4043.xml new file mode 100644 index 0000000000000000000000000000000000000000..84a8c32e66fa65acbc4044b2ad464d0c96f94042 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4043.xml @@ -0,0 +1,86 @@ + + test + frame4043.jpg + /home/job/workspace/virtuallab/test/frame4043.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 160 + 1 + 244 + 218 + + + + corobot + Unspecified + 0 + 0 + + 102 + 364 + 197 + 541 + + + + corobot + Unspecified + 0 + 0 + + 372 + 165 + 455 + 373 + + + + corobot + Unspecified + 0 + 0 + + 477 + 276 + 576 + 423 + + + + corobot + Unspecified + 0 + 0 + + 402 + 422 + 489 + 568 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 239 + 345 + 441 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4044.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4044.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59d82e612af04c70a1f14c56fccf80b1ed966103 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4044.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4044.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4044.xml new file mode 100644 index 0000000000000000000000000000000000000000..685e342363ab20698a8c5debef98235dc691c14f --- /dev/null +++ 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4044.xml @@ -0,0 +1,86 @@ + + test + frame4044.jpg + /home/job/workspace/virtuallab/test/frame4044.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 107 + 2 + 201 + 250 + + + + corobot + Unspecified + 0 + 0 + + 343 + 125 + 414 + 323 + + + + myrobot + Unspecified + 0 + 0 + + 267 + 241 + 344 + 442 + + + + corobot + Unspecified + 0 + 0 + + 353 + 436 + 442 + 596 + + + + corobot + Unspecified + 0 + 0 + + 515 + 323 + 617 + 491 + + + + corobot + Unspecified + 0 + 0 + + 53 + 409 + 157 + 570 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4046.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4046.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06ddedafc5d969745db9ef4d74331f2a9d96a1d8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4046.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4046.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4046.xml new file mode 100644 index 0000000000000000000000000000000000000000..5fee77a5da12b07da8d339cfebd3e86899531367 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4046.xml @@ -0,0 +1,86 @@ + + test + frame4046.jpg + /home/job/workspace/virtuallab/test/frame4046.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 150 + 1 + 240 + 223 + + + + corobot + Unspecified + 0 + 0 + + 102 + 363 + 197 + 533 + + + + corobot + Unspecified + 0 + 0 + + 372 + 162 + 451 + 371 + + + + corobot + Unspecified + 0 + 0 + + 478 + 277 + 573 + 444 + + + + myrobot + Unspecified + 0 + 0 + + 268 + 237 + 344 + 441 + + + + corobot + Unspecified + 0 + 0 + + 408 + 404 + 478 + 561 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4047.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4047.jpg new file mode 100644 index 0000000000000000000000000000000000000000..160b30e33f42e25f8fc17c1298167489d1bbadaf Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4047.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4047.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4047.xml new file mode 100644 index 0000000000000000000000000000000000000000..ec759a4dbc0d4ccea44014871330569ec7f4a7ca --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4047.xml @@ -0,0 +1,86 @@ + + test + frame4047.jpg + /home/job/workspace/virtuallab/test/frame4047.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 0 + 0 + + 98 + 4 + 192 + 253 + + + + corobot + Unspecified + 0 + 0 + + 51 + 402 + 151 + 565 + + + + corobot + Unspecified + 0 + 0 + + 335 + 115 + 413 + 299 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 253 + 344 + 441 + + + + corobot + Unspecified + 0 + 0 + + 512 + 327 + 615 + 492 + + + + corobot + Unspecified + 0 + 0 + + 356 + 432 + 439 + 586 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4048.jpg 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4048.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f1077d1a4ca0f6659f13f4582941ea38156feec Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4048.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4048.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4048.xml new file mode 100644 index 0000000000000000000000000000000000000000..8f47f90c3b765d557debd9b646cc76f38da4219d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4048.xml @@ -0,0 +1,86 @@ + + test + frame4048.jpg + /home/job/workspace/virtuallab/test/frame4048.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 135 + 1 + 223 + 230 + + + + corobot + Unspecified + 0 + 0 + + 91 + 375 + 185 + 543 + + + + corobot + Unspecified + 0 + 0 + + 363 + 150 + 445 + 367 + + + + corobot + Unspecified + 0 + 0 + + 483 + 283 + 584 + 464 + + + + corobot + Unspecified + 0 + 0 + + 395 + 403 + 474 + 569 + + + + myrobot + Unspecified + 0 + 0 + + 269 + 238 + 344 + 441 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4049.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4049.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b15647e0ee84399f4295756a0cdfaa154508fd8b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4049.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4049.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4049.xml new file mode 100644 index 0000000000000000000000000000000000000000..83f382873782219674340a6c3bb34052c8ab4e41 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4049.xml @@ -0,0 +1,86 @@ + + test + frame4049.jpg + /home/job/workspace/virtuallab/test/frame4049.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 141 + 1 + 226 + 228 + + + + corobot + Unspecified + 0 + 0 + + 373 + 162 + 453 + 368 + + + + corobot + Unspecified + 0 + 0 + + 94 + 373 + 187 + 542 + + + + corobot + Unspecified + 0 + 0 + + 481 + 285 + 576 + 458 + + + + corobot + Unspecified + 0 + 0 + + 392 + 401 + 480 + 568 + + + + myrobot + Unspecified + 0 + 0 + + 268 + 238 + 345 + 441 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4051.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4051.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acdeca3ef6061b7eca78c94be61d44d9bf76481e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4051.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4051.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4051.xml new file mode 100644 index 0000000000000000000000000000000000000000..ad58229ef7520841a5997931980c63ac42e5d506 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4051.xml @@ -0,0 +1,86 @@ + + 
test + frame4051.jpg + /home/job/workspace/virtuallab/test/frame4051.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 144 + 1 + 232 + 227 + + + + corobot + Unspecified + 0 + 0 + + 101 + 364 + 196 + 541 + + + + corobot + Unspecified + 0 + 0 + + 377 + 160 + 462 + 374 + + + + corobot + Unspecified + 0 + 0 + + 485 + 288 + 580 + 451 + + + + corobot + Unspecified + 0 + 0 + + 406 + 399 + 485 + 556 + + + + myrobot + Unspecified + 0 + 0 + + 268 + 234 + 344 + 444 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4052.jpg b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4052.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae02cf2c8b4a0f561ed0023efb27ce36d169d756 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4052.jpg differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4052.xml b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4052.xml new file mode 100644 index 0000000000000000000000000000000000000000..ad2758f9fd5e9dc912438e01b20acfa9ff8825e6 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/images/train/frame4052.xml @@ -0,0 +1,86 @@ + + test + frame4052.jpg + /home/job/workspace/virtuallab/test/frame4052.jpg + + Unknown + + + 800 + 800 + 3 + + 0 + + corobot + Unspecified + 1 + 0 + + 140 + 1 + 228 + 225 + + + + corobot + Unspecified + 0 + 0 + + 368 + 153 + 450 + 366 + + + + corobot + Unspecified + 0 + 0 + + 100 + 368 + 191 + 537 + + + + corobot + Unspecified + 0 + 0 + + 401 + 403 + 486 + 565 + + + + corobot + Unspecified + 0 + 0 + + 491 + 298 + 587 + 471 + + + + myrobot + Unspecified + 0 + 0 + + 270 + 237 + 349 + 446 + + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/detection_inference.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/detection_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..b395cd7e74b093551a05f5e37875ccf06de4ccc9 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/detection_inference.py @@ -0,0 +1,141 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utility functions for detection inference.""" +from __future__ import division + +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields + + +def build_input(tfrecord_paths): + """Builds the graph's input. + + Args: + tfrecord_paths: List of paths to the input TFRecords + + Returns: + serialized_example_tensor: The next serialized example. String scalar Tensor + image_tensor: The decoded image of the example. Uint8 tensor, + shape=[1, None, None,3] + """ + filename_queue = tf.train.string_input_producer( + tfrecord_paths, shuffle=False, num_epochs=1) + + tf_record_reader = tf.TFRecordReader() + _, serialized_example_tensor = tf_record_reader.read(filename_queue) + features = tf.parse_single_example( + serialized_example_tensor, + features={ + standard_fields.TfExampleFields.image_encoded: + tf.FixedLenFeature([], tf.string), + }) + encoded_image = features[standard_fields.TfExampleFields.image_encoded] + image_tensor = tf.image.decode_image(encoded_image, channels=3) + image_tensor.set_shape([None, None, 3]) + image_tensor = tf.expand_dims(image_tensor, 0) + + return serialized_example_tensor, image_tensor + + +def build_inference_graph(image_tensor, inference_graph_path): + """Loads the inference graph and connects it to the input image. + + Args: + image_tensor: The input image. uint8 tensor, shape=[1, None, None, 3] + inference_graph_path: Path to the inference graph with embedded weights + + Returns: + detected_boxes_tensor: Detected boxes. Float tensor, + shape=[num_detections, 4] + detected_scores_tensor: Detected scores. Float tensor, + shape=[num_detections] + detected_labels_tensor: Detected labels. Int64 tensor, + shape=[num_detections] + """ + with tf.gfile.Open(inference_graph_path, 'rb') as graph_def_file: + graph_content = graph_def_file.read() + graph_def = tf.GraphDef() + graph_def.MergeFromString(graph_content) + + tf.import_graph_def( + graph_def, name='', input_map={'image_tensor': image_tensor}) + + g = tf.get_default_graph() + + num_detections_tensor = tf.squeeze( + g.get_tensor_by_name('num_detections:0'), 0) + num_detections_tensor = tf.cast(num_detections_tensor, tf.int32) + + detected_boxes_tensor = tf.squeeze( + g.get_tensor_by_name('detection_boxes:0'), 0) + detected_boxes_tensor = detected_boxes_tensor[:num_detections_tensor] + + detected_scores_tensor = tf.squeeze( + g.get_tensor_by_name('detection_scores:0'), 0) + detected_scores_tensor = detected_scores_tensor[:num_detections_tensor] + + detected_labels_tensor = tf.squeeze( + g.get_tensor_by_name('detection_classes:0'), 0) + detected_labels_tensor = tf.cast(detected_labels_tensor, tf.int64) + detected_labels_tensor = detected_labels_tensor[:num_detections_tensor] + + return detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor + + +def infer_detections_and_add_to_example( + serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor, + detected_labels_tensor, discard_image_pixels): + """Runs the supplied tensors and adds the inferred detections to the example. + + Args: + serialized_example_tensor: Serialized TF example. Scalar string tensor + detected_boxes_tensor: Detected boxes. Float tensor, + shape=[num_detections, 4] + detected_scores_tensor: Detected scores. Float tensor, + shape=[num_detections] + detected_labels_tensor: Detected labels. 
Int64 tensor, + shape=[num_detections] + discard_image_pixels: If true, discards the image from the result + Returns: + The de-serialized TF example augmented with the inferred detections. + """ + tf_example = tf.train.Example() + (serialized_example, detected_boxes, detected_scores, + detected_classes) = tf.get_default_session().run([ + serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor, + detected_labels_tensor + ]) + detected_boxes = detected_boxes.T + + tf_example.ParseFromString(serialized_example) + feature = tf_example.features.feature + feature[standard_fields.TfExampleFields. + detection_score].float_list.value[:] = detected_scores + feature[standard_fields.TfExampleFields. + detection_bbox_ymin].float_list.value[:] = detected_boxes[0] + feature[standard_fields.TfExampleFields. + detection_bbox_xmin].float_list.value[:] = detected_boxes[1] + feature[standard_fields.TfExampleFields. + detection_bbox_ymax].float_list.value[:] = detected_boxes[2] + feature[standard_fields.TfExampleFields. + detection_bbox_xmax].float_list.value[:] = detected_boxes[3] + feature[standard_fields.TfExampleFields. + detection_class_label].int64_list.value[:] = detected_classes + + if discard_image_pixels: + del feature[standard_fields.TfExampleFields.image_encoded] + + return tf_example diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/detection_inference_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/detection_inference_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..899da1298765425c667fbcdfd341fad713724d9f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/detection_inference_tf1_test.py @@ -0,0 +1,177 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +r"""Tests for detection_inference.py.""" + +import os +import unittest +import numpy as np +from PIL import Image +import six +import tensorflow.compat.v1 as tf +from google.protobuf import text_format + +from object_detection.core import standard_fields +from object_detection.inference import detection_inference +from object_detection.utils import dataset_util +from object_detection.utils import tf_version + + +def get_mock_tfrecord_path(): + return os.path.join(tf.test.get_temp_dir(), 'mock.tfrec') + + +def create_mock_tfrecord(): + pil_image = Image.fromarray(np.array([[[123, 0, 0]]], dtype=np.uint8), 'RGB') + image_output_stream = six.BytesIO() + pil_image.save(image_output_stream, format='png') + encoded_image = image_output_stream.getvalue() + + feature_map = { + 'test_field': + dataset_util.float_list_feature([1, 2, 3, 4]), + standard_fields.TfExampleFields.image_encoded: + dataset_util.bytes_feature(encoded_image), + } + + tf_example = tf.train.Example(features=tf.train.Features(feature=feature_map)) + with tf.python_io.TFRecordWriter(get_mock_tfrecord_path()) as writer: + writer.write(tf_example.SerializeToString()) + return encoded_image + + +def get_mock_graph_path(): + return os.path.join(tf.test.get_temp_dir(), 'mock_graph.pb') + + +def create_mock_graph(): + g = tf.Graph() + with g.as_default(): + in_image_tensor = tf.placeholder( + tf.uint8, shape=[1, None, None, 3], name='image_tensor') + tf.constant([2.0], name='num_detections') + tf.constant( + [[[0, 0.8, 0.7, 1], [0.1, 0.2, 0.8, 0.9], [0.2, 0.3, 0.4, 0.5]]], + name='detection_boxes') + tf.constant([[0.1, 0.2, 0.3]], name='detection_scores') + tf.identity( + tf.constant([[1.0, 2.0, 3.0]]) * + tf.reduce_sum(tf.cast(in_image_tensor, dtype=tf.float32)), + name='detection_classes') + graph_def = g.as_graph_def() + + with tf.gfile.Open(get_mock_graph_path(), 'w') as fl: + fl.write(graph_def.SerializeToString()) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class InferDetectionsTests(tf.test.TestCase): + + def test_simple(self): + create_mock_graph() + encoded_image = create_mock_tfrecord() + + serialized_example_tensor, image_tensor = detection_inference.build_input( + [get_mock_tfrecord_path()]) + self.assertAllEqual(image_tensor.get_shape().as_list(), [1, None, None, 3]) + + (detected_boxes_tensor, detected_scores_tensor, + detected_labels_tensor) = detection_inference.build_inference_graph( + image_tensor, get_mock_graph_path()) + + with self.test_session(use_gpu=False) as sess: + sess.run(tf.global_variables_initializer()) + sess.run(tf.local_variables_initializer()) + tf.train.start_queue_runners() + + tf_example = detection_inference.infer_detections_and_add_to_example( + serialized_example_tensor, detected_boxes_tensor, + detected_scores_tensor, detected_labels_tensor, False) + expected_example = tf.train.Example() + text_format.Merge(r""" + features { + feature { + key: "image/detection/bbox/ymin" + value { float_list { value: [0.0, 0.1] } } } + feature { + key: "image/detection/bbox/xmin" + value { float_list { value: [0.8, 0.2] } } } + feature { + key: "image/detection/bbox/ymax" + value { float_list { value: [0.7, 0.8] } } } + feature { + key: "image/detection/bbox/xmax" + value { float_list { value: [1.0, 0.9] } } } + feature { + key: "image/detection/label" + value { int64_list { value: [123, 246] } } } + feature { + key: "image/detection/score" + value { float_list { value: [0.1, 0.2] } } } + feature { + 
key: "test_field" + value { float_list { value: [1.0, 2.0, 3.0, 4.0] } } } }""", + expected_example) + expected_example.features.feature[ + standard_fields.TfExampleFields + .image_encoded].CopyFrom(dataset_util.bytes_feature(encoded_image)) + self.assertProtoEquals(expected_example, tf_example) + + def test_discard_image(self): + create_mock_graph() + create_mock_tfrecord() + + serialized_example_tensor, image_tensor = detection_inference.build_input( + [get_mock_tfrecord_path()]) + (detected_boxes_tensor, detected_scores_tensor, + detected_labels_tensor) = detection_inference.build_inference_graph( + image_tensor, get_mock_graph_path()) + + with self.test_session(use_gpu=False) as sess: + sess.run(tf.global_variables_initializer()) + sess.run(tf.local_variables_initializer()) + tf.train.start_queue_runners() + + tf_example = detection_inference.infer_detections_and_add_to_example( + serialized_example_tensor, detected_boxes_tensor, + detected_scores_tensor, detected_labels_tensor, True) + + self.assertProtoEquals(r""" + features { + feature { + key: "image/detection/bbox/ymin" + value { float_list { value: [0.0, 0.1] } } } + feature { + key: "image/detection/bbox/xmin" + value { float_list { value: [0.8, 0.2] } } } + feature { + key: "image/detection/bbox/ymax" + value { float_list { value: [0.7, 0.8] } } } + feature { + key: "image/detection/bbox/xmax" + value { float_list { value: [1.0, 0.9] } } } + feature { + key: "image/detection/label" + value { int64_list { value: [123, 246] } } } + feature { + key: "image/detection/score" + value { float_list { value: [0.1, 0.2] } } } + feature { + key: "test_field" + value { float_list { value: [1.0, 2.0, 3.0, 4.0] } } } } + """, tf_example) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/infer_detections.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/infer_detections.py new file mode 100644 index 0000000000000000000000000000000000000000..7bc662f4297436024dd2f9632fdd92133116d482 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inference/infer_detections.py @@ -0,0 +1,96 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Infers detections on a TFRecord of TFExamples given an inference graph. + +Example usage: + ./infer_detections \ + --input_tfrecord_paths=/path/to/input/tfrecord1,/path/to/input/tfrecord2 \ + --output_tfrecord_path=/path/to/output/detections.tfrecord \ + --inference_graph=/path/to/frozen_weights_inference_graph.pb + +The output is a TFRecord of TFExamples. Each TFExample from the input is first +augmented with detections from the inference graph and then copied to the +output. 
+ +The input and output nodes of the inference graph are expected to have the same +types, shapes, and semantics, as the input and output nodes of graphs produced +by export_inference_graph.py, when run with --input_type=image_tensor. + +The script can also discard the image pixels in the output. This greatly +reduces the output size and can potentially accelerate reading data in +subsequent processing steps that don't require the images (e.g. computing +metrics). +""" + +import itertools +import tensorflow.compat.v1 as tf +from object_detection.inference import detection_inference + +tf.flags.DEFINE_string('input_tfrecord_paths', None, + 'A comma separated list of paths to input TFRecords.') +tf.flags.DEFINE_string('output_tfrecord_path', None, + 'Path to the output TFRecord.') +tf.flags.DEFINE_string('inference_graph', None, + 'Path to the inference graph with embedded weights.') +tf.flags.DEFINE_boolean('discard_image_pixels', False, + 'Discards the images in the output TFExamples. This' + ' significantly reduces the output size and is useful' + ' if the subsequent tools don\'t need access to the' + ' images (e.g. when computing evaluation measures).') + +FLAGS = tf.flags.FLAGS + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + + required_flags = ['input_tfrecord_paths', 'output_tfrecord_path', + 'inference_graph'] + for flag_name in required_flags: + if not getattr(FLAGS, flag_name): + raise ValueError('Flag --{} is required'.format(flag_name)) + + with tf.Session() as sess: + input_tfrecord_paths = [ + v for v in FLAGS.input_tfrecord_paths.split(',') if v] + tf.logging.info('Reading input from %d files', len(input_tfrecord_paths)) + serialized_example_tensor, image_tensor = detection_inference.build_input( + input_tfrecord_paths) + tf.logging.info('Reading graph and building model...') + (detected_boxes_tensor, detected_scores_tensor, + detected_labels_tensor) = detection_inference.build_inference_graph( + image_tensor, FLAGS.inference_graph) + + tf.logging.info('Running inference and writing output to {}'.format( + FLAGS.output_tfrecord_path)) + sess.run(tf.local_variables_initializer()) + tf.train.start_queue_runners() + with tf.python_io.TFRecordWriter( + FLAGS.output_tfrecord_path) as tf_record_writer: + try: + for counter in itertools.count(): + tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 10, + counter) + tf_example = detection_inference.infer_detections_and_add_to_example( + serialized_example_tensor, detected_boxes_tensor, + detected_scores_tensor, detected_labels_tensor, + FLAGS.discard_image_pixels) + tf_record_writer.write(tf_example.SerializeToString()) + except tf.errors.OutOfRangeError: + tf.logging.info('Finished processing records') + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inputs.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inputs.py new file mode 100644 index 0000000000000000000000000000000000000000..7294e84d1bab0689f790921f599b19bc7b972934 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inputs.py @@ -0,0 +1,1143 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Model input function for tf-learn object detection model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +import tensorflow.compat.v1 as tf +from object_detection.builders import dataset_builder +from object_detection.builders import image_resizer_builder +from object_detection.builders import model_builder +from object_detection.builders import preprocessor_builder +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import densepose_ops +from object_detection.core import keypoint_ops +from object_detection.core import preprocessor +from object_detection.core import standard_fields as fields +from object_detection.data_decoders import tf_example_decoder +from object_detection.protos import eval_pb2 +from object_detection.protos import image_resizer_pb2 +from object_detection.protos import input_reader_pb2 +from object_detection.protos import model_pb2 +from object_detection.protos import train_pb2 +from object_detection.utils import config_util +from object_detection.utils import ops as util_ops +from object_detection.utils import shape_utils + +HASH_KEY = 'hash' +HASH_BINS = 1 << 31 +SERVING_FED_EXAMPLE_KEY = 'serialized_example' +_LABEL_OFFSET = 1 + +# A map of names to methods that help build the input pipeline. +INPUT_BUILDER_UTIL_MAP = { + 'dataset_build': dataset_builder.build, + 'model_build': model_builder.build, +} + + +def _multiclass_scores_or_one_hot_labels(multiclass_scores, + groundtruth_boxes, + groundtruth_classes, num_classes): + """Returns one-hot encoding of classes when multiclass_scores is empty.""" + # Replace groundtruth_classes tensor with multiclass_scores tensor when its + # non-empty. If multiclass_scores is empty fall back on groundtruth_classes + # tensor. + def true_fn(): + return tf.reshape(multiclass_scores, + [tf.shape(groundtruth_boxes)[0], num_classes]) + def false_fn(): + return tf.one_hot(groundtruth_classes, num_classes) + return tf.cond(tf.size(multiclass_scores) > 0, true_fn, false_fn) + + +def _convert_labeled_classes_to_k_hot(groundtruth_labeled_classes, num_classes): + """Returns k-hot encoding of the labeled classes.""" + + # If the input labeled_classes is empty, it assumes all classes are + # exhaustively labeled, thus returning an all-one encoding. 
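# Editorial illustration (not in the upstream file): with num_classes=2 and the
# 1-based label offset used throughout this module, an input of [2] maps to the
# k-hot vector [0., 1.], while an empty input falls through to tf.ones and
# yields [1., 1.] (every class treated as exhaustively labeled).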
+ def true_fn(): + return tf.sparse_to_dense( + groundtruth_labeled_classes - _LABEL_OFFSET, [num_classes], + tf.constant(1, dtype=tf.float32), + validate_indices=False) + + def false_fn(): + return tf.ones(num_classes, dtype=tf.float32) + + return tf.cond(tf.size(groundtruth_labeled_classes) > 0, true_fn, false_fn) + + +def _remove_unrecognized_classes(class_ids, unrecognized_label): + """Returns class ids with unrecognized classes filtered out.""" + + recognized_indices = tf.squeeze( + tf.where(tf.greater(class_ids, unrecognized_label)), -1) + return tf.gather(class_ids, recognized_indices) + + +def assert_or_prune_invalid_boxes(boxes): + """Makes sure boxes have valid sizes (ymax >= ymin, xmax >= xmin). + + When the hardware supports assertions, the function raises an error when + boxes have an invalid size. If assertions are not supported (e.g. on TPU), + boxes with invalid sizes are filtered out. + + Args: + boxes: float tensor of shape [num_boxes, 4] + + Returns: + boxes: float tensor of shape [num_valid_boxes, 4] with invalid boxes + filtered out. + + Raises: + tf.errors.InvalidArgumentError: When we detect boxes with invalid size. + This is not supported on TPUs. + """ + + ymin, xmin, ymax, xmax = tf.split( + boxes, num_or_size_splits=4, axis=1) + + height_check = tf.Assert(tf.reduce_all(ymax >= ymin), [ymin, ymax]) + width_check = tf.Assert(tf.reduce_all(xmax >= xmin), [xmin, xmax]) + + with tf.control_dependencies([height_check, width_check]): + boxes_tensor = tf.concat([ymin, xmin, ymax, xmax], axis=1) + boxlist = box_list.BoxList(boxes_tensor) + # TODO(b/149221748) Remove pruning when XLA supports assertions. + boxlist = box_list_ops.prune_small_boxes(boxlist, 0) + + return boxlist.get() + + +def transform_input_data(tensor_dict, + model_preprocess_fn, + image_resizer_fn, + num_classes, + data_augmentation_fn=None, + merge_multiple_boxes=False, + retain_original_image=False, + use_multiclass_scores=False, + use_bfloat16=False, + retain_original_image_additional_channels=False, + keypoint_type_weight=None): + """A single function that is responsible for all input data transformations. + + Data transformation functions are applied in the following order. + 1. If key fields.InputDataFields.image_additional_channels is present in + tensor_dict, the additional channels will be merged into + fields.InputDataFields.image. + 2. data_augmentation_fn (optional): applied on tensor_dict. + 3. model_preprocess_fn: applied only on image tensor in tensor_dict. + 4. keypoint_type_weight (optional): If groundtruth keypoints are in + the tensor dictionary, per-keypoint weights are produced. These weights are + initialized by `keypoint_type_weight` (or ones if left None). + Then, for all keypoints that are not visible, the weights are set to 0 (to + avoid penalizing the model in a loss function). + 5. image_resizer_fn: applied on original image and instance mask tensor in + tensor_dict. + 6. one_hot_encoding: applied to classes tensor in tensor_dict. + 7. merge_multiple_boxes (optional): when groundtruth boxes are exactly the + same they can be merged into a single box with an associated k-hot class + label. + + Args: + tensor_dict: dictionary containing input tensors keyed by + fields.InputDataFields. + model_preprocess_fn: model's preprocess function to apply on image tensor. + This function must take in a 4-D float tensor and return a 4-D preprocess + float tensor and a tensor containing the true image shape. + image_resizer_fn: image resizer function to apply on groundtruth instance + `masks. 
This function must take a 3-D float tensor of an image and a 3-D + tensor of instance masks and return a resized version of these along with + the true shapes. + num_classes: number of max classes to one-hot (or k-hot) encode the class + labels. + data_augmentation_fn: (optional) data augmentation function to apply on + input `tensor_dict`. + merge_multiple_boxes: (optional) whether to merge multiple groundtruth boxes + and classes for a given image if the boxes are exactly the same. + retain_original_image: (optional) whether to retain original image in the + output dictionary. + use_multiclass_scores: whether to use multiclass scores as class targets + instead of one-hot encoding of `groundtruth_classes`. When + this is True and multiclass_scores is empty, one-hot encoding of + `groundtruth_classes` is used as a fallback. + use_bfloat16: (optional) a bool, whether to use bfloat16 in training. + retain_original_image_additional_channels: (optional) Whether to retain + original image additional channels in the output dictionary. + keypoint_type_weight: A list (of length num_keypoints) containing + groundtruth loss weights to use for each keypoint. If None, will use a + weight of 1. + + Returns: + A dictionary keyed by fields.InputDataFields containing the tensors obtained + after applying all the transformations. + + Raises: + KeyError: If both groundtruth_labeled_classes and groundtruth_image_classes + are provided by the decoder in tensor_dict since both fields are + considered to contain the same information. + """ + out_tensor_dict = tensor_dict.copy() + + input_fields = fields.InputDataFields + labeled_classes_field = input_fields.groundtruth_labeled_classes + image_classes_field = input_fields.groundtruth_image_classes + verified_neg_classes_field = input_fields.groundtruth_verified_neg_classes + not_exhaustive_field = input_fields.groundtruth_not_exhaustive_classes + + if (labeled_classes_field in out_tensor_dict and + image_classes_field in out_tensor_dict): + raise KeyError('groundtruth_labeled_classes and groundtruth_image_classes' + 'are provided by the decoder, but only one should be set.') + + for field in [labeled_classes_field, + image_classes_field, + verified_neg_classes_field, + not_exhaustive_field]: + if field in out_tensor_dict: + out_tensor_dict[field] = _remove_unrecognized_classes( + out_tensor_dict[field], unrecognized_label=-1) + out_tensor_dict[field] = _convert_labeled_classes_to_k_hot( + out_tensor_dict[field], num_classes) + + if input_fields.multiclass_scores in out_tensor_dict: + out_tensor_dict[ + input_fields + .multiclass_scores] = _multiclass_scores_or_one_hot_labels( + out_tensor_dict[input_fields.multiclass_scores], + out_tensor_dict[input_fields.groundtruth_boxes], + out_tensor_dict[input_fields.groundtruth_classes], + num_classes) + + if input_fields.groundtruth_boxes in out_tensor_dict: + out_tensor_dict = util_ops.filter_groundtruth_with_nan_box_coordinates( + out_tensor_dict) + out_tensor_dict = util_ops.filter_unrecognized_classes(out_tensor_dict) + + if retain_original_image: + out_tensor_dict[input_fields.original_image] = tf.cast( + image_resizer_fn(out_tensor_dict[input_fields.image], + None)[0], tf.uint8) + + if input_fields.image_additional_channels in out_tensor_dict: + channels = out_tensor_dict[input_fields.image_additional_channels] + out_tensor_dict[input_fields.image] = tf.concat( + [out_tensor_dict[input_fields.image], channels], axis=2) + if retain_original_image_additional_channels: + out_tensor_dict[ + 
input_fields.image_additional_channels] = tf.cast( + image_resizer_fn(channels, None)[0], tf.uint8) + + # Apply data augmentation ops. + if data_augmentation_fn is not None: + out_tensor_dict = data_augmentation_fn(out_tensor_dict) + + # Apply model preprocessing ops and resize instance masks. + image = out_tensor_dict[input_fields.image] + preprocessed_resized_image, true_image_shape = model_preprocess_fn( + tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0)) + + preprocessed_shape = tf.shape(preprocessed_resized_image) + new_height, new_width = preprocessed_shape[1], preprocessed_shape[2] + + im_box = tf.stack([ + 0.0, 0.0, + tf.to_float(new_height) / tf.to_float(true_image_shape[0, 0]), + tf.to_float(new_width) / tf.to_float(true_image_shape[0, 1]) + ]) + + if input_fields.groundtruth_boxes in tensor_dict: + bboxes = out_tensor_dict[input_fields.groundtruth_boxes] + boxlist = box_list.BoxList(bboxes) + realigned_bboxes = box_list_ops.change_coordinate_frame(boxlist, im_box) + + realigned_boxes_tensor = realigned_bboxes.get() + valid_boxes_tensor = assert_or_prune_invalid_boxes(realigned_boxes_tensor) + out_tensor_dict[ + input_fields.groundtruth_boxes] = valid_boxes_tensor + + if input_fields.groundtruth_keypoints in tensor_dict: + keypoints = out_tensor_dict[input_fields.groundtruth_keypoints] + realigned_keypoints = keypoint_ops.change_coordinate_frame(keypoints, + im_box) + out_tensor_dict[ + input_fields.groundtruth_keypoints] = realigned_keypoints + flds_gt_kpt = input_fields.groundtruth_keypoints + flds_gt_kpt_vis = input_fields.groundtruth_keypoint_visibilities + flds_gt_kpt_weights = input_fields.groundtruth_keypoint_weights + if flds_gt_kpt_vis not in out_tensor_dict: + out_tensor_dict[flds_gt_kpt_vis] = tf.ones_like( + out_tensor_dict[flds_gt_kpt][:, :, 0], + dtype=tf.bool) + out_tensor_dict[flds_gt_kpt_weights] = ( + keypoint_ops.keypoint_weights_from_visibilities( + out_tensor_dict[flds_gt_kpt_vis], + keypoint_type_weight)) + + dp_surface_coords_fld = input_fields.groundtruth_dp_surface_coords + if dp_surface_coords_fld in tensor_dict: + dp_surface_coords = out_tensor_dict[dp_surface_coords_fld] + realigned_dp_surface_coords = densepose_ops.change_coordinate_frame( + dp_surface_coords, im_box) + out_tensor_dict[dp_surface_coords_fld] = realigned_dp_surface_coords + + if use_bfloat16: + preprocessed_resized_image = tf.cast( + preprocessed_resized_image, tf.bfloat16) + if input_fields.context_features in out_tensor_dict: + out_tensor_dict[input_fields.context_features] = tf.cast( + out_tensor_dict[input_fields.context_features], tf.bfloat16) + out_tensor_dict[input_fields.image] = tf.squeeze( + preprocessed_resized_image, axis=0) + out_tensor_dict[input_fields.true_image_shape] = tf.squeeze( + true_image_shape, axis=0) + if input_fields.groundtruth_instance_masks in out_tensor_dict: + masks = out_tensor_dict[input_fields.groundtruth_instance_masks] + _, resized_masks, _ = image_resizer_fn(image, masks) + if use_bfloat16: + resized_masks = tf.cast(resized_masks, tf.bfloat16) + out_tensor_dict[ + input_fields.groundtruth_instance_masks] = resized_masks + + zero_indexed_groundtruth_classes = out_tensor_dict[ + input_fields.groundtruth_classes] - _LABEL_OFFSET + if use_multiclass_scores: + out_tensor_dict[ + input_fields.groundtruth_classes] = out_tensor_dict[ + input_fields.multiclass_scores] + else: + out_tensor_dict[input_fields.groundtruth_classes] = tf.one_hot( + zero_indexed_groundtruth_classes, num_classes) + out_tensor_dict.pop(input_fields.multiclass_scores, None) + 
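# Editorial illustration (not in the upstream file): because of _LABEL_OFFSET,
# 1-based label-map ids such as [1, 2] become zero-indexed [0, 1] above, so with
# num_classes=2 the resulting one-hot groundtruth_classes rows are
# [[1., 0.], [0., 1.]].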
+ if input_fields.groundtruth_confidences in out_tensor_dict: + groundtruth_confidences = out_tensor_dict[ + input_fields.groundtruth_confidences] + # Map the confidences to the one-hot encoding of classes + out_tensor_dict[input_fields.groundtruth_confidences] = ( + tf.reshape(groundtruth_confidences, [-1, 1]) * + out_tensor_dict[input_fields.groundtruth_classes]) + else: + groundtruth_confidences = tf.ones_like( + zero_indexed_groundtruth_classes, dtype=tf.float32) + out_tensor_dict[input_fields.groundtruth_confidences] = ( + out_tensor_dict[input_fields.groundtruth_classes]) + + if merge_multiple_boxes: + merged_boxes, merged_classes, merged_confidences, _ = ( + util_ops.merge_boxes_with_multiple_labels( + out_tensor_dict[input_fields.groundtruth_boxes], + zero_indexed_groundtruth_classes, + groundtruth_confidences, + num_classes)) + merged_classes = tf.cast(merged_classes, tf.float32) + out_tensor_dict[input_fields.groundtruth_boxes] = merged_boxes + out_tensor_dict[input_fields.groundtruth_classes] = merged_classes + out_tensor_dict[input_fields.groundtruth_confidences] = ( + merged_confidences) + if input_fields.groundtruth_boxes in out_tensor_dict: + out_tensor_dict[input_fields.num_groundtruth_boxes] = tf.shape( + out_tensor_dict[input_fields.groundtruth_boxes])[0] + + return out_tensor_dict + + +def pad_input_data_to_static_shapes(tensor_dict, + max_num_boxes, + num_classes, + spatial_image_shape=None, + max_num_context_features=None, + context_feature_length=None, + max_dp_points=336): + """Pads input tensors to static shapes. + + In case num_additional_channels > 0, we assume that the additional channels + have already been concatenated to the base image. + + Args: + tensor_dict: Tensor dictionary of input data + max_num_boxes: Max number of groundtruth boxes needed to compute shapes for + padding. + num_classes: Number of classes in the dataset needed to compute shapes for + padding. + spatial_image_shape: A list of two integers of the form [height, width] + containing expected spatial shape of the image. + max_num_context_features (optional): The maximum number of context + features needed to compute shapes padding. + context_feature_length (optional): The length of the context feature. + max_dp_points (optional): The maximum number of DensePose sampled points per + instance. The default (336) is selected since the original DensePose paper + (https://arxiv.org/pdf/1802.00434.pdf) indicates that the maximum number + of samples per part is 14, and therefore 24 * 14 = 336 is the maximum + sampler per instance. + + Returns: + A dictionary keyed by fields.InputDataFields containing padding shapes for + tensors in the dataset. + + Raises: + ValueError: If groundtruth classes is neither rank 1 nor rank 2, or if we + detect that additional channels have not been concatenated yet, or if + max_num_context_features is not specified and context_features is in the + tensor dict. + """ + + if not spatial_image_shape or spatial_image_shape == [-1, -1]: + height, width = None, None + else: + height, width = spatial_image_shape # pylint: disable=unpacking-non-sequence + + input_fields = fields.InputDataFields + num_additional_channels = 0 + if input_fields.image_additional_channels in tensor_dict: + num_additional_channels = shape_utils.get_dim_as_int(tensor_dict[ + input_fields.image_additional_channels].shape[2]) + + # We assume that if num_additional_channels > 0, then it has already been + # concatenated to the base image (but not the ground truth). 
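# Editorial illustration (not in the upstream file): for the 800x800x3 frames
# annotated earlier in this change, num_channels resolves to 3 and
# num_additional_channels stays 0; concatenating, say, one extra mask channel
# would give num_channels = 4 and num_additional_channels = 1, which still
# satisfies the sanity checks below.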
+ num_channels = 3 + if input_fields.image in tensor_dict: + num_channels = shape_utils.get_dim_as_int( + tensor_dict[input_fields.image].shape[2]) + + if num_additional_channels: + if num_additional_channels >= num_channels: + raise ValueError( + 'Image must be already concatenated with additional channels.') + + if (input_fields.original_image in tensor_dict and + shape_utils.get_dim_as_int( + tensor_dict[input_fields.original_image].shape[2]) == + num_channels): + raise ValueError( + 'Image must be already concatenated with additional channels.') + + if input_fields.context_features in tensor_dict and ( + max_num_context_features is None): + raise ValueError('max_num_context_features must be specified in the model ' + 'config if include_context is specified in the input ' + 'config') + + padding_shapes = { + input_fields.image: [height, width, num_channels], + input_fields.original_image_spatial_shape: [2], + input_fields.image_additional_channels: [ + height, width, num_additional_channels + ], + input_fields.source_id: [], + input_fields.filename: [], + input_fields.key: [], + input_fields.groundtruth_difficult: [max_num_boxes], + input_fields.groundtruth_boxes: [max_num_boxes, 4], + input_fields.groundtruth_classes: [max_num_boxes, num_classes], + input_fields.groundtruth_instance_masks: [ + max_num_boxes, height, width + ], + input_fields.groundtruth_is_crowd: [max_num_boxes], + input_fields.groundtruth_group_of: [max_num_boxes], + input_fields.groundtruth_area: [max_num_boxes], + input_fields.groundtruth_weights: [max_num_boxes], + input_fields.groundtruth_confidences: [ + max_num_boxes, num_classes + ], + input_fields.num_groundtruth_boxes: [], + input_fields.groundtruth_label_types: [max_num_boxes], + input_fields.groundtruth_label_weights: [max_num_boxes], + input_fields.true_image_shape: [3], + input_fields.groundtruth_image_classes: [num_classes], + input_fields.groundtruth_image_confidences: [num_classes], + input_fields.groundtruth_labeled_classes: [num_classes], + } + + if input_fields.original_image in tensor_dict: + padding_shapes[input_fields.original_image] = [ + height, width, + shape_utils.get_dim_as_int(tensor_dict[input_fields. + original_image].shape[2]) + ] + if input_fields.groundtruth_keypoints in tensor_dict: + tensor_shape = ( + tensor_dict[input_fields.groundtruth_keypoints].shape) + padding_shape = [max_num_boxes, + shape_utils.get_dim_as_int(tensor_shape[1]), + shape_utils.get_dim_as_int(tensor_shape[2])] + padding_shapes[input_fields.groundtruth_keypoints] = padding_shape + if input_fields.groundtruth_keypoint_visibilities in tensor_dict: + tensor_shape = tensor_dict[input_fields. + groundtruth_keypoint_visibilities].shape + padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])] + padding_shapes[input_fields. + groundtruth_keypoint_visibilities] = padding_shape + + if input_fields.groundtruth_keypoint_weights in tensor_dict: + tensor_shape = ( + tensor_dict[input_fields.groundtruth_keypoint_weights].shape) + padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])] + padding_shapes[input_fields. 
+ groundtruth_keypoint_weights] = padding_shape + if input_fields.groundtruth_dp_num_points in tensor_dict: + padding_shapes[ + input_fields.groundtruth_dp_num_points] = [max_num_boxes] + padding_shapes[ + input_fields.groundtruth_dp_part_ids] = [ + max_num_boxes, max_dp_points] + padding_shapes[ + input_fields.groundtruth_dp_surface_coords] = [ + max_num_boxes, max_dp_points, 4] + if input_fields.groundtruth_track_ids in tensor_dict: + padding_shapes[ + input_fields.groundtruth_track_ids] = [max_num_boxes] + + if input_fields.groundtruth_verified_neg_classes in tensor_dict: + padding_shapes[ + input_fields.groundtruth_verified_neg_classes] = [num_classes] + if input_fields.groundtruth_not_exhaustive_classes in tensor_dict: + padding_shapes[ + input_fields.groundtruth_not_exhaustive_classes] = [num_classes] + + # Prepare for ContextRCNN related fields. + if input_fields.context_features in tensor_dict: + padding_shape = [max_num_context_features, context_feature_length] + padding_shapes[input_fields.context_features] = padding_shape + + tensor_shape = tf.shape( + tensor_dict[input_fields.context_features]) + tensor_dict[input_fields.valid_context_size] = tensor_shape[0] + padding_shapes[input_fields.valid_context_size] = [] + if input_fields.context_feature_length in tensor_dict: + padding_shapes[input_fields.context_feature_length] = [] + + if input_fields.is_annotated in tensor_dict: + padding_shapes[input_fields.is_annotated] = [] + + padded_tensor_dict = {} + for tensor_name in tensor_dict: + padded_tensor_dict[tensor_name] = shape_utils.pad_or_clip_nd( + tensor_dict[tensor_name], padding_shapes[tensor_name]) + + # Make sure that the number of groundtruth boxes now reflects the + # padded/clipped tensors. + if input_fields.num_groundtruth_boxes in padded_tensor_dict: + padded_tensor_dict[input_fields.num_groundtruth_boxes] = ( + tf.minimum( + padded_tensor_dict[input_fields.num_groundtruth_boxes], + max_num_boxes)) + return padded_tensor_dict + + +def augment_input_data(tensor_dict, data_augmentation_options): + """Applies data augmentation ops to input tensors. + + Args: + tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields. + data_augmentation_options: A list of tuples, where each tuple contains a + function and a dictionary that contains arguments and their values. + Usually, this is the output of core/preprocessor.build. + + Returns: + A dictionary of tensors obtained by applying data augmentation ops to the + input tensor dictionary. 
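  Example (editorial illustration, not part of the upstream docstring; the two
  preprocessor ops named here are assumed to be available in
  object_detection.core.preprocessor):

    data_augmentation_options = [
        (preprocessor.random_horizontal_flip, {}),
        (preprocessor.random_adjust_brightness, {'max_delta': 0.2}),
    ]
    augmented_dict = augment_input_data(tensor_dict, data_augmentation_options)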
+ """ + tensor_dict[fields.InputDataFields.image] = tf.expand_dims( + tf.cast(tensor_dict[fields.InputDataFields.image], dtype=tf.float32), 0) + + include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks + in tensor_dict) + include_keypoints = (fields.InputDataFields.groundtruth_keypoints + in tensor_dict) + include_keypoint_visibilities = ( + fields.InputDataFields.groundtruth_keypoint_visibilities in tensor_dict) + include_label_weights = (fields.InputDataFields.groundtruth_weights + in tensor_dict) + include_label_confidences = (fields.InputDataFields.groundtruth_confidences + in tensor_dict) + include_multiclass_scores = (fields.InputDataFields.multiclass_scores in + tensor_dict) + dense_pose_fields = [fields.InputDataFields.groundtruth_dp_num_points, + fields.InputDataFields.groundtruth_dp_part_ids, + fields.InputDataFields.groundtruth_dp_surface_coords] + include_dense_pose = all(field in tensor_dict for field in dense_pose_fields) + tensor_dict = preprocessor.preprocess( + tensor_dict, data_augmentation_options, + func_arg_map=preprocessor.get_default_func_arg_map( + include_label_weights=include_label_weights, + include_label_confidences=include_label_confidences, + include_multiclass_scores=include_multiclass_scores, + include_instance_masks=include_instance_masks, + include_keypoints=include_keypoints, + include_keypoint_visibilities=include_keypoint_visibilities, + include_dense_pose=include_dense_pose)) + tensor_dict[fields.InputDataFields.image] = tf.squeeze( + tensor_dict[fields.InputDataFields.image], axis=0) + return tensor_dict + + +def _get_labels_dict(input_dict): + """Extracts labels dict from input dict.""" + required_label_keys = [ + fields.InputDataFields.num_groundtruth_boxes, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + fields.InputDataFields.groundtruth_weights, + ] + labels_dict = {} + for key in required_label_keys: + labels_dict[key] = input_dict[key] + + optional_label_keys = [ + fields.InputDataFields.groundtruth_confidences, + fields.InputDataFields.groundtruth_labeled_classes, + fields.InputDataFields.groundtruth_keypoints, + fields.InputDataFields.groundtruth_instance_masks, + fields.InputDataFields.groundtruth_area, + fields.InputDataFields.groundtruth_is_crowd, + fields.InputDataFields.groundtruth_group_of, + fields.InputDataFields.groundtruth_difficult, + fields.InputDataFields.groundtruth_keypoint_visibilities, + fields.InputDataFields.groundtruth_keypoint_weights, + fields.InputDataFields.groundtruth_dp_num_points, + fields.InputDataFields.groundtruth_dp_part_ids, + fields.InputDataFields.groundtruth_dp_surface_coords, + fields.InputDataFields.groundtruth_track_ids, + fields.InputDataFields.groundtruth_verified_neg_classes, + fields.InputDataFields.groundtruth_not_exhaustive_classes + ] + + for key in optional_label_keys: + if key in input_dict: + labels_dict[key] = input_dict[key] + if fields.InputDataFields.groundtruth_difficult in labels_dict: + labels_dict[fields.InputDataFields.groundtruth_difficult] = tf.cast( + labels_dict[fields.InputDataFields.groundtruth_difficult], tf.int32) + return labels_dict + + +def _replace_empty_string_with_random_number(string_tensor): + """Returns string unchanged if non-empty, and random string tensor otherwise. + + The random string is an integer 0 and 2**63 - 1, casted as string. + + + Args: + string_tensor: A tf.tensor of dtype string. + + Returns: + out_string: A tf.tensor of dtype string. 
If string_tensor contains the empty + string, out_string will contain a random integer casted to a string. + Otherwise string_tensor is returned unchanged. + + """ + + empty_string = tf.constant('', dtype=tf.string, name='EmptyString') + + random_source_id = tf.as_string( + tf.random_uniform(shape=[], maxval=2**63 - 1, dtype=tf.int64)) + + out_string = tf.cond( + tf.equal(string_tensor, empty_string), + true_fn=lambda: random_source_id, + false_fn=lambda: string_tensor) + + return out_string + + +def _get_features_dict(input_dict, include_source_id=False): + """Extracts features dict from input dict.""" + + source_id = _replace_empty_string_with_random_number( + input_dict[fields.InputDataFields.source_id]) + + hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS) + features = { + fields.InputDataFields.image: + input_dict[fields.InputDataFields.image], + HASH_KEY: tf.cast(hash_from_source_id, tf.int32), + fields.InputDataFields.true_image_shape: + input_dict[fields.InputDataFields.true_image_shape], + fields.InputDataFields.original_image_spatial_shape: + input_dict[fields.InputDataFields.original_image_spatial_shape] + } + if include_source_id: + features[fields.InputDataFields.source_id] = source_id + if fields.InputDataFields.original_image in input_dict: + features[fields.InputDataFields.original_image] = input_dict[ + fields.InputDataFields.original_image] + if fields.InputDataFields.image_additional_channels in input_dict: + features[fields.InputDataFields.image_additional_channels] = input_dict[ + fields.InputDataFields.image_additional_channels] + if fields.InputDataFields.context_features in input_dict: + features[fields.InputDataFields.context_features] = input_dict[ + fields.InputDataFields.context_features] + if fields.InputDataFields.valid_context_size in input_dict: + features[fields.InputDataFields.valid_context_size] = input_dict[ + fields.InputDataFields.valid_context_size] + return features + + +def create_train_input_fn(train_config, train_input_config, + model_config): + """Creates a train `input` function for `Estimator`. + + Args: + train_config: A train_pb2.TrainConfig. + train_input_config: An input_reader_pb2.InputReader. + model_config: A model_pb2.DetectionModel. + + Returns: + `input_fn` for `Estimator` in TRAIN mode. + """ + + def _train_input_fn(params=None): + return train_input(train_config, train_input_config, model_config, + params=params) + + return _train_input_fn + + +def train_input(train_config, train_input_config, + model_config, model=None, params=None, input_context=None): + """Returns `features` and `labels` tensor dictionaries for training. + + Args: + train_config: A train_pb2.TrainConfig. + train_input_config: An input_reader_pb2.InputReader. + model_config: A model_pb2.DetectionModel. + model: A pre-constructed Detection Model. + If None, one will be created from the config. + params: Parameter dictionary passed from the estimator. + input_context: optional, A tf.distribute.InputContext object used to + shard filenames and compute per-replica batch_size when this function + is being called per-replica. + + Returns: + A tf.data.Dataset that holds (features, labels) tuple. + + features: Dictionary of feature tensors. + features[fields.InputDataFields.image] is a [batch_size, H, W, C] + float32 tensor with preprocessed images. + features[HASH_KEY] is a [batch_size] int32 tensor representing unique + identifiers for the images. 
+      features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
+        int32 tensor representing the true image shapes, as preprocessed
+        images could be padded.
+      features[fields.InputDataFields.original_image] (optional) is a
+        [batch_size, H, W, C] float32 tensor with original images.
+    labels: Dictionary of groundtruth tensors.
+      labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size]
+        int32 tensor indicating the number of groundtruth boxes.
+      labels[fields.InputDataFields.groundtruth_boxes] is a
+        [batch_size, num_boxes, 4] float32 tensor containing the corners of
+        the groundtruth boxes.
+      labels[fields.InputDataFields.groundtruth_classes] is a
+        [batch_size, num_boxes, num_classes] float32 one-hot tensor of
+        classes.
+      labels[fields.InputDataFields.groundtruth_weights] is a
+        [batch_size, num_boxes] float32 tensor containing groundtruth weights
+        for the boxes.
+      -- Optional --
+      labels[fields.InputDataFields.groundtruth_instance_masks] is a
+        [batch_size, num_boxes, H, W] float32 tensor containing only binary
+        values, which represent instance masks for objects.
+      labels[fields.InputDataFields.groundtruth_keypoints] is a
+        [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing
+        keypoints for each box.
+      labels[fields.InputDataFields.groundtruth_keypoint_weights] is a
+        [batch_size, num_boxes, num_keypoints] float32 tensor containing
+        groundtruth weights for the keypoints.
+      labels[fields.InputDataFields.groundtruth_keypoint_visibilities] is a
+        [batch_size, num_boxes, num_keypoints] bool tensor containing
+        groundtruth visibilities for each keypoint.
+      labels[fields.InputDataFields.groundtruth_labeled_classes] is a
+        [batch_size, num_classes] float32 k-hot tensor of classes.
+      labels[fields.InputDataFields.groundtruth_dp_num_points] is a
+        [batch_size, num_boxes] int32 tensor with the number of sampled
+        DensePose points per object.
+      labels[fields.InputDataFields.groundtruth_dp_part_ids] is a
+        [batch_size, num_boxes, max_sampled_points] int32 tensor with the
+        DensePose part ids (0-indexed) per object.
+      labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a
+        [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the
+        DensePose surface coordinates. The format is (y, x, v, u), where (y, x)
+        are normalized image coordinates and (v, u) are normalized surface part
+        coordinates.
+      labels[fields.InputDataFields.groundtruth_track_ids] is a
+        [batch_size, num_boxes] int32 tensor with the track ID for each object.
+
+  Raises:
+    TypeError: if the `train_config`, `train_input_config` or `model_config`
+      are not of the correct type.
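+
+  Example:
+    A minimal illustrative sketch (here `configs` is assumed to be the
+    dictionary returned by config_util.get_configs_from_pipeline_file, and
+    iteration is TF1-style as in the accompanying inputs_test.py):
+
+      dataset = train_input(configs['train_config'],
+                            configs['train_input_config'],
+                            configs['model'])
+      features, labels = tf.data.make_initializable_iterator(
+          dataset).get_next()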
+ """ + if not isinstance(train_config, train_pb2.TrainConfig): + raise TypeError('For training mode, the `train_config` must be a ' + 'train_pb2.TrainConfig.') + if not isinstance(train_input_config, input_reader_pb2.InputReader): + raise TypeError('The `train_input_config` must be a ' + 'input_reader_pb2.InputReader.') + if not isinstance(model_config, model_pb2.DetectionModel): + raise TypeError('The `model_config` must be a ' + 'model_pb2.DetectionModel.') + + if model is None: + model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( + model_config, is_training=True).preprocess + else: + model_preprocess_fn = model.preprocess + + num_classes = config_util.get_number_of_classes(model_config) + + def transform_and_pad_input_data_fn(tensor_dict): + """Combines transform and pad operation.""" + data_augmentation_options = [ + preprocessor_builder.build(step) + for step in train_config.data_augmentation_options + ] + data_augmentation_fn = functools.partial( + augment_input_data, + data_augmentation_options=data_augmentation_options) + + image_resizer_config = config_util.get_image_resizer_config(model_config) + image_resizer_fn = image_resizer_builder.build(image_resizer_config) + keypoint_type_weight = train_input_config.keypoint_type_weight or None + transform_data_fn = functools.partial( + transform_input_data, model_preprocess_fn=model_preprocess_fn, + image_resizer_fn=image_resizer_fn, + num_classes=num_classes, + data_augmentation_fn=data_augmentation_fn, + merge_multiple_boxes=train_config.merge_multiple_label_boxes, + retain_original_image=train_config.retain_original_images, + use_multiclass_scores=train_config.use_multiclass_scores, + use_bfloat16=train_config.use_bfloat16, + keypoint_type_weight=keypoint_type_weight) + + tensor_dict = pad_input_data_to_static_shapes( + tensor_dict=transform_data_fn(tensor_dict), + max_num_boxes=train_input_config.max_number_of_boxes, + num_classes=num_classes, + spatial_image_shape=config_util.get_spatial_image_size( + image_resizer_config), + max_num_context_features=config_util.get_max_num_context_features( + model_config), + context_feature_length=config_util.get_context_feature_length( + model_config)) + include_source_id = train_input_config.include_source_id + return (_get_features_dict(tensor_dict, include_source_id), + _get_labels_dict(tensor_dict)) + reduce_to_frame_fn = get_reduce_to_frame_fn(train_input_config, True) + + dataset = INPUT_BUILDER_UTIL_MAP['dataset_build']( + train_input_config, + transform_input_data_fn=transform_and_pad_input_data_fn, + batch_size=params['batch_size'] if params else train_config.batch_size, + input_context=input_context, + reduce_to_frame_fn=reduce_to_frame_fn) + return dataset + + +def create_eval_input_fn(eval_config, eval_input_config, model_config): + """Creates an eval `input` function for `Estimator`. + + Args: + eval_config: An eval_pb2.EvalConfig. + eval_input_config: An input_reader_pb2.InputReader. + model_config: A model_pb2.DetectionModel. + + Returns: + `input_fn` for `Estimator` in EVAL mode. + """ + + def _eval_input_fn(params=None): + return eval_input(eval_config, eval_input_config, model_config, + params=params) + + return _eval_input_fn + + +def eval_input(eval_config, eval_input_config, model_config, + model=None, params=None): + """Returns `features` and `labels` tensor dictionaries for evaluation. + + Args: + eval_config: An eval_pb2.EvalConfig. + eval_input_config: An input_reader_pb2.InputReader. + model_config: A model_pb2.DetectionModel. 
+    model: A pre-constructed Detection Model.
+      If None, one will be created from the config.
+    params: Parameter dictionary passed from the estimator.
+
+  Returns:
+    A tf.data.Dataset that holds (features, labels) tuple.
+
+    features: Dictionary of feature tensors.
+      features[fields.InputDataFields.image] is a [1, H, W, C] float32 tensor
+        with preprocessed images.
+      features[HASH_KEY] is a [1] int32 tensor representing unique
+        identifiers for the images.
+      features[fields.InputDataFields.true_image_shape] is a [1, 3]
+        int32 tensor representing the true image shapes, as preprocessed
+        images could be padded.
+      features[fields.InputDataFields.original_image] is a [1, H', W', C]
+        float32 tensor with the original image.
+    labels: Dictionary of groundtruth tensors.
+      labels[fields.InputDataFields.groundtruth_boxes] is a [1, num_boxes, 4]
+        float32 tensor containing the corners of the groundtruth boxes.
+      labels[fields.InputDataFields.groundtruth_classes] is a
+        [num_boxes, num_classes] float32 one-hot tensor of classes.
+      labels[fields.InputDataFields.groundtruth_area] is a [1, num_boxes]
+        float32 tensor containing object areas.
+      labels[fields.InputDataFields.groundtruth_is_crowd] is a [1, num_boxes]
+        bool tensor indicating if the boxes enclose a crowd.
+      labels[fields.InputDataFields.groundtruth_difficult] is a [1, num_boxes]
+        int32 tensor indicating if the boxes represent difficult instances.
+      -- Optional --
+      labels[fields.InputDataFields.groundtruth_instance_masks] is a
+        [1, num_boxes, H, W] float32 tensor containing only binary values,
+        which represent instance masks for objects.
+      labels[fields.InputDataFields.groundtruth_keypoint_weights] is a
+        [batch_size, num_boxes, num_keypoints] float32 tensor containing
+        groundtruth weights for the keypoints.
+      labels[fields.InputDataFields.groundtruth_keypoint_visibilities] is a
+        [batch_size, num_boxes, num_keypoints] bool tensor containing
+        groundtruth visibilities for each keypoint.
+      labels[fields.InputDataFields.groundtruth_group_of] is a [1, num_boxes]
+        bool tensor indicating if the box covers more than 5 instances of the
+        same class which heavily occlude each other.
+      labels[fields.InputDataFields.groundtruth_labeled_classes] is a
+        [num_boxes, num_classes] float32 k-hot tensor of classes.
+      labels[fields.InputDataFields.groundtruth_dp_num_points] is a
+        [batch_size, num_boxes] int32 tensor with the number of sampled
+        DensePose points per object.
+      labels[fields.InputDataFields.groundtruth_dp_part_ids] is a
+        [batch_size, num_boxes, max_sampled_points] int32 tensor with the
+        DensePose part ids (0-indexed) per object.
+      labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a
+        [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the
+        DensePose surface coordinates. The format is (y, x, v, u), where (y, x)
+        are normalized image coordinates and (v, u) are normalized surface part
+        coordinates.
+      labels[fields.InputDataFields.groundtruth_track_ids] is a
+        [batch_size, num_boxes] int32 tensor with the track ID for each object.
+
+  Raises:
+    TypeError: if the `eval_config`, `eval_input_config` or `model_config`
+      are not of the correct type.
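+
+  Example:
+    An illustrative sketch of the analogous evaluation pipeline (same
+    `configs` assumption and TF1-style iteration as for `train_input` above):
+
+      dataset = eval_input(configs['eval_config'],
+                           configs['eval_input_configs'][0],
+                           configs['model'])
+      features, labels = tf.data.make_initializable_iterator(
+          dataset).get_next()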
+ """ + params = params or {} + if not isinstance(eval_config, eval_pb2.EvalConfig): + raise TypeError('For eval mode, the `eval_config` must be a ' + 'train_pb2.EvalConfig.') + if not isinstance(eval_input_config, input_reader_pb2.InputReader): + raise TypeError('The `eval_input_config` must be a ' + 'input_reader_pb2.InputReader.') + if not isinstance(model_config, model_pb2.DetectionModel): + raise TypeError('The `model_config` must be a ' + 'model_pb2.DetectionModel.') + + if eval_config.force_no_resize: + arch = model_config.WhichOneof('model') + arch_config = getattr(model_config, arch) + image_resizer_proto = image_resizer_pb2.ImageResizer() + image_resizer_proto.identity_resizer.CopyFrom( + image_resizer_pb2.IdentityResizer()) + arch_config.image_resizer.CopyFrom(image_resizer_proto) + + if model is None: + model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( + model_config, is_training=False).preprocess + else: + model_preprocess_fn = model.preprocess + + def transform_and_pad_input_data_fn(tensor_dict): + """Combines transform and pad operation.""" + num_classes = config_util.get_number_of_classes(model_config) + + image_resizer_config = config_util.get_image_resizer_config(model_config) + image_resizer_fn = image_resizer_builder.build(image_resizer_config) + keypoint_type_weight = eval_input_config.keypoint_type_weight or None + + transform_data_fn = functools.partial( + transform_input_data, model_preprocess_fn=model_preprocess_fn, + image_resizer_fn=image_resizer_fn, + num_classes=num_classes, + data_augmentation_fn=None, + retain_original_image=eval_config.retain_original_images, + retain_original_image_additional_channels= + eval_config.retain_original_image_additional_channels, + keypoint_type_weight=keypoint_type_weight) + tensor_dict = pad_input_data_to_static_shapes( + tensor_dict=transform_data_fn(tensor_dict), + max_num_boxes=eval_input_config.max_number_of_boxes, + num_classes=config_util.get_number_of_classes(model_config), + spatial_image_shape=config_util.get_spatial_image_size( + image_resizer_config), + max_num_context_features=config_util.get_max_num_context_features( + model_config), + context_feature_length=config_util.get_context_feature_length( + model_config)) + include_source_id = eval_input_config.include_source_id + return (_get_features_dict(tensor_dict, include_source_id), + _get_labels_dict(tensor_dict)) + + reduce_to_frame_fn = get_reduce_to_frame_fn(eval_input_config, False) + + dataset = INPUT_BUILDER_UTIL_MAP['dataset_build']( + eval_input_config, + batch_size=params['batch_size'] if params else eval_config.batch_size, + transform_input_data_fn=transform_and_pad_input_data_fn, + reduce_to_frame_fn=reduce_to_frame_fn) + return dataset + + +def create_predict_input_fn(model_config, predict_input_config): + """Creates a predict `input` function for `Estimator`. + + Args: + model_config: A model_pb2.DetectionModel. + predict_input_config: An input_reader_pb2.InputReader. + + Returns: + `input_fn` for `Estimator` in PREDICT mode. + """ + + def _predict_input_fn(params=None): + """Decodes serialized tf.Examples and returns `ServingInputReceiver`. + + Args: + params: Parameter dictionary passed from the estimator. + + Returns: + `ServingInputReceiver`. 
+ """ + del params + example = tf.placeholder(dtype=tf.string, shape=[], name='tf_example') + + num_classes = config_util.get_number_of_classes(model_config) + model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( + model_config, is_training=False).preprocess + + image_resizer_config = config_util.get_image_resizer_config(model_config) + image_resizer_fn = image_resizer_builder.build(image_resizer_config) + + transform_fn = functools.partial( + transform_input_data, model_preprocess_fn=model_preprocess_fn, + image_resizer_fn=image_resizer_fn, + num_classes=num_classes, + data_augmentation_fn=None) + + decoder = tf_example_decoder.TfExampleDecoder( + load_instance_masks=False, + num_additional_channels=predict_input_config.num_additional_channels) + input_dict = transform_fn(decoder.decode(example)) + images = tf.cast(input_dict[fields.InputDataFields.image], dtype=tf.float32) + images = tf.expand_dims(images, axis=0) + true_image_shape = tf.expand_dims( + input_dict[fields.InputDataFields.true_image_shape], axis=0) + + return tf.estimator.export.ServingInputReceiver( + features={ + fields.InputDataFields.image: images, + fields.InputDataFields.true_image_shape: true_image_shape}, + receiver_tensors={SERVING_FED_EXAMPLE_KEY: example}) + + return _predict_input_fn + + +def get_reduce_to_frame_fn(input_reader_config, is_training): + """Returns a function reducing sequence tensors to single frame tensors. + + If the input type is not TF_SEQUENCE_EXAMPLE, the tensors are passed through + this function unchanged. Otherwise, when in training mode, a single frame is + selected at random from the sequence example, and the tensors for that frame + are converted to single frame tensors, with all associated context features. + In evaluation mode all frames are converted to single frame tensors with + copied context tensors. After the sequence example tensors are converted into + one or many single frame tensors, the images from each frame are decoded. + + Args: + input_reader_config: An input_reader_pb2.InputReader. + is_training: Whether we are in training mode. + + Returns: + `reduce_to_frame_fn` for the dataset builder + """ + if input_reader_config.input_type != ( + input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE')): + return lambda dataset, dataset_map_fn, batch_size, config: dataset + else: + def reduce_to_frame(dataset, dataset_map_fn, batch_size, + input_reader_config): + """Returns a function reducing sequence tensors to single frame tensors. + + Args: + dataset: A tf dataset containing sequence tensors. + dataset_map_fn: A function that handles whether to + map_with_legacy_function for this dataset + batch_size: used if map_with_legacy_function is true to determine + num_parallel_calls + input_reader_config: used if map_with_legacy_function is true to + determine num_parallel_calls + + Returns: + A tf dataset containing single frame tensors. + """ + if is_training: + def get_single_frame(tensor_dict): + """Returns a random frame from a sequence. + + Picks a random frame and returns slices of sequence tensors + corresponding to the random frame. Returns non-sequence tensors + unchanged. + + Args: + tensor_dict: A dictionary containing sequence tensors. + + Returns: + Tensors for a single random frame within the sequence. 
+ """ + num_frames = tf.cast( + tf.shape(tensor_dict[fields.InputDataFields.source_id])[0], + dtype=tf.int32) + if input_reader_config.frame_index == -1: + frame_index = tf.random.uniform((), minval=0, maxval=num_frames, + dtype=tf.int32) + else: + frame_index = tf.constant(input_reader_config.frame_index, + dtype=tf.int32) + out_tensor_dict = {} + for key in tensor_dict: + if key in fields.SEQUENCE_FIELDS: + # Slice random frame from sequence tensors + out_tensor_dict[key] = tensor_dict[key][frame_index] + else: + # Copy all context tensors. + out_tensor_dict[key] = tensor_dict[key] + return out_tensor_dict + dataset = dataset_map_fn(dataset, get_single_frame, batch_size, + input_reader_config) + else: + dataset = dataset_map_fn(dataset, util_ops.tile_context_tensors, + batch_size, input_reader_config) + dataset = dataset.unbatch() + # Decode frame here as SequenceExample tensors contain encoded images. + dataset = dataset_map_fn(dataset, util_ops.decode_image, batch_size, + input_reader_config) + return dataset + return reduce_to_frame diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inputs_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inputs_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9a0430a529c9f888f3444670ad1a0b89bc85e22d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/inputs_test.py @@ -0,0 +1,1665 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object_detection.tflearn.inputs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import os +import unittest +from absl import logging +from absl.testing import parameterized +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from object_detection import inputs +from object_detection.core import preprocessor +from object_detection.core import standard_fields as fields +from object_detection.utils import config_util +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + +if six.PY2: + import mock # pylint: disable=g-import-not-at-top +else: + from unittest import mock # pylint: disable=g-import-not-at-top, g-importing-member + +FLAGS = tf.flags.FLAGS + + +def _get_configs_for_model(model_name): + """Returns configurations for model.""" + fname = os.path.join(tf.resource_loader.get_data_files_path(), + 'samples/configs/' + model_name + '.config') + label_map_path = os.path.join(tf.resource_loader.get_data_files_path(), + 'data/pet_label_map.pbtxt') + data_path = os.path.join(tf.resource_loader.get_data_files_path(), + 'test_data/pets_examples.record') + configs = config_util.get_configs_from_pipeline_file(fname) + override_dict = { + 'train_input_path': data_path, + 'eval_input_path': data_path, + 'label_map_path': label_map_path + } + return config_util.merge_external_params_with_configs( + configs, kwargs_dict=override_dict) + + +def _get_configs_for_model_sequence_example(model_name, frame_index=-1): + """Returns configurations for model.""" + fname = os.path.join(tf.resource_loader.get_data_files_path(), + 'test_data/' + model_name + '.config') + label_map_path = os.path.join(tf.resource_loader.get_data_files_path(), + 'data/snapshot_serengeti_label_map.pbtxt') + data_path = os.path.join( + tf.resource_loader.get_data_files_path(), + 'test_data/snapshot_serengeti_sequence_examples.record') + configs = config_util.get_configs_from_pipeline_file(fname) + override_dict = { + 'train_input_path': data_path, + 'eval_input_path': data_path, + 'label_map_path': label_map_path, + 'frame_index': frame_index + } + return config_util.merge_external_params_with_configs( + configs, kwargs_dict=override_dict) + + +def _make_initializable_iterator(dataset): + """Creates an iterator, and initializes tables. + + Args: + dataset: A `tf.data.Dataset` object. + + Returns: + A `tf.data.Iterator`. 
+ """ + iterator = tf.data.make_initializable_iterator(dataset) + tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) + return iterator + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests under TF2.X.') +class InputFnTest(test_case.TestCase, parameterized.TestCase): + + def test_faster_rcnn_resnet50_train_input(self): + """Tests the training input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model('faster_rcnn_resnet50_pets') + model_config = configs['model'] + model_config.faster_rcnn.num_classes = 37 + train_input_fn = inputs.create_train_input_fn( + configs['train_config'], configs['train_input_config'], model_config) + features, labels = _make_initializable_iterator(train_input_fn()).get_next() + + self.assertAllEqual([1, None, None, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual([1], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [1, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [1, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [1, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [1, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_confidences].dtype) + + def test_faster_rcnn_resnet50_train_input_with_additional_channels(self): + """Tests the training input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model('faster_rcnn_resnet50_pets') + model_config = configs['model'] + configs['train_input_config'].num_additional_channels = 2 + configs['train_config'].retain_original_images = True + model_config.faster_rcnn.num_classes = 37 + train_input_fn = inputs.create_train_input_fn( + configs['train_config'], configs['train_input_config'], model_config) + features, labels = _make_initializable_iterator(train_input_fn()).get_next() + + self.assertAllEqual([1, None, None, 5], + features[fields.InputDataFields.image].shape.as_list()) + self.assertAllEqual( + [1, None, None, 3], + features[fields.InputDataFields.original_image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual([1], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [1, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [1, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [1, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual(tf.float32, + 
labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [1, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_confidences].dtype) + + @parameterized.parameters( + {'eval_batch_size': 1}, + {'eval_batch_size': 8} + ) + def test_faster_rcnn_resnet50_eval_input(self, eval_batch_size=1): + """Tests the eval input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model('faster_rcnn_resnet50_pets') + model_config = configs['model'] + model_config.faster_rcnn.num_classes = 37 + eval_config = configs['eval_config'] + eval_config.batch_size = eval_batch_size + eval_input_fn = inputs.create_eval_input_fn( + eval_config, configs['eval_input_configs'][0], model_config) + features, labels = _make_initializable_iterator(eval_input_fn()).get_next() + self.assertAllEqual([eval_batch_size, None, None, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual( + [eval_batch_size, None, None, 3], + features[fields.InputDataFields.original_image].shape.as_list()) + self.assertEqual(tf.uint8, + features[fields.InputDataFields.original_image].dtype) + self.assertAllEqual([eval_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [eval_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [eval_batch_size, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_area].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_area].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list()) + self.assertEqual( + tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_difficult].shape.as_list()) + self.assertEqual( + tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype) + + def test_context_rcnn_resnet50_train_input_with_sequence_example( + self, train_batch_size=8): + """Tests the training input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model_sequence_example( + 'context_rcnn_camera_trap') + model_config = configs['model'] + train_config = configs['train_config'] + train_config.batch_size = train_batch_size + train_input_fn = inputs.create_train_input_fn( + train_config, configs['train_input_config'], model_config) + features, labels = _make_initializable_iterator(train_input_fn()).get_next() + + self.assertAllEqual([train_batch_size, 640, 640, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + 
self.assertAllEqual([train_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [train_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [train_batch_size, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [train_batch_size, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [train_batch_size, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_confidences].dtype) + + def test_context_rcnn_resnet50_eval_input_with_sequence_example( + self, eval_batch_size=8): + """Tests the eval input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model_sequence_example( + 'context_rcnn_camera_trap') + model_config = configs['model'] + eval_config = configs['eval_config'] + eval_config.batch_size = eval_batch_size + eval_input_fn = inputs.create_eval_input_fn( + eval_config, configs['eval_input_configs'][0], model_config) + features, labels = _make_initializable_iterator(eval_input_fn()).get_next() + self.assertAllEqual([eval_batch_size, 640, 640, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual( + [eval_batch_size, 640, 640, 3], + features[fields.InputDataFields.original_image].shape.as_list()) + self.assertEqual(tf.uint8, + features[fields.InputDataFields.original_image].dtype) + self.assertAllEqual([eval_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [eval_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [eval_batch_size, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + + def test_context_rcnn_resnet50_train_input_with_sequence_example_frame_index( + self, train_batch_size=8): + """Tests the training input function for FasterRcnnResnet50.""" + configs = _get_configs_for_model_sequence_example( + 'context_rcnn_camera_trap', frame_index=2) + model_config = configs['model'] + train_config = configs['train_config'] + train_config.batch_size = train_batch_size + train_input_fn = inputs.create_train_input_fn( + train_config, configs['train_input_config'], model_config) + features, labels = _make_initializable_iterator(train_input_fn()).get_next() + + self.assertAllEqual([train_batch_size, 640, 640, 3], + features[fields.InputDataFields.image].shape.as_list()) + 
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual([train_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [train_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [train_batch_size, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [train_batch_size, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [train_batch_size, 100, model_config.faster_rcnn.num_classes], + labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_confidences].dtype) + + def test_ssd_inceptionV2_train_input(self): + """Tests the training input function for SSDInceptionV2.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + model_config = configs['model'] + model_config.ssd.num_classes = 37 + batch_size = configs['train_config'].batch_size + train_input_fn = inputs.create_train_input_fn( + configs['train_config'], configs['train_input_config'], model_config) + features, labels = _make_initializable_iterator(train_input_fn()).get_next() + + self.assertAllEqual([batch_size, 300, 300, 3], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual([batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [batch_size], + labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.int32, + labels[fields.InputDataFields.num_groundtruth_boxes].dtype) + self.assertAllEqual( + [batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [batch_size, 100, model_config.ssd.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [batch_size, 100], + labels[ + fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + + @parameterized.parameters( + {'eval_batch_size': 1}, + {'eval_batch_size': 8} + ) + def test_ssd_inceptionV2_eval_input(self, eval_batch_size=1): + """Tests the eval input function for SSDInceptionV2.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + model_config = configs['model'] + model_config.ssd.num_classes = 37 + eval_config = configs['eval_config'] + eval_config.batch_size = eval_batch_size + eval_input_fn = inputs.create_eval_input_fn( + eval_config, configs['eval_input_configs'][0], model_config) + features, labels = _make_initializable_iterator(eval_input_fn()).get_next() + self.assertAllEqual([eval_batch_size, 300, 300, 3], + features[fields.InputDataFields.image].shape.as_list()) + 
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual( + [eval_batch_size, 300, 300, 3], + features[fields.InputDataFields.original_image].shape.as_list()) + self.assertEqual(tf.uint8, + features[fields.InputDataFields.original_image].dtype) + self.assertAllEqual([eval_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [eval_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [eval_batch_size, 100, model_config.ssd.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[ + fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual( + tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_area].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_area].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list()) + self.assertEqual( + tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_difficult].shape.as_list()) + self.assertEqual( + tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype) + + def test_ssd_inceptionV2_eval_input_with_additional_channels( + self, eval_batch_size=1): + """Tests the eval input function for SSDInceptionV2 with additional channel. + + Args: + eval_batch_size: Batch size for eval set. 
+ """ + configs = _get_configs_for_model('ssd_inception_v2_pets') + model_config = configs['model'] + model_config.ssd.num_classes = 37 + configs['eval_input_configs'][0].num_additional_channels = 1 + eval_config = configs['eval_config'] + eval_config.batch_size = eval_batch_size + eval_config.retain_original_image_additional_channels = True + eval_input_fn = inputs.create_eval_input_fn( + eval_config, configs['eval_input_configs'][0], model_config) + features, labels = _make_initializable_iterator(eval_input_fn()).get_next() + self.assertAllEqual([eval_batch_size, 300, 300, 4], + features[fields.InputDataFields.image].shape.as_list()) + self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) + self.assertAllEqual( + [eval_batch_size, 300, 300, 3], + features[fields.InputDataFields.original_image].shape.as_list()) + self.assertEqual(tf.uint8, + features[fields.InputDataFields.original_image].dtype) + self.assertAllEqual([eval_batch_size, 300, 300, 1], features[ + fields.InputDataFields.image_additional_channels].shape.as_list()) + self.assertEqual( + tf.uint8, + features[fields.InputDataFields.image_additional_channels].dtype) + self.assertAllEqual([eval_batch_size], + features[inputs.HASH_KEY].shape.as_list()) + self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) + self.assertAllEqual( + [eval_batch_size, 100, 4], + labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_boxes].dtype) + self.assertAllEqual( + [eval_batch_size, 100, model_config.ssd.num_classes], + labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_classes].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_weights].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_area].shape.as_list()) + self.assertEqual(tf.float32, + labels[fields.InputDataFields.groundtruth_area].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list()) + self.assertEqual(tf.bool, + labels[fields.InputDataFields.groundtruth_is_crowd].dtype) + self.assertAllEqual( + [eval_batch_size, 100], + labels[fields.InputDataFields.groundtruth_difficult].shape.as_list()) + self.assertEqual(tf.int32, + labels[fields.InputDataFields.groundtruth_difficult].dtype) + + def test_predict_input(self): + """Tests the predict input function.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + predict_input_fn = inputs.create_predict_input_fn( + model_config=configs['model'], + predict_input_config=configs['eval_input_configs'][0]) + serving_input_receiver = predict_input_fn() + + image = serving_input_receiver.features[fields.InputDataFields.image] + receiver_tensors = serving_input_receiver.receiver_tensors[ + inputs.SERVING_FED_EXAMPLE_KEY] + self.assertEqual([1, 300, 300, 3], image.shape.as_list()) + self.assertEqual(tf.float32, image.dtype) + self.assertEqual(tf.string, receiver_tensors.dtype) + + def test_predict_input_with_additional_channels(self): + """Tests the predict input function with additional channels.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['eval_input_configs'][0].num_additional_channels = 2 + predict_input_fn = inputs.create_predict_input_fn( + 
model_config=configs['model'], + predict_input_config=configs['eval_input_configs'][0]) + serving_input_receiver = predict_input_fn() + + image = serving_input_receiver.features[fields.InputDataFields.image] + receiver_tensors = serving_input_receiver.receiver_tensors[ + inputs.SERVING_FED_EXAMPLE_KEY] + # RGB + 2 additional channels = 5 channels. + self.assertEqual([1, 300, 300, 5], image.shape.as_list()) + self.assertEqual(tf.float32, image.dtype) + self.assertEqual(tf.string, receiver_tensors.dtype) + + def test_error_with_bad_train_config(self): + """Tests that a TypeError is raised with improper train config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + train_input_fn = inputs.create_train_input_fn( + train_config=configs['eval_config'], # Expecting `TrainConfig`. + train_input_config=configs['train_input_config'], + model_config=configs['model']) + with self.assertRaises(TypeError): + train_input_fn() + + def test_error_with_bad_train_input_config(self): + """Tests that a TypeError is raised with improper train input config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + train_input_fn = inputs.create_train_input_fn( + train_config=configs['train_config'], + train_input_config=configs['model'], # Expecting `InputReader`. + model_config=configs['model']) + with self.assertRaises(TypeError): + train_input_fn() + + def test_error_with_bad_train_model_config(self): + """Tests that a TypeError is raised with improper train model config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + train_input_fn = inputs.create_train_input_fn( + train_config=configs['train_config'], + train_input_config=configs['train_input_config'], + model_config=configs['train_config']) # Expecting `DetectionModel`. + with self.assertRaises(TypeError): + train_input_fn() + + def test_error_with_bad_eval_config(self): + """Tests that a TypeError is raised with improper eval config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + eval_input_fn = inputs.create_eval_input_fn( + eval_config=configs['train_config'], # Expecting `EvalConfig`. + eval_input_config=configs['eval_input_configs'][0], + model_config=configs['model']) + with self.assertRaises(TypeError): + eval_input_fn() + + def test_error_with_bad_eval_input_config(self): + """Tests that a TypeError is raised with improper eval input config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + eval_input_fn = inputs.create_eval_input_fn( + eval_config=configs['eval_config'], + eval_input_config=configs['model'], # Expecting `InputReader`. + model_config=configs['model']) + with self.assertRaises(TypeError): + eval_input_fn() + + def test_error_with_bad_eval_model_config(self): + """Tests that a TypeError is raised with improper eval model config.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['model'].ssd.num_classes = 37 + eval_input_fn = inputs.create_eval_input_fn( + eval_config=configs['eval_config'], + eval_input_config=configs['eval_input_configs'][0], + model_config=configs['eval_config']) # Expecting `DetectionModel`. 
+ with self.assertRaises(TypeError): + eval_input_fn() + + def test_output_equal_in_replace_empty_string_with_random_number(self): + string_placeholder = tf.placeholder(tf.string, shape=[]) + replaced_string = inputs._replace_empty_string_with_random_number( + string_placeholder) + + test_string = b'hello world' + feed_dict = {string_placeholder: test_string} + + with self.test_session() as sess: + out_string = sess.run(replaced_string, feed_dict=feed_dict) + + self.assertEqual(test_string, out_string) + + def test_output_is_integer_in_replace_empty_string_with_random_number(self): + + string_placeholder = tf.placeholder(tf.string, shape=[]) + replaced_string = inputs._replace_empty_string_with_random_number( + string_placeholder) + + empty_string = '' + feed_dict = {string_placeholder: empty_string} + with self.test_session() as sess: + out_string = sess.run(replaced_string, feed_dict=feed_dict) + + is_integer = True + try: + # Test whether out_string is a string which represents an integer, the + # casting below will throw an error if out_string is not castable to int. + int(out_string) + except ValueError: + is_integer = False + + self.assertTrue(is_integer) + + def test_force_no_resize(self): + """Tests the functionality of force_no_reisze option.""" + configs = _get_configs_for_model('ssd_inception_v2_pets') + configs['eval_config'].force_no_resize = True + + eval_input_fn = inputs.create_eval_input_fn( + eval_config=configs['eval_config'], + eval_input_config=configs['eval_input_configs'][0], + model_config=configs['model'] + ) + train_input_fn = inputs.create_train_input_fn( + train_config=configs['train_config'], + train_input_config=configs['train_input_config'], + model_config=configs['model'] + ) + + features_train, _ = _make_initializable_iterator( + train_input_fn()).get_next() + + features_eval, _ = _make_initializable_iterator( + eval_input_fn()).get_next() + + images_train, images_eval = features_train['image'], features_eval['image'] + + self.assertEqual([1, None, None, 3], images_eval.shape.as_list()) + self.assertEqual([24, 300, 300, 3], images_train.shape.as_list()) + + +class DataAugmentationFnTest(test_case.TestCase): + + def test_apply_image_and_box_augmentation(self): + data_augmentation_options = [ + (preprocessor.resize_image, { + 'new_height': 20, + 'new_width': 20, + 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR + }), + (preprocessor.scale_boxes_to_pixel_coordinates, {}), + ] + data_augmentation_fn = functools.partial( + inputs.augment_input_data, + data_augmentation_options=data_augmentation_options) + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)) + } + augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) + return (augmented_tensor_dict[fields.InputDataFields.image], + augmented_tensor_dict[fields.InputDataFields. 
+ groundtruth_boxes]) + image, groundtruth_boxes = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image.shape, [20, 20, 3]) + self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]]) + + def test_apply_image_and_box_augmentation_with_scores(self): + data_augmentation_options = [ + (preprocessor.resize_image, { + 'new_height': 20, + 'new_width': 20, + 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR + }), + (preprocessor.scale_boxes_to_pixel_coordinates, {}), + ] + data_augmentation_fn = functools.partial( + inputs.augment_input_data, + data_augmentation_options=data_augmentation_options) + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1.0], np.float32)), + fields.InputDataFields.groundtruth_weights: + tf.constant(np.array([0.8], np.float32)), + } + augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) + return (augmented_tensor_dict[fields.InputDataFields.image], + augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes], + augmented_tensor_dict[fields.InputDataFields.groundtruth_classes], + augmented_tensor_dict[fields.InputDataFields.groundtruth_weights]) + (image, groundtruth_boxes, + groundtruth_classes, groundtruth_weights) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image.shape, [20, 20, 3]) + self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]]) + self.assertAllClose(groundtruth_classes.shape, [1.0]) + self.assertAllClose(groundtruth_weights, [0.8]) + + def test_include_masks_in_data_augmentation(self): + data_augmentation_options = [ + (preprocessor.resize_image, { + 'new_height': 20, + 'new_width': 20, + 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR + }) + ] + data_augmentation_fn = functools.partial( + inputs.augment_input_data, + data_augmentation_options=data_augmentation_options) + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_instance_masks: + tf.constant(np.zeros([2, 10, 10], np.uint8)) + } + augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) + return (augmented_tensor_dict[fields.InputDataFields.image], + augmented_tensor_dict[fields.InputDataFields. 
+ groundtruth_instance_masks]) + image, masks = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image.shape, [20, 20, 3]) + self.assertAllEqual(masks.shape, [2, 20, 20]) + + def test_include_keypoints_in_data_augmentation(self): + data_augmentation_options = [ + (preprocessor.resize_image, { + 'new_height': 20, + 'new_width': 20, + 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR + }), + (preprocessor.scale_boxes_to_pixel_coordinates, {}), + ] + data_augmentation_fn = functools.partial( + inputs.augment_input_data, + data_augmentation_options=data_augmentation_options) + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)), + fields.InputDataFields.groundtruth_keypoints: + tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32)) + } + augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) + return (augmented_tensor_dict[fields.InputDataFields.image], + augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes], + augmented_tensor_dict[fields.InputDataFields. + groundtruth_keypoints]) + image, boxes, keypoints = self.execute_cpu(graph_fn, []) + self.assertAllEqual(image.shape, [20, 20, 3]) + self.assertAllClose(boxes, [[10, 10, 20, 20]]) + self.assertAllClose(keypoints, [[[10, 20], [10, 10]]]) + + +def _fake_model_preprocessor_fn(image): + return (image, tf.expand_dims(tf.shape(image)[1:], axis=0)) + + +def _fake_image_resizer_fn(image, mask): + return (image, mask, tf.shape(image)) + + +def _fake_resize50_preprocess_fn(image): + image = image[0] + image, shape = preprocessor.resize_to_range( + image, min_dimension=50, max_dimension=50, pad_to_max_dimension=True) + + return tf.expand_dims(image, 0), tf.expand_dims(shape, axis=0) + + +class DataTransformationFnTest(test_case.TestCase, parameterized.TestCase): + + def test_combine_additional_channels_if_present(self): + image = np.random.rand(4, 4, 3).astype(np.float32) + additional_channels = np.random.rand(4, 4, 2).astype(np.float32) + def graph_fn(image, additional_channels): + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.image_additional_channels: additional_channels, + fields.InputDataFields.groundtruth_classes: + tf.constant([1, 1], tf.int32) + } + + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=1) + out_tensors = input_transformation_fn(tensor_dict=tensor_dict) + return out_tensors[fields.InputDataFields.image] + out_image = self.execute_cpu(graph_fn, [image, additional_channels]) + self.assertAllEqual(out_image.dtype, tf.float32) + self.assertAllEqual(out_image.shape, [4, 4, 5]) + self.assertAllClose(out_image, np.concatenate((image, additional_channels), + axis=2)) + + def test_use_multiclass_scores_when_present(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3). 
+ astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], + np.float32)), + fields.InputDataFields.multiclass_scores: + tf.constant(np.array([0.2, 0.3, 0.5, 0.1, 0.6, 0.3], np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)) + } + + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=3, use_multiclass_scores=True) + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + return transformed_inputs[fields.InputDataFields.groundtruth_classes] + groundtruth_classes = self.execute_cpu(graph_fn, []) + self.assertAllClose( + np.array([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]], np.float32), + groundtruth_classes) + + @unittest.skipIf(tf_version.is_tf2(), ('Skipping due to different behaviour ' + 'in TF 2.X')) + def test_use_multiclass_scores_when_not_present(self): + def graph_fn(): + zero_num_elements = tf.random.uniform([], minval=0, maxval=1, + dtype=tf.int32) + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], + np.float32)), + fields.InputDataFields.multiclass_scores: tf.zeros(zero_num_elements), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)) + } + + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=3, use_multiclass_scores=True) + + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + return transformed_inputs[fields.InputDataFields.groundtruth_classes] + groundtruth_classes = self.execute_cpu(graph_fn, []) + self.assertAllClose( + np.array([[0, 1, 0], [0, 0, 1]], np.float32), + groundtruth_classes) + + @parameterized.parameters( + {'labeled_classes': [1, 2]}, + {'labeled_classes': []}, + {'labeled_classes': [1, -1, 2]} # -1 denotes an unrecognized class + ) + def test_use_labeled_classes(self, labeled_classes): + + def compute_fn(image, groundtruth_boxes, groundtruth_classes, + groundtruth_labeled_classes): + tensor_dict = { + fields.InputDataFields.image: + image, + fields.InputDataFields.groundtruth_boxes: + groundtruth_boxes, + fields.InputDataFields.groundtruth_classes: + groundtruth_classes, + fields.InputDataFields.groundtruth_labeled_classes: + groundtruth_labeled_classes + } + + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=3) + return input_transformation_fn(tensor_dict=tensor_dict) + + image = np.random.rand(4, 4, 3).astype(np.float32) + groundtruth_boxes = np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32) + groundtruth_classes = np.array([1, 2], np.int32) + groundtruth_labeled_classes = np.array(labeled_classes, np.int32) + + transformed_inputs = self.execute_cpu(compute_fn, [ + image, groundtruth_boxes, groundtruth_classes, + groundtruth_labeled_classes + ]) + + if labeled_classes == [1, 2] or labeled_classes == [1, -1, 2]: + transformed_labeled_classes = [1, 1, 0] + elif not labeled_classes: + transformed_labeled_classes = [1, 1, 1] + else: + logging.exception('Unexpected labeled_classes %r', labeled_classes) + + self.assertAllEqual( + 
np.array(transformed_labeled_classes, np.float32), + transformed_inputs[fields.InputDataFields.groundtruth_labeled_classes]) + + def test_returns_correct_class_label_encodings(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)) + } + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_classes], + transformed_inputs[fields.InputDataFields. + groundtruth_confidences]) + (groundtruth_classes, groundtruth_confidences) = self.execute_cpu(graph_fn, + []) + self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]]) + self.assertAllClose(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]]) + + def test_returns_correct_labels_with_unrecognized_class(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant( + np.array([[0, 0, 1, 1], [.2, .2, 4, 4], [.5, .5, 1, 1]], + np.float32)), + fields.InputDataFields.groundtruth_area: + tf.constant(np.array([.5, .4, .3])), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, -1, 1], np.int32)), + fields.InputDataFields.groundtruth_keypoints: + tf.constant( + np.array([[[.1, .1]], [[.2, .2]], [[.5, .5]]], + np.float32)), + fields.InputDataFields.groundtruth_keypoint_visibilities: + tf.constant([[True, True], [False, False], [True, True]]), + fields.InputDataFields.groundtruth_instance_masks: + tf.constant(np.random.rand(3, 4, 4).astype(np.float32)), + fields.InputDataFields.groundtruth_is_crowd: + tf.constant([False, True, False]), + fields.InputDataFields.groundtruth_difficult: + tf.constant(np.array([0, 0, 1], np.int32)) + } + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_classes], + transformed_inputs[fields.InputDataFields.num_groundtruth_boxes], + transformed_inputs[fields.InputDataFields.groundtruth_area], + transformed_inputs[fields.InputDataFields. + groundtruth_confidences], + transformed_inputs[fields.InputDataFields.groundtruth_boxes], + transformed_inputs[fields.InputDataFields.groundtruth_keypoints], + transformed_inputs[fields.InputDataFields. + groundtruth_keypoint_visibilities], + transformed_inputs[fields.InputDataFields. 
+ groundtruth_instance_masks], + transformed_inputs[fields.InputDataFields.groundtruth_is_crowd], + transformed_inputs[fields.InputDataFields.groundtruth_difficult]) + (groundtruth_classes, num_groundtruth_boxes, groundtruth_area, + groundtruth_confidences, groundtruth_boxes, groundtruth_keypoints, + groundtruth_keypoint_visibilities, groundtruth_instance_masks, + groundtruth_is_crowd, groundtruth_difficult) = self.execute_cpu(graph_fn, + []) + + self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]]) + self.assertAllEqual(num_groundtruth_boxes, 2) + self.assertAllClose(groundtruth_area, [.5, .3]) + self.assertAllEqual(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]]) + self.assertAllClose(groundtruth_boxes, [[0, 0, 1, 1], [.5, .5, 1, 1]]) + self.assertAllClose(groundtruth_keypoints, [[[.1, .1]], [[.5, .5]]]) + self.assertAllEqual(groundtruth_keypoint_visibilities, + [[True, True], [True, True]]) + self.assertAllEqual(groundtruth_instance_masks.shape, [2, 4, 4]) + self.assertAllEqual(groundtruth_is_crowd, [False, False]) + self.assertAllEqual(groundtruth_difficult, [0, 1]) + + def test_returns_correct_merged_boxes(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], + np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)) + } + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes, + merge_multiple_boxes=True) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_boxes], + transformed_inputs[fields.InputDataFields.groundtruth_classes], + transformed_inputs[fields.InputDataFields. + groundtruth_confidences], + transformed_inputs[fields.InputDataFields.num_groundtruth_boxes]) + (groundtruth_boxes, groundtruth_classes, groundtruth_confidences, + num_groundtruth_boxes) = self.execute_cpu(graph_fn, []) + self.assertAllClose( + groundtruth_boxes, + [[.5, .5, 1., 1.]]) + self.assertAllClose( + groundtruth_classes, + [[1, 0, 1]]) + self.assertAllClose( + groundtruth_confidences, + [[1, 0, 1]]) + self.assertAllClose( + num_groundtruth_boxes, + 1) + + def test_returns_correct_groundtruth_confidences_when_input_present(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)), + fields.InputDataFields.groundtruth_confidences: + tf.constant(np.array([1.0, -1.0], np.float32)) + } + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_classes], + transformed_inputs[fields.InputDataFields. 
+ groundtruth_confidences]) + groundtruth_classes, groundtruth_confidences = self.execute_cpu(graph_fn, + []) + self.assertAllClose( + groundtruth_classes, + [[0, 0, 1], [1, 0, 0]]) + self.assertAllClose( + groundtruth_confidences, + [[0, 0, 1], [-1, 0, 0]]) + + def test_returns_resized_masks(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_instance_masks: + tf.constant(np.random.rand(2, 4, 4).astype(np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)), + fields.InputDataFields.original_image_spatial_shape: + tf.constant(np.array([4, 4], np.int32)) + } + + def fake_image_resizer_fn(image, masks=None): + resized_image = tf.image.resize_images(image, [8, 8]) + results = [resized_image] + if masks is not None: + resized_masks = tf.transpose( + tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]), + [2, 0, 1]) + results.append(resized_masks) + results.append(tf.shape(resized_image)) + return results + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=fake_image_resizer_fn, + num_classes=num_classes, + retain_original_image=True) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.original_image], + transformed_inputs[fields.InputDataFields. + original_image_spatial_shape], + transformed_inputs[fields.InputDataFields. + groundtruth_instance_masks]) + (original_image, original_image_shape, + groundtruth_instance_masks) = self.execute_cpu(graph_fn, []) + self.assertEqual(original_image.dtype, np.uint8) + self.assertAllEqual(original_image_shape, [4, 4]) + self.assertAllEqual(original_image.shape, [8, 8, 3]) + self.assertAllEqual(groundtruth_instance_masks.shape, [2, 8, 8]) + + def test_applies_model_preprocess_fn_to_image_tensor(self): + np_image = np.random.randint(256, size=(4, 4, 3)) + def graph_fn(image): + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)) + } + + def fake_model_preprocessor_fn(image): + return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0)) + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.image], + transformed_inputs[fields.InputDataFields.true_image_shape]) + image, true_image_shape = self.execute_cpu(graph_fn, [np_image]) + self.assertAllClose(image, np_image / 255.) 
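+ # true_image_shape comes from the fake preprocessor's second output (the + # unbatched image shape), so it should still equal the original [4, 4, 3].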
+ self.assertAllClose(true_image_shape, [4, 4, 3]) + + def test_applies_data_augmentation_fn_to_tensor_dict(self): + np_image = np.random.randint(256, size=(4, 4, 3)) + def graph_fn(image): + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)) + } + + def add_one_data_augmentation_fn(tensor_dict): + return {key: value + 1 for key, value in tensor_dict.items()} + + num_classes = 4 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes, + data_augmentation_fn=add_one_data_augmentation_fn) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.image], + transformed_inputs[fields.InputDataFields.groundtruth_classes]) + image, groundtruth_classes = self.execute_cpu(graph_fn, [np_image]) + self.assertAllEqual(image, np_image + 1) + self.assertAllEqual( + groundtruth_classes, + [[0, 0, 0, 1], [0, 1, 0, 0]]) + + def test_applies_data_augmentation_fn_before_model_preprocess_fn(self): + np_image = np.random.randint(256, size=(4, 4, 3)) + def graph_fn(image): + tensor_dict = { + fields.InputDataFields.image: image, + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([3, 1], np.int32)) + } + + def mul_two_model_preprocessor_fn(image): + return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0)) + + def add_five_to_image_data_augmentation_fn(tensor_dict): + tensor_dict[fields.InputDataFields.image] += 5 + return tensor_dict + + num_classes = 4 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=mul_two_model_preprocessor_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes, + data_augmentation_fn=add_five_to_image_data_augmentation_fn) + transformed_inputs = input_transformation_fn(tensor_dict) + return transformed_inputs[fields.InputDataFields.image] + image = self.execute_cpu(graph_fn, [np_image]) + self.assertAllEqual(image, (np_image + 5) * 2) + + def test_resize_with_padding(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]], + np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)), + fields.InputDataFields.groundtruth_keypoints: + tf.constant([[[0.1, 0.2]], [[0.3, 0.4]]]), + } + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_resize50_preprocess_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes,) + transformed_inputs = input_transformation_fn(tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_boxes], + transformed_inputs[fields.InputDataFields.groundtruth_keypoints]) + groundtruth_boxes, groundtruth_keypoints = self.execute_cpu(graph_fn, []) + self.assertAllClose( + groundtruth_boxes, + [[.5, .25, 1., .5], [.0, .0, .5, .25]]) + self.assertAllClose( + groundtruth_keypoints, + [[[.1, .1]], [[.3, .2]]]) + + def test_groundtruth_keypoint_weights(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, 
.5, .5]], + np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)), + fields.InputDataFields.groundtruth_keypoints: + tf.constant([[[0.1, 0.2], [0.3, 0.4]], + [[0.5, 0.6], [0.7, 0.8]]]), + fields.InputDataFields.groundtruth_keypoint_visibilities: + tf.constant([[True, False], [True, True]]), + } + + num_classes = 3 + keypoint_type_weight = [1.0, 2.0] + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_resize50_preprocess_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes, + keypoint_type_weight=keypoint_type_weight) + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints], + transformed_inputs[fields.InputDataFields. + groundtruth_keypoint_weights]) + + groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu( + graph_fn, []) + self.assertAllClose( + groundtruth_keypoints, + [[[0.1, 0.1], [0.3, 0.2]], + [[0.5, 0.3], [0.7, 0.4]]]) + self.assertAllClose( + groundtruth_keypoint_weights, + [[1.0, 0.0], [1.0, 2.0]]) + + def test_groundtruth_keypoint_weights_default(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]], + np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)), + fields.InputDataFields.groundtruth_keypoints: + tf.constant([[[0.1, 0.2], [0.3, 0.4]], + [[0.5, 0.6], [0.7, 0.8]]]), + } + + num_classes = 3 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_resize50_preprocess_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints], + transformed_inputs[fields.InputDataFields. 
+ groundtruth_keypoint_weights]) + groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu( + graph_fn, []) + self.assertAllClose( + groundtruth_keypoints, + [[[0.1, 0.1], [0.3, 0.2]], + [[0.5, 0.3], [0.7, 0.4]]]) + self.assertAllClose( + groundtruth_keypoint_weights, + [[1.0, 1.0], [1.0, 1.0]]) + + def test_groundtruth_dense_pose(self): + def graph_fn(): + tensor_dict = { + fields.InputDataFields.image: + tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), + fields.InputDataFields.groundtruth_boxes: + tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]], + np.float32)), + fields.InputDataFields.groundtruth_classes: + tf.constant(np.array([1, 2], np.int32)), + fields.InputDataFields.groundtruth_dp_num_points: + tf.constant([0, 2], dtype=tf.int32), + fields.InputDataFields.groundtruth_dp_part_ids: + tf.constant([[0, 0], [4, 23]], dtype=tf.int32), + fields.InputDataFields.groundtruth_dp_surface_coords: + tf.constant([[[0., 0., 0., 0.,], [0., 0., 0., 0.,]], + [[0.1, 0.2, 0.3, 0.4,], [0.6, 0.8, 0.6, 0.7,]]], + dtype=tf.float32), + } + + num_classes = 1 + input_transformation_fn = functools.partial( + inputs.transform_input_data, + model_preprocess_fn=_fake_resize50_preprocess_fn, + image_resizer_fn=_fake_image_resizer_fn, + num_classes=num_classes) + transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) + transformed_dp_num_points = transformed_inputs[ + fields.InputDataFields.groundtruth_dp_num_points] + transformed_dp_part_ids = transformed_inputs[ + fields.InputDataFields.groundtruth_dp_part_ids] + transformed_dp_surface_coords = transformed_inputs[ + fields.InputDataFields.groundtruth_dp_surface_coords] + return (transformed_dp_num_points, transformed_dp_part_ids, + transformed_dp_surface_coords) + + dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu( + graph_fn, []) + self.assertAllEqual(dp_num_points, [0, 2]) + self.assertAllEqual(dp_part_ids, [[0, 0], [4, 23]]) + self.assertAllClose( + dp_surface_coords, + [[[0., 0., 0., 0.,], [0., 0., 0., 0.,]], + [[0.1, 0.1, 0.3, 0.4,], [0.6, 0.4, 0.6, 0.7,]]]) + + +class PadInputDataToStaticShapesFnTest(test_case.TestCase): + + def test_pad_images_boxes_and_classes(self): + input_tensor_dict = { + fields.InputDataFields.image: + tf.random.uniform([3, 3, 3]), + fields.InputDataFields.groundtruth_boxes: + tf.random.uniform([2, 4]), + fields.InputDataFields.groundtruth_classes: + tf.random.uniform([2, 3], minval=0, maxval=2, dtype=tf.int32), + fields.InputDataFields.true_image_shape: + tf.constant([3, 3, 3]), + fields.InputDataFields.original_image_spatial_shape: + tf.constant([3, 3]) + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), + [5, 6, 3]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.true_image_shape] + .shape.as_list(), [3]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.original_image_spatial_shape] + .shape.as_list(), [2]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.groundtruth_boxes] + .shape.as_list(), [3, 4]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.groundtruth_classes] + .shape.as_list(), [3, 3]) + + def test_clip_boxes_and_classes(self): + def graph_fn(): + input_tensor_dict = { + fields.InputDataFields.groundtruth_boxes: + tf.random.uniform([5, 4]), + 
fields.InputDataFields.groundtruth_classes: + tf.random.uniform([2, 3], maxval=10, dtype=tf.int32), + fields.InputDataFields.num_groundtruth_boxes: + tf.constant(5) + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + return (padded_tensor_dict[fields.InputDataFields.groundtruth_boxes], + padded_tensor_dict[fields.InputDataFields.groundtruth_classes], + padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes]) + (groundtruth_boxes, groundtruth_classes, + num_groundtruth_boxes) = self.execute_cpu(graph_fn, []) + self.assertAllEqual(groundtruth_boxes.shape, [3, 4]) + self.assertAllEqual(groundtruth_classes.shape, [3, 3]) + self.assertEqual(num_groundtruth_boxes, 3) + + def test_images_and_additional_channels(self): + input_tensor_dict = { + fields.InputDataFields.image: + test_utils.image_with_dynamic_shape(4, 3, 5), + fields.InputDataFields.image_additional_channels: + test_utils.image_with_dynamic_shape(4, 3, 2), + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + # pad_input_data_to_static_shape assumes that image is already concatenated + # with additional channels. + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), + [5, 6, 5]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image_additional_channels] + .shape.as_list(), [5, 6, 2]) + + def test_images_and_additional_channels_errors(self): + input_tensor_dict = { + fields.InputDataFields.image: + test_utils.image_with_dynamic_shape(10, 10, 3), + fields.InputDataFields.image_additional_channels: + test_utils.image_with_dynamic_shape(10, 10, 2), + fields.InputDataFields.original_image: + test_utils.image_with_dynamic_shape(10, 10, 3), + } + with self.assertRaises(ValueError): + _ = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + def test_gray_images(self): + input_tensor_dict = { + fields.InputDataFields.image: + test_utils.image_with_dynamic_shape(4, 4, 1), + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), + [5, 6, 1]) + + def test_gray_images_and_additional_channels(self): + input_tensor_dict = { + fields.InputDataFields.image: + test_utils.image_with_dynamic_shape(4, 4, 3), + fields.InputDataFields.image_additional_channels: + test_utils.image_with_dynamic_shape(4, 4, 2), + } + # pad_input_data_to_static_shape assumes that image is already concatenated + # with additional channels. 
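+ # Unlike test_images_and_additional_channels_errors above, no original_image + # key is present here, so padding succeeds and only the spatial dimensions + # grow; the channel depths (3 and 2) are preserved.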
+ padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), + [5, 6, 3]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.image_additional_channels] + .shape.as_list(), [5, 6, 2]) + + def test_keypoints(self): + keypoints = test_utils.keypoints_with_dynamic_shape(10, 16, 4) + visibilities = tf.cast(tf.random.uniform(tf.shape(keypoints)[:-1], minval=0, + maxval=2, dtype=tf.int32), tf.bool) + input_tensor_dict = { + fields.InputDataFields.groundtruth_keypoints: + test_utils.keypoints_with_dynamic_shape(10, 16, 4), + fields.InputDataFields.groundtruth_keypoint_visibilities: + visibilities + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6]) + + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.groundtruth_keypoints] + .shape.as_list(), [3, 16, 4]) + self.assertAllEqual( + padded_tensor_dict[ + fields.InputDataFields.groundtruth_keypoint_visibilities] + .shape.as_list(), [3, 16]) + + def test_dense_pose(self): + input_tensor_dict = { + fields.InputDataFields.groundtruth_dp_num_points: + tf.constant([0, 2], dtype=tf.int32), + fields.InputDataFields.groundtruth_dp_part_ids: + tf.constant([[0, 0], [4, 23]], dtype=tf.int32), + fields.InputDataFields.groundtruth_dp_surface_coords: + tf.constant([[[0., 0., 0., 0.,], [0., 0., 0., 0.,]], + [[0.1, 0.2, 0.3, 0.4,], [0.6, 0.8, 0.6, 0.7,]]], + dtype=tf.float32), + } + + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=1, + spatial_image_shape=[128, 128], + max_dp_points=200) + + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.groundtruth_dp_num_points] + .shape.as_list(), [3]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids] + .shape.as_list(), [3, 200]) + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.groundtruth_dp_surface_coords] + .shape.as_list(), [3, 200, 4]) + + def test_pad_input_data_to_static_shapes_for_trackid(self): + input_tensor_dict = { + fields.InputDataFields.groundtruth_track_ids: + tf.constant([0, 1], dtype=tf.int32), + } + + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=1, + spatial_image_shape=[128, 128]) + + self.assertAllEqual( + padded_tensor_dict[fields.InputDataFields.groundtruth_track_ids] + .shape.as_list(), [3]) + + def test_context_features(self): + context_memory_size = 8 + context_feature_length = 10 + max_num_context_features = 20 + def graph_fn(): + input_tensor_dict = { + fields.InputDataFields.context_features: + tf.ones([context_memory_size, context_feature_length]), + fields.InputDataFields.context_feature_length: + tf.constant(context_feature_length) + } + padded_tensor_dict = inputs.pad_input_data_to_static_shapes( + tensor_dict=input_tensor_dict, + max_num_boxes=3, + num_classes=3, + spatial_image_shape=[5, 6], + max_num_context_features=max_num_context_features, + context_feature_length=context_feature_length) + + self.assertAllEqual( + padded_tensor_dict[ + fields.InputDataFields.context_features].shape.as_list(), + [max_num_context_features, context_feature_length]) + return padded_tensor_dict[fields.InputDataFields.valid_context_size] + + 
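# Running the graph should report the unpadded context memory size even + # though context_features itself is padded up to max_num_context_features + # rows, as the assertions below verify. +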
valid_context_size = self.execute_cpu(graph_fn, []) + self.assertEqual(valid_context_size, context_memory_size) + + +class NegativeSizeTest(test_case.TestCase): + """Test for inputs and related funcitons.""" + + def test_negative_size_error(self): + """Test that error is raised for negative size boxes.""" + + def graph_fn(): + tensors = { + fields.InputDataFields.image: tf.zeros((128, 128, 3)), + fields.InputDataFields.groundtruth_classes: + tf.constant([1, 1], tf.int32), + fields.InputDataFields.groundtruth_boxes: + tf.constant([[0.5, 0.5, 0.4, 0.5]], tf.float32) + } + tensors = inputs.transform_input_data( + tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn, + num_classes=10) + return tensors[fields.InputDataFields.groundtruth_boxes] + with self.assertRaises(tf.errors.InvalidArgumentError): + self.execute_cpu(graph_fn, []) + + def test_negative_size_no_assert(self): + """Test that negative size boxes are filtered out without assert. + + This test simulates the behaviour when we run on TPU and Assert ops are + not supported. + """ + + tensors = { + fields.InputDataFields.image: tf.zeros((128, 128, 3)), + fields.InputDataFields.groundtruth_classes: + tf.constant([1, 1], tf.int32), + fields.InputDataFields.groundtruth_boxes: + tf.constant([[0.5, 0.5, 0.4, 0.5], [0.5, 0.5, 0.6, 0.6]], + tf.float32) + } + + with mock.patch.object(tf, 'Assert') as tf_assert: + tf_assert.return_value = tf.no_op() + tensors = inputs.transform_input_data( + tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn, + num_classes=10) + + self.assertAllClose(tensors[fields.InputDataFields.groundtruth_boxes], + [[0.5, 0.5, 0.6, 0.6]]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ee8f49a08776ebec3e12eb0f83bf43d562ac0c4 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..228203ffef184549b83e23cd2d0b635c5cd9819c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__pycache__/trainer.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__pycache__/trainer.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b706112e795bc66a00f2f31dfc43bf6e3397c990 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/__pycache__/trainer.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/eval.py 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..9a7d8c430fa1069320a78d39464df179e8f6d654 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/eval.py @@ -0,0 +1,142 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Evaluation executable for detection models. + +This executable is used to evaluate DetectionModels. There are two ways of +configuring the eval job. + +1) A single pipeline_pb2.TrainEvalPipelineConfig file maybe specified instead. +In this mode, the --eval_training_data flag may be given to force the pipeline +to evaluate on training data instead. + +Example usage: + ./eval \ + --logtostderr \ + --checkpoint_dir=path/to/checkpoint_dir \ + --eval_dir=path/to/eval_dir \ + --pipeline_config_path=pipeline_config.pbtxt + +2) Three configuration files may be provided: a model_pb2.DetectionModel +configuration file to define what type of DetectionModel is being evaluated, an +input_reader_pb2.InputReader file to specify what data the model is evaluating +and an eval_pb2.EvalConfig file to configure evaluation parameters. + +Example usage: + ./eval \ + --logtostderr \ + --checkpoint_dir=path/to/checkpoint_dir \ + --eval_dir=path/to/eval_dir \ + --eval_config_path=eval_config.pbtxt \ + --model_config_path=model_config.pbtxt \ + --input_config_path=eval_input_config.pbtxt +""" +import functools +import os +import tensorflow.compat.v1 as tf +from tensorflow.python.util.deprecation import deprecated +from object_detection.builders import dataset_builder +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.legacy import evaluator +from object_detection.utils import config_util +from object_detection.utils import label_map_util + +tf.logging.set_verbosity(tf.logging.INFO) + +flags = tf.app.flags +flags.DEFINE_boolean('eval_training_data', False, + 'If training data should be evaluated for this job.') +flags.DEFINE_string( + 'checkpoint_dir', '', + 'Directory containing checkpoints to evaluate, typically ' + 'set to `train_dir` used in the training job.') +flags.DEFINE_string('eval_dir', '', 'Directory to write eval summaries to.') +flags.DEFINE_string( + 'pipeline_config_path', '', + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file. If provided, other configs are ignored') +flags.DEFINE_string('eval_config_path', '', + 'Path to an eval_pb2.EvalConfig config file.') +flags.DEFINE_string('input_config_path', '', + 'Path to an input_reader_pb2.InputReader config file.') +flags.DEFINE_string('model_config_path', '', + 'Path to a model_pb2.DetectionModel config file.') +flags.DEFINE_boolean( + 'run_once', False, 'Option to only run a single pass of ' + 'evaluation. 
Overrides the `max_evals` parameter in the ' + 'provided config.') +FLAGS = flags.FLAGS + + +@deprecated(None, 'Use object_detection/model_main.py.') +def main(unused_argv): + assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.' + assert FLAGS.eval_dir, '`eval_dir` is missing.' + tf.gfile.MakeDirs(FLAGS.eval_dir) + if FLAGS.pipeline_config_path: + configs = config_util.get_configs_from_pipeline_file( + FLAGS.pipeline_config_path) + tf.gfile.Copy( + FLAGS.pipeline_config_path, + os.path.join(FLAGS.eval_dir, 'pipeline.config'), + overwrite=True) + else: + configs = config_util.get_configs_from_multiple_files( + model_config_path=FLAGS.model_config_path, + eval_config_path=FLAGS.eval_config_path, + eval_input_config_path=FLAGS.input_config_path) + for name, config in [('model.config', FLAGS.model_config_path), + ('eval.config', FLAGS.eval_config_path), + ('input.config', FLAGS.input_config_path)]: + tf.gfile.Copy(config, os.path.join(FLAGS.eval_dir, name), overwrite=True) + + model_config = configs['model'] + eval_config = configs['eval_config'] + input_config = configs['eval_input_config'] + if FLAGS.eval_training_data: + input_config = configs['train_input_config'] + + model_fn = functools.partial( + model_builder.build, model_config=model_config, is_training=False) + + def get_next(config): + return dataset_builder.make_initializable_iterator( + dataset_builder.build(config)).get_next() + + create_input_dict_fn = functools.partial(get_next, input_config) + + categories = label_map_util.create_categories_from_labelmap( + input_config.label_map_path) + + if FLAGS.run_once: + eval_config.max_evals = 1 + + graph_rewriter_fn = None + if 'graph_rewriter_config' in configs: + graph_rewriter_fn = graph_rewriter_builder.build( + configs['graph_rewriter_config'], is_training=False) + + evaluator.evaluate( + create_input_dict_fn, + model_fn, + eval_config, + categories, + FLAGS.checkpoint_dir, + FLAGS.eval_dir, + graph_hook_fn=graph_rewriter_fn) + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/evaluator.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..feeb718876788dd067f9c1026d784aa7a7e15848 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/evaluator.py @@ -0,0 +1,298 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Detection model evaluator. + +This file provides a generic evaluation method that can be used to evaluate a +DetectionModel. 
+""" + +import logging +import tensorflow.compat.v1 as tf + +from object_detection import eval_util +from object_detection.core import prefetcher +from object_detection.core import standard_fields as fields +from object_detection.metrics import coco_evaluation +from object_detection.utils import object_detection_evaluation + +# A dictionary of metric names to classes that implement the metric. The classes +# in the dictionary must implement +# utils.object_detection_evaluation.DetectionEvaluator interface. +EVAL_METRICS_CLASS_DICT = { + 'pascal_voc_detection_metrics': + object_detection_evaluation.PascalDetectionEvaluator, + 'weighted_pascal_voc_detection_metrics': + object_detection_evaluation.WeightedPascalDetectionEvaluator, + 'pascal_voc_instance_segmentation_metrics': + object_detection_evaluation.PascalInstanceSegmentationEvaluator, + 'weighted_pascal_voc_instance_segmentation_metrics': + object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator, + 'oid_V2_detection_metrics': + object_detection_evaluation.OpenImagesDetectionEvaluator, + # DEPRECATED: please use oid_V2_detection_metrics instead + 'open_images_V2_detection_metrics': + object_detection_evaluation.OpenImagesDetectionEvaluator, + 'coco_detection_metrics': + coco_evaluation.CocoDetectionEvaluator, + 'coco_mask_metrics': + coco_evaluation.CocoMaskEvaluator, + 'oid_challenge_detection_metrics': + object_detection_evaluation.OpenImagesDetectionChallengeEvaluator, + # DEPRECATED: please use oid_challenge_detection_metrics instead + 'oid_challenge_object_detection_metrics': + object_detection_evaluation.OpenImagesDetectionChallengeEvaluator, + 'oid_challenge_segmentation_metrics': + object_detection_evaluation + .OpenImagesInstanceSegmentationChallengeEvaluator, +} + +EVAL_DEFAULT_METRIC = 'pascal_voc_detection_metrics' + + +def _extract_predictions_and_losses(model, + create_input_dict_fn, + ignore_groundtruth=False): + """Constructs tensorflow detection graph and returns output tensors. + + Args: + model: model to perform predictions with. + create_input_dict_fn: function to create input tensor dictionaries. + ignore_groundtruth: whether groundtruth should be ignored. + + Returns: + prediction_groundtruth_dict: A dictionary with postprocessed tensors (keyed + by standard_fields.DetectionResultsFields) and optional groundtruth + tensors (keyed by standard_fields.InputDataFields). + losses_dict: A dictionary containing detection losses. This is empty when + ignore_groundtruth is true. 
+ """ + input_dict = create_input_dict_fn() + prefetch_queue = prefetcher.prefetch(input_dict, capacity=500) + input_dict = prefetch_queue.dequeue() + original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0) + preprocessed_image, true_image_shapes = model.preprocess( + tf.cast(original_image, dtype=tf.float32)) + prediction_dict = model.predict(preprocessed_image, true_image_shapes) + detections = model.postprocess(prediction_dict, true_image_shapes) + + groundtruth = None + losses_dict = {} + if not ignore_groundtruth: + groundtruth = { + fields.InputDataFields.groundtruth_boxes: + input_dict[fields.InputDataFields.groundtruth_boxes], + fields.InputDataFields.groundtruth_classes: + input_dict[fields.InputDataFields.groundtruth_classes], + fields.InputDataFields.groundtruth_area: + input_dict[fields.InputDataFields.groundtruth_area], + fields.InputDataFields.groundtruth_is_crowd: + input_dict[fields.InputDataFields.groundtruth_is_crowd], + fields.InputDataFields.groundtruth_difficult: + input_dict[fields.InputDataFields.groundtruth_difficult] + } + if fields.InputDataFields.groundtruth_group_of in input_dict: + groundtruth[fields.InputDataFields.groundtruth_group_of] = ( + input_dict[fields.InputDataFields.groundtruth_group_of]) + groundtruth_masks_list = None + if fields.DetectionResultFields.detection_masks in detections: + groundtruth[fields.InputDataFields.groundtruth_instance_masks] = ( + input_dict[fields.InputDataFields.groundtruth_instance_masks]) + groundtruth_masks_list = [ + input_dict[fields.InputDataFields.groundtruth_instance_masks]] + groundtruth_keypoints_list = None + if fields.DetectionResultFields.detection_keypoints in detections: + groundtruth[fields.InputDataFields.groundtruth_keypoints] = ( + input_dict[fields.InputDataFields.groundtruth_keypoints]) + groundtruth_keypoints_list = [ + input_dict[fields.InputDataFields.groundtruth_keypoints]] + label_id_offset = 1 + model.provide_groundtruth( + [input_dict[fields.InputDataFields.groundtruth_boxes]], + [tf.one_hot(input_dict[fields.InputDataFields.groundtruth_classes] + - label_id_offset, depth=model.num_classes)], + groundtruth_masks_list, groundtruth_keypoints_list) + losses_dict.update(model.loss(prediction_dict, true_image_shapes)) + + result_dict = eval_util.result_dict_for_single_example( + original_image, + input_dict[fields.InputDataFields.source_id], + detections, + groundtruth, + class_agnostic=( + fields.DetectionResultFields.detection_classes not in detections), + scale_to_absolute=True) + return result_dict, losses_dict + + +def get_evaluators(eval_config, categories): + """Returns the evaluator class according to eval_config, valid for categories. + + Args: + eval_config: evaluation configurations. + categories: a list of categories to evaluate. + Returns: + An list of instances of DetectionEvaluator. + + Raises: + ValueError: if metric is not in the metric class dictionary. 
+ """ + eval_metric_fn_keys = eval_config.metrics_set + if not eval_metric_fn_keys: + eval_metric_fn_keys = [EVAL_DEFAULT_METRIC] + evaluators_list = [] + for eval_metric_fn_key in eval_metric_fn_keys: + if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT: + raise ValueError('Metric not found: {}'.format(eval_metric_fn_key)) + if eval_metric_fn_key == 'oid_challenge_object_detection_metrics': + logging.warning( + 'oid_challenge_object_detection_metrics is deprecated; ' + 'use oid_challenge_detection_metrics instead' + ) + if eval_metric_fn_key == 'oid_V2_detection_metrics': + logging.warning( + 'open_images_V2_detection_metrics is deprecated; ' + 'use oid_V2_detection_metrics instead' + ) + evaluators_list.append( + EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories)) + return evaluators_list + + +def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories, + checkpoint_dir, eval_dir, graph_hook_fn=None, evaluator_list=None): + """Evaluation function for detection models. + + Args: + create_input_dict_fn: a function to create a tensor input dictionary. + create_model_fn: a function that creates a DetectionModel. + eval_config: a eval_pb2.EvalConfig protobuf. + categories: a list of category dictionaries. Each dict in the list should + have an integer 'id' field and string 'name' field. + checkpoint_dir: directory to load the checkpoints to evaluate from. + eval_dir: directory to write evaluation metrics summary to. + graph_hook_fn: Optional function that is called after the training graph is + completely built. This is helpful to perform additional changes to the + training graph such as optimizing batchnorm. The function should modify + the default graph. + evaluator_list: Optional list of instances of DetectionEvaluator. If not + given, this list of metrics is created according to the eval_config. + + Returns: + metrics: A dictionary containing metric names and values from the latest + run. + """ + + model = create_model_fn() + + if eval_config.ignore_groundtruth and not eval_config.export_path: + logging.fatal('If ignore_groundtruth=True then an export_path is ' + 'required. Aborting!!!') + + tensor_dict, losses_dict = _extract_predictions_and_losses( + model=model, + create_input_dict_fn=create_input_dict_fn, + ignore_groundtruth=eval_config.ignore_groundtruth) + + def _process_batch(tensor_dict, sess, batch_index, counters, + losses_dict=None): + """Evaluates tensors in tensor_dict, losses_dict and visualizes examples. + + This function calls sess.run on tensor_dict, evaluating the original_image + tensor only on the first K examples and visualizing detections overlaid + on this original_image. + + Args: + tensor_dict: a dictionary of tensors + sess: tensorflow session + batch_index: the index of the batch amongst all batches in the run. + counters: a dictionary holding 'success' and 'skipped' fields which can + be updated to keep track of number of successful and failed runs, + respectively. If these fields are not updated, then the success/skipped + counter values shown at the end of evaluation will be incorrect. + losses_dict: Optional dictonary of scalar loss tensors. + + Returns: + result_dict: a dictionary of numpy arrays + result_losses_dict: a dictionary of scalar losses. This is empty if input + losses_dict is None. 
+ """ + try: + if not losses_dict: + losses_dict = {} + result_dict, result_losses_dict = sess.run([tensor_dict, losses_dict]) + counters['success'] += 1 + except tf.errors.InvalidArgumentError: + logging.info('Skipping image') + counters['skipped'] += 1 + return {}, {} + global_step = tf.train.global_step(sess, tf.train.get_global_step()) + if batch_index < eval_config.num_visualizations: + tag = 'image-{}'.format(batch_index) + eval_util.visualize_detection_results( + result_dict, + tag, + global_step, + categories=categories, + summary_dir=eval_dir, + export_dir=eval_config.visualization_export_dir, + show_groundtruth=eval_config.visualize_groundtruth_boxes, + groundtruth_box_visualization_color=eval_config. + groundtruth_box_visualization_color, + min_score_thresh=eval_config.min_score_threshold, + max_num_predictions=eval_config.max_num_boxes_to_visualize, + skip_scores=eval_config.skip_scores, + skip_labels=eval_config.skip_labels, + keep_image_id_for_visualization_export=eval_config. + keep_image_id_for_visualization_export) + return result_dict, result_losses_dict + + if graph_hook_fn: graph_hook_fn() + + variables_to_restore = tf.global_variables() + global_step = tf.train.get_or_create_global_step() + variables_to_restore.append(global_step) + + if eval_config.use_moving_averages: + variable_averages = tf.train.ExponentialMovingAverage(0.0) + variables_to_restore = variable_averages.variables_to_restore() + saver = tf.train.Saver(variables_to_restore) + + def _restore_latest_checkpoint(sess): + latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir) + saver.restore(sess, latest_checkpoint) + + if not evaluator_list: + evaluator_list = get_evaluators(eval_config, categories) + + metrics = eval_util.repeated_checkpoint_run( + tensor_dict=tensor_dict, + summary_dir=eval_dir, + evaluators=evaluator_list, + batch_processor=_process_batch, + checkpoint_dirs=[checkpoint_dir], + variables_to_restore=None, + restore_fn=_restore_latest_checkpoint, + num_batches=eval_config.num_examples, + eval_interval_secs=eval_config.eval_interval_secs, + max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else + eval_config.max_evals + if eval_config.max_evals else None), + master=eval_config.eval_master, + save_graph=eval_config.save_graph, + save_graph_dir=(eval_dir if eval_config.save_graph else ''), + losses_dict=losses_dict, + eval_export_path=eval_config.export_path) + + return metrics diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/train.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/train.py new file mode 100644 index 0000000000000000000000000000000000000000..615773760a3988dc2d7f5177d1d37c0ec8df7e17 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/train.py @@ -0,0 +1,186 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +r"""Training executable for detection models. + +This executable is used to train DetectionModels. There are two ways of +configuring the training job: + +1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file +can be specified by --pipeline_config_path. + +Example usage: + ./train \ + --logtostderr \ + --train_dir=path/to/train_dir \ + --pipeline_config_path=pipeline_config.pbtxt + +2) Three configuration files can be provided: a model_pb2.DetectionModel +configuration file to define what type of DetectionModel is being trained, an +input_reader_pb2.InputReader file to specify what training data will be used and +a train_pb2.TrainConfig file to configure training parameters. + +Example usage: + ./train \ + --logtostderr \ + --train_dir=path/to/train_dir \ + --model_config_path=model_config.pbtxt \ + --train_config_path=train_config.pbtxt \ + --input_config_path=train_input_config.pbtxt +""" + +import functools +import json +import os +import tensorflow.compat.v1 as tf +from tensorflow.python.util.deprecation import deprecated + + +from object_detection.builders import dataset_builder +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.legacy import trainer +from object_detection.utils import config_util + +tf.logging.set_verbosity(tf.logging.INFO) + +flags = tf.app.flags +flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.') +flags.DEFINE_integer('task', 0, 'task id') +flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.') +flags.DEFINE_boolean('clone_on_cpu', False, + 'Force clones to be deployed on CPU. Note that even if ' + 'set to False (allowing ops to run on gpu), some ops may ' + 'still be run on the CPU if they have no GPU kernel.') +flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer ' + 'replicas.') +flags.DEFINE_integer('ps_tasks', 0, + 'Number of parameter server tasks. If None, does not use ' + 'a parameter server.') +flags.DEFINE_string('train_dir', '', + 'Directory to save the checkpoints and training summaries.') + +flags.DEFINE_string('pipeline_config_path', '', + 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' + 'file. If provided, other configs are ignored') + +flags.DEFINE_string('train_config_path', '', + 'Path to a train_pb2.TrainConfig config file.') +flags.DEFINE_string('input_config_path', '', + 'Path to an input_reader_pb2.InputReader config file.') +flags.DEFINE_string('model_config_path', '', + 'Path to a model_pb2.DetectionModel config file.') + +FLAGS = flags.FLAGS + + +@deprecated(None, 'Use object_detection/model_main.py.') +def main(_): + assert FLAGS.train_dir, '`train_dir` is missing.' 
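+ # Only task 0 (the chief) creates train_dir and copies the config snapshots + # below; other replicas skip this step.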
+ if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir) + if FLAGS.pipeline_config_path: + configs = config_util.get_configs_from_pipeline_file( + FLAGS.pipeline_config_path) + if FLAGS.task == 0: + tf.gfile.Copy(FLAGS.pipeline_config_path, + os.path.join(FLAGS.train_dir, 'pipeline.config'), + overwrite=True) + else: + configs = config_util.get_configs_from_multiple_files( + model_config_path=FLAGS.model_config_path, + train_config_path=FLAGS.train_config_path, + train_input_config_path=FLAGS.input_config_path) + if FLAGS.task == 0: + for name, config in [('model.config', FLAGS.model_config_path), + ('train.config', FLAGS.train_config_path), + ('input.config', FLAGS.input_config_path)]: + tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name), + overwrite=True) + + model_config = configs['model'] + train_config = configs['train_config'] + input_config = configs['train_input_config'] + + model_fn = functools.partial( + model_builder.build, + model_config=model_config, + is_training=True) + + def get_next(config): + return dataset_builder.make_initializable_iterator( + dataset_builder.build(config)).get_next() + + create_input_dict_fn = functools.partial(get_next, input_config) + + env = json.loads(os.environ.get('TF_CONFIG', '{}')) + cluster_data = env.get('cluster', None) + cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None + task_data = env.get('task', None) or {'type': 'master', 'index': 0} + task_info = type('TaskSpec', (object,), task_data) + + # Parameters for a single worker. + ps_tasks = 0 + worker_replicas = 1 + worker_job_name = 'lonely_worker' + task = 0 + is_chief = True + master = '' + + if cluster_data and 'worker' in cluster_data: + # Number of total worker replicas include "worker"s and the "master". + worker_replicas = len(cluster_data['worker']) + 1 + if cluster_data and 'ps' in cluster_data: + ps_tasks = len(cluster_data['ps']) + + if worker_replicas > 1 and ps_tasks < 1: + raise ValueError('At least 1 ps task is needed for distributed training.') + + if worker_replicas >= 1 and ps_tasks > 0: + # Set up distributed training. + server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc', + job_name=task_info.type, + task_index=task_info.index) + if task_info.type == 'ps': + server.join() + return + + worker_job_name = '%s/task:%d' % (task_info.type, task_info.index) + task = task_info.index + is_chief = (task_info.type == 'master') + master = server.target + + graph_rewriter_fn = None + if 'graph_rewriter_config' in configs: + graph_rewriter_fn = graph_rewriter_builder.build( + configs['graph_rewriter_config'], is_training=True) + + trainer.train( + create_input_dict_fn, + model_fn, + train_config, + master, + task, + FLAGS.num_clones, + worker_replicas, + FLAGS.clone_on_cpu, + ps_tasks, + worker_job_name, + is_chief, + FLAGS.train_dir, + graph_hook_fn=graph_rewriter_fn) + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/trainer.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..21f8973d78ce43b3714480e928c6a2f9008ba623 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/trainer.py @@ -0,0 +1,415 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Detection model trainer. + +This file provides a generic training method that can be used to train a +DetectionModel. +""" + +import functools + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.builders import optimizer_builder +from object_detection.builders import preprocessor_builder +from object_detection.core import batcher +from object_detection.core import preprocessor +from object_detection.core import standard_fields as fields +from object_detection.utils import ops as util_ops +from object_detection.utils import variables_helper +from deployment import model_deploy + + +def create_input_queue(batch_size_per_clone, create_tensor_dict_fn, + batch_queue_capacity, num_batch_queue_threads, + prefetch_queue_capacity, data_augmentation_options): + """Sets up reader, prefetcher and returns input queue. + + Args: + batch_size_per_clone: batch size to use per clone. + create_tensor_dict_fn: function to create tensor dictionary. + batch_queue_capacity: maximum number of elements to store within a queue. + num_batch_queue_threads: number of threads to use for batching. + prefetch_queue_capacity: maximum capacity of the queue used to prefetch + assembled batches. + data_augmentation_options: a list of tuples, where each tuple contains a + data augmentation function and a dictionary containing arguments and their + values (see preprocessor.py). + + Returns: + input queue: a batcher.BatchQueue object holding enqueued tensor_dicts + (which hold images, boxes and targets). To get a batch of tensor_dicts, + call input_queue.Dequeue(). + """ + tensor_dict = create_tensor_dict_fn() + + tensor_dict[fields.InputDataFields.image] = tf.expand_dims( + tensor_dict[fields.InputDataFields.image], 0) + + images = tensor_dict[fields.InputDataFields.image] + float_images = tf.cast(images, dtype=tf.float32) + tensor_dict[fields.InputDataFields.image] = float_images + + include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks + in tensor_dict) + include_keypoints = (fields.InputDataFields.groundtruth_keypoints + in tensor_dict) + include_multiclass_scores = (fields.InputDataFields.multiclass_scores + in tensor_dict) + if data_augmentation_options: + tensor_dict = preprocessor.preprocess( + tensor_dict, data_augmentation_options, + func_arg_map=preprocessor.get_default_func_arg_map( + include_label_weights=True, + include_multiclass_scores=include_multiclass_scores, + include_instance_masks=include_instance_masks, + include_keypoints=include_keypoints)) + + input_queue = batcher.BatchQueue( + tensor_dict, + batch_size=batch_size_per_clone, + batch_queue_capacity=batch_queue_capacity, + num_batch_queue_threads=num_batch_queue_threads, + prefetch_queue_capacity=prefetch_queue_capacity) + return input_queue + + +def get_inputs(input_queue, + num_classes, + merge_multiple_label_boxes=False, + use_multiclass_scores=False): + """Dequeues batch and constructs inputs to object detection model. + + Args: + input_queue: BatchQueue object holding enqueued tensor_dicts. 
+ num_classes: Number of classes. + merge_multiple_label_boxes: Whether to merge boxes with multiple labels + or not. Defaults to false. Merged boxes are represented with a single + box and a k-hot encoding of the multiple labels associated with the + boxes. + use_multiclass_scores: Whether to use multiclass scores instead of + groundtruth_classes. + + Returns: + images: a list of 3-D float tensor of images. + image_keys: a list of string keys for the images. + locations_list: a list of tensors of shape [num_boxes, 4] + containing the corners of the groundtruth boxes. + classes_list: a list of padded one-hot (or K-hot) float32 tensors containing + target classes. + masks_list: a list of 3-D float tensors of shape [num_boxes, image_height, + image_width] containing instance masks for objects if present in the + input_queue. Else returns None. + keypoints_list: a list of 3-D float tensors of shape [num_boxes, + num_keypoints, 2] containing keypoints for objects if present in the + input queue. Else returns None. + weights_lists: a list of 1-D float32 tensors of shape [num_boxes] + containing groundtruth weight for each box. + """ + read_data_list = input_queue.dequeue() + label_id_offset = 1 + def extract_images_and_targets(read_data): + """Extract images and targets from the input dict.""" + image = read_data[fields.InputDataFields.image] + key = '' + if fields.InputDataFields.source_id in read_data: + key = read_data[fields.InputDataFields.source_id] + location_gt = read_data[fields.InputDataFields.groundtruth_boxes] + classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes], + tf.int32) + classes_gt -= label_id_offset + + if merge_multiple_label_boxes and use_multiclass_scores: + raise ValueError( + 'Using both merge_multiple_label_boxes and use_multiclass_scores is' + 'not supported' + ) + + if merge_multiple_label_boxes: + location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels( + location_gt, classes_gt, num_classes) + classes_gt = tf.cast(classes_gt, tf.float32) + elif use_multiclass_scores: + classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores], + tf.float32) + else: + classes_gt = util_ops.padded_one_hot_encoding( + indices=classes_gt, depth=num_classes, left_pad=0) + masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks) + keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints) + if (merge_multiple_label_boxes and ( + masks_gt is not None or keypoints_gt is not None)): + raise NotImplementedError('Multi-label support is only for boxes.') + weights_gt = read_data.get( + fields.InputDataFields.groundtruth_weights) + return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt, + weights_gt) + + return zip(*map(extract_images_and_targets, read_data_list)) + + +def _create_losses(input_queue, create_model_fn, train_config): + """Creates loss function for a DetectionModel. + + Args: + input_queue: BatchQueue object holding enqueued tensor_dicts. + create_model_fn: A function to create the DetectionModel. + train_config: a train_pb2.TrainConfig protobuf. 
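# Illustrative numpy sketch of the class-label transform in get_inputs() above:
# ground-truth labels arrive as 1-based label-map ids, so label_id_offset (1) is
# subtracted before one-hot encoding to depth num_classes. This approximates what
# util_ops.padded_one_hot_encoding with left_pad=0 yields for in-range indices;
# the numbers are made up.
import numpy as np

num_classes = 2
label_id_offset = 1
classes_gt = np.array([1, 2, 1], dtype=np.int32)   # label-map ids for three boxes
zero_based = classes_gt - label_id_offset          # -> [0, 1, 0]
one_hot = np.eye(num_classes, dtype=np.float32)[zero_based]
# one_hot == [[1., 0.],
#             [0., 1.],
#             [1., 0.]]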
+ """ + detection_model = create_model_fn() + (images, _, groundtruth_boxes_list, groundtruth_classes_list, + groundtruth_masks_list, groundtruth_keypoints_list, + groundtruth_weights_list) = get_inputs( + input_queue, + detection_model.num_classes, + train_config.merge_multiple_label_boxes, + train_config.use_multiclass_scores) + + preprocessed_images = [] + true_image_shapes = [] + for image in images: + resized_image, true_image_shape = detection_model.preprocess(image) + preprocessed_images.append(resized_image) + true_image_shapes.append(true_image_shape) + + images = tf.concat(preprocessed_images, 0) + true_image_shapes = tf.concat(true_image_shapes, 0) + + if any(mask is None for mask in groundtruth_masks_list): + groundtruth_masks_list = None + if any(keypoints is None for keypoints in groundtruth_keypoints_list): + groundtruth_keypoints_list = None + + detection_model.provide_groundtruth( + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_masks_list, + groundtruth_keypoints_list, + groundtruth_weights_list=groundtruth_weights_list) + prediction_dict = detection_model.predict(images, true_image_shapes) + + losses_dict = detection_model.loss(prediction_dict, true_image_shapes) + for loss_tensor in losses_dict.values(): + tf.losses.add_loss(loss_tensor) + + +def train(create_tensor_dict_fn, + create_model_fn, + train_config, + master, + task, + num_clones, + worker_replicas, + clone_on_cpu, + ps_tasks, + worker_job_name, + is_chief, + train_dir, + graph_hook_fn=None): + """Training function for detection models. + + Args: + create_tensor_dict_fn: a function to create a tensor input dictionary. + create_model_fn: a function that creates a DetectionModel and generates + losses. + train_config: a train_pb2.TrainConfig protobuf. + master: BNS name of the TensorFlow master to use. + task: The task id of this training instance. + num_clones: The number of clones to run per machine. + worker_replicas: The number of work replicas to train with. + clone_on_cpu: True if clones should be forced to run on CPU. + ps_tasks: Number of parameter server tasks. + worker_job_name: Name of the worker job. + is_chief: Whether this replica is the chief replica. + train_dir: Directory to write checkpoints and training summaries to. + graph_hook_fn: Optional function that is called after the inference graph is + built (before optimization). This is helpful to perform additional changes + to the training graph such as adding FakeQuant ops. The function should + modify the default graph. + + Raises: + ValueError: If both num_clones > 1 and train_config.sync_replicas is true. + """ + + detection_model = create_model_fn() + data_augmentation_options = [ + preprocessor_builder.build(step) + for step in train_config.data_augmentation_options] + + with tf.Graph().as_default(): + # Build a configuration specifying multi-GPU and multi-replicas. + deploy_config = model_deploy.DeploymentConfig( + num_clones=num_clones, + clone_on_cpu=clone_on_cpu, + replica_id=task, + num_replicas=worker_replicas, + num_ps_tasks=ps_tasks, + worker_job_name=worker_job_name) + + # Place the global step on the device storing the variables. + with tf.device(deploy_config.variables_device()): + global_step = slim.create_global_step() + + if num_clones != 1 and train_config.sync_replicas: + raise ValueError('In Synchronous SGD mode num_clones must ', + 'be 1. 
Found num_clones: {}'.format(num_clones)) + batch_size = train_config.batch_size // num_clones + if train_config.sync_replicas: + batch_size //= train_config.replicas_to_aggregate + + with tf.device(deploy_config.inputs_device()): + input_queue = create_input_queue( + batch_size, create_tensor_dict_fn, + train_config.batch_queue_capacity, + train_config.num_batch_queue_threads, + train_config.prefetch_queue_capacity, data_augmentation_options) + + # Gather initial summaries. + # TODO(rathodv): See if summaries can be added/extracted from global tf + # collections so that they don't have to be passed around. + summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) + global_summaries = set([]) + + model_fn = functools.partial(_create_losses, + create_model_fn=create_model_fn, + train_config=train_config) + clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue]) + first_clone_scope = clones[0].scope + + if graph_hook_fn: + with tf.device(deploy_config.variables_device()): + graph_hook_fn() + + # Gather update_ops from the first clone. These contain, for example, + # the updates for the batch_norm variables created by model_fn. + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) + + with tf.device(deploy_config.optimizer_device()): + training_optimizer, optimizer_summary_vars = optimizer_builder.build( + train_config.optimizer) + for var in optimizer_summary_vars: + tf.summary.scalar(var.op.name, var, family='LearningRate') + + sync_optimizer = None + if train_config.sync_replicas: + training_optimizer = tf.train.SyncReplicasOptimizer( + training_optimizer, + replicas_to_aggregate=train_config.replicas_to_aggregate, + total_num_replicas=worker_replicas) + sync_optimizer = training_optimizer + + with tf.device(deploy_config.optimizer_device()): + regularization_losses = (None if train_config.add_regularization_loss + else []) + total_loss, grads_and_vars = model_deploy.optimize_clones( + clones, training_optimizer, + regularization_losses=regularization_losses) + total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.') + + # Optionally multiply bias gradients by train_config.bias_grad_multiplier. + if train_config.bias_grad_multiplier: + biases_regex_list = ['.*/biases'] + grads_and_vars = variables_helper.multiply_gradients_matching_regex( + grads_and_vars, + biases_regex_list, + multiplier=train_config.bias_grad_multiplier) + + # Optionally freeze some layers by setting their gradients to be zero. + if train_config.freeze_variables: + grads_and_vars = variables_helper.freeze_gradients_matching_regex( + grads_and_vars, train_config.freeze_variables) + + # Optionally clip gradients + if train_config.gradient_clipping_by_norm > 0: + with tf.name_scope('clip_grads'): + grads_and_vars = slim.learning.clip_gradient_norms( + grads_and_vars, train_config.gradient_clipping_by_norm) + + # Create gradient updates. + grad_updates = training_optimizer.apply_gradients(grads_and_vars, + global_step=global_step) + update_ops.append(grad_updates) + update_op = tf.group(*update_ops, name='update_barrier') + with tf.control_dependencies([update_op]): + train_tensor = tf.identity(total_loss, name='train_op') + + # Add summaries. 
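# Rough numpy sketch, in the spirit of the per-gradient clipping applied above when
# train_config.gradient_clipping_by_norm > 0 (not the slim implementation itself):
# any gradient whose L2 norm exceeds the threshold is rescaled down to that norm.
import numpy as np

def clip_gradient_norm(grad, clip_norm):
    norm = np.linalg.norm(grad)
    if norm > clip_norm:
        return grad * (clip_norm / norm)
    return grad

grad = np.array([3.0, 4.0])           # L2 norm 5.0
print(clip_gradient_norm(grad, 2.5))  # -> [1.5 2. ], rescaled to norm 2.5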
+ for model_var in slim.get_model_variables(): + global_summaries.add(tf.summary.histogram('ModelVars/' + + model_var.op.name, model_var)) + for loss_tensor in tf.losses.get_losses(): + global_summaries.add(tf.summary.scalar('Losses/' + loss_tensor.op.name, + loss_tensor)) + global_summaries.add( + tf.summary.scalar('Losses/TotalLoss', tf.losses.get_total_loss())) + + # Add the summaries from the first clone. These contain the summaries + # created by model_fn and either optimize_clones() or _gather_clone_loss(). + summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, + first_clone_scope)) + summaries |= global_summaries + + # Merge all summaries together. + summary_op = tf.summary.merge(list(summaries), name='summary_op') + + # Soft placement allows placing on CPU ops without GPU implementation. + session_config = tf.ConfigProto(allow_soft_placement=True, + log_device_placement=False) + + # Save checkpoints regularly. + keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours + saver = tf.train.Saver( + keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours) + + # Create ops required to initialize the model from a given checkpoint. + init_fn = None + if train_config.fine_tune_checkpoint: + if not train_config.fine_tune_checkpoint_type: + # train_config.from_detection_checkpoint field is deprecated. For + # backward compatibility, fine_tune_checkpoint_type is set based on + # from_detection_checkpoint. + if train_config.from_detection_checkpoint: + train_config.fine_tune_checkpoint_type = 'detection' + else: + train_config.fine_tune_checkpoint_type = 'classification' + var_map = detection_model.restore_map( + fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type, + load_all_detection_checkpoint_vars=( + train_config.load_all_detection_checkpoint_vars)) + available_var_map = (variables_helper. + get_variables_available_in_checkpoint( + var_map, train_config.fine_tune_checkpoint, + include_global_step=False)) + init_saver = tf.train.Saver(available_var_map) + def initializer_fn(sess): + init_saver.restore(sess, train_config.fine_tune_checkpoint) + init_fn = initializer_fn + + slim.learning.train( + train_tensor, + logdir=train_dir, + master=master, + is_chief=is_chief, + session_config=session_config, + startup_delay_steps=train_config.startup_delay_steps, + init_fn=init_fn, + summary_op=summary_op, + number_of_steps=( + train_config.num_steps if train_config.num_steps else None), + save_summaries_secs=120, + sync_optimizer=sync_optimizer, + saver=saver) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/trainer.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/trainer.pyc new file mode 100644 index 0000000000000000000000000000000000000000..399af446c989f08730328d97f62d118611a0d515 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/trainer.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/trainer_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/trainer_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0cde654e6a8bba2cfedea939e67d44698f882e04 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/legacy/trainer_tf1_test.py @@ -0,0 +1,295 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
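# Small sketch of the backward-compatibility fallback inside trainer.train() above:
# when fine_tune_checkpoint_type is unset, it is derived from the deprecated
# from_detection_checkpoint flag before restore_map() is called.
def resolve_checkpoint_type(fine_tune_checkpoint_type, from_detection_checkpoint):
    if not fine_tune_checkpoint_type:
        return 'detection' if from_detection_checkpoint else 'classification'
    return fine_tune_checkpoint_type

print(resolve_checkpoint_type('', True))   # -> 'detection'
print(resolve_checkpoint_type('', False))  # -> 'classification'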
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.trainer.""" +import unittest +import tensorflow.compat.v1 as tf +import tf_slim as slim +from google.protobuf import text_format + +from object_detection.core import losses +from object_detection.core import model +from object_detection.core import standard_fields as fields +from object_detection.legacy import trainer +from object_detection.protos import train_pb2 +from object_detection.utils import tf_version + + +NUMBER_OF_CLASSES = 2 + + +def get_input_function(): + """A function to get test inputs. Returns an image with one box.""" + image = tf.random_uniform([32, 32, 3], dtype=tf.float32) + key = tf.constant('image_000000') + class_label = tf.random_uniform( + [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32) + box_label = tf.random_uniform( + [1, 4], minval=0.4, maxval=0.6, dtype=tf.float32) + multiclass_scores = tf.random_uniform( + [1, NUMBER_OF_CLASSES], minval=0.4, maxval=0.6, dtype=tf.float32) + + return { + fields.InputDataFields.image: image, + fields.InputDataFields.key: key, + fields.InputDataFields.groundtruth_classes: class_label, + fields.InputDataFields.groundtruth_boxes: box_label, + fields.InputDataFields.multiclass_scores: multiclass_scores + } + + +class FakeDetectionModel(model.DetectionModel): + """A simple (and poor) DetectionModel for use in test.""" + + def __init__(self): + super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES) + self._classification_loss = losses.WeightedSigmoidClassificationLoss() + self._localization_loss = losses.WeightedSmoothL1LocalizationLoss() + + def preprocess(self, inputs): + """Input preprocessing, resizes images to 28x28. + + Args: + inputs: a [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + """ + true_image_shapes = [inputs.shape[:-1].as_list() + for _ in range(inputs.shape[-1])] + return tf.image.resize_images(inputs, [28, 28]), true_image_shapes + + def predict(self, preprocessed_inputs, true_image_shapes): + """Prediction tensors from inputs tensor. + + Args: + preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + prediction_dict: a dictionary holding prediction tensors to be + passed to the Loss or Postprocess functions. 
+ """ + flattened_inputs = slim.flatten(preprocessed_inputs) + class_prediction = slim.fully_connected(flattened_inputs, self._num_classes) + box_prediction = slim.fully_connected(flattened_inputs, 4) + + return { + 'class_predictions_with_background': tf.reshape( + class_prediction, [-1, 1, self._num_classes]), + 'box_encodings': tf.reshape(box_prediction, [-1, 1, 4]) + } + + def postprocess(self, prediction_dict, true_image_shapes, **params): + """Convert predicted output tensors to final detections. Unused. + + Args: + prediction_dict: a dictionary holding prediction tensors. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **params: Additional keyword arguments for specific implementations of + DetectionModel. + + Returns: + detections: a dictionary with empty fields. + """ + return { + 'detection_boxes': None, + 'detection_scores': None, + 'detection_classes': None, + 'num_detections': None + } + + def loss(self, prediction_dict, true_image_shapes): + """Compute scalar loss tensors with respect to provided groundtruth. + + Calling this function requires that groundtruth tensors have been + provided via the provide_groundtruth function. + + Args: + prediction_dict: a dictionary holding predicted tensors + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + a dictionary mapping strings (loss names) to scalar tensors representing + loss values. + """ + batch_reg_targets = tf.stack( + self.groundtruth_lists(fields.BoxListFields.boxes)) + batch_cls_targets = tf.stack( + self.groundtruth_lists(fields.BoxListFields.classes)) + weights = tf.constant( + 1.0, dtype=tf.float32, + shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1]) + + location_losses = self._localization_loss( + prediction_dict['box_encodings'], batch_reg_targets, + weights=weights) + cls_losses = self._classification_loss( + prediction_dict['class_predictions_with_background'], batch_cls_targets, + weights=weights) + + loss_dict = { + 'localization_loss': tf.reduce_sum(location_losses), + 'classification_loss': tf.reduce_sum(cls_losses), + } + return loss_dict + + def regularization_losses(self): + """Returns a list of regularization losses for this model. + + Returns a list of regularization losses for this model that the estimator + needs to use during training/optimization. + + Returns: + A list of regularization loss tensors. + """ + pass + + def restore_map(self, fine_tune_checkpoint_type='detection'): + """Returns a map of variables to load from a foreign checkpoint. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`. Default 'detection'. + + Returns: + A dict mapping variable names to variables. + """ + return {var.op.name: var for var in tf.global_variables()} + + def restore_from_objects(self, fine_tune_checkpoint_type): + pass + + def updates(self): + """Returns a list of update operators for this model. + + Returns a list of update operators for this model that must be executed at + each training step. 
The estimator's train op needs to have a control + dependency on these updates. + + Returns: + A list of update operators. + """ + pass + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class TrainerTest(tf.test.TestCase): + + def test_configure_trainer_and_train_two_steps(self): + train_config_text_proto = """ + optimizer { + adam_optimizer { + learning_rate { + constant_learning_rate { + learning_rate: 0.01 + } + } + } + } + data_augmentation_options { + random_adjust_brightness { + max_delta: 0.2 + } + } + data_augmentation_options { + random_adjust_contrast { + min_delta: 0.7 + max_delta: 1.1 + } + } + num_steps: 2 + """ + train_config = train_pb2.TrainConfig() + text_format.Merge(train_config_text_proto, train_config) + + train_dir = self.get_temp_dir() + + trainer.train( + create_tensor_dict_fn=get_input_function, + create_model_fn=FakeDetectionModel, + train_config=train_config, + master='', + task=0, + num_clones=1, + worker_replicas=1, + clone_on_cpu=True, + ps_tasks=0, + worker_job_name='worker', + is_chief=True, + train_dir=train_dir) + + def test_configure_trainer_with_multiclass_scores_and_train_two_steps(self): + train_config_text_proto = """ + optimizer { + adam_optimizer { + learning_rate { + constant_learning_rate { + learning_rate: 0.01 + } + } + } + } + data_augmentation_options { + random_adjust_brightness { + max_delta: 0.2 + } + } + data_augmentation_options { + random_adjust_contrast { + min_delta: 0.7 + max_delta: 1.1 + } + } + num_steps: 2 + use_multiclass_scores: true + """ + train_config = train_pb2.TrainConfig() + text_format.Merge(train_config_text_proto, train_config) + + train_dir = self.get_temp_dir() + + trainer.train(create_tensor_dict_fn=get_input_function, + create_model_fn=FakeDetectionModel, + train_config=train_config, + master='', + task=0, + num_clones=1, + worker_replicas=1, + clone_on_cpu=True, + ps_tasks=0, + worker_job_name='worker', + is_chief=True, + train_dir=train_dir) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a86cfe50e8d0f68a8261098ecdb90b13dac03c84 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b01b1366293640b82ee5d67751935b013bacd1c7 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/argmax_matcher.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/argmax_matcher.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d7dbefb91860f2f2084f05009a77dd8daf41b702 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/argmax_matcher.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/bipartite_matcher.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/bipartite_matcher.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..873d0fb819d2443982bdf368b8e1f6c030ec7651 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/bipartite_matcher.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/hungarian_matcher.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/hungarian_matcher.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10fac5991f26a8eafde473b26fec816934e7a070 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/__pycache__/hungarian_matcher.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/argmax_matcher.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/argmax_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..a347decbd3ccc1c68e9285f34e24a9b0610d83e1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/argmax_matcher.py @@ -0,0 +1,208 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Argmax matcher implementation. + +This class takes a similarity matrix and matches columns to rows based on the +maximum value per column. One can specify matched_thresholds and +to prevent columns from matching to rows (generally resulting in a negative +training example) and unmatched_theshold to ignore the match (generally +resulting in neither a positive or negative training example). + +This matcher is used in Fast(er)-RCNN. + +Note: matchers are used in TargetAssigners. There is a create_target_assigner +factory function for popular implementations. +""" +import tensorflow.compat.v1 as tf + +from object_detection.core import matcher +from object_detection.utils import shape_utils + + +class ArgMaxMatcher(matcher.Matcher): + """Matcher based on highest value. + + This class computes matches from a similarity matrix. Each column is matched + to a single row. 
+ + To support object detection target assignment this class enables setting both + matched_threshold (upper threshold) and unmatched_threshold (lower thresholds) + defining three categories of similarity which define whether examples are + positive, negative, or ignored: + (1) similarity >= matched_threshold: Highest similarity. Matched/Positive! + (2) matched_threshold > similarity >= unmatched_threshold: Medium similarity. + Depending on negatives_lower_than_unmatched, this is either + Unmatched/Negative OR Ignore. + (3) unmatched_threshold > similarity: Lowest similarity. Depending on flag + negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore. + For ignored matches this class sets the values in the Match object to -2. + """ + + def __init__(self, + matched_threshold, + unmatched_threshold=None, + negatives_lower_than_unmatched=True, + force_match_for_each_row=False, + use_matmul_gather=False): + """Construct ArgMaxMatcher. + + Args: + matched_threshold: Threshold for positive matches. Positive if + sim >= matched_threshold, where sim is the maximum value of the + similarity matrix for a given column. Set to None for no threshold. + unmatched_threshold: Threshold for negative matches. Negative if + sim < unmatched_threshold. Defaults to matched_threshold + when set to None. + negatives_lower_than_unmatched: Boolean which defaults to True. If True + then negative matches are the ones below the unmatched_threshold, + whereas ignored matches are in between the matched and umatched + threshold. If False, then negative matches are in between the matched + and unmatched threshold, and everything lower than unmatched is ignored. + force_match_for_each_row: If True, ensures that each row is matched to + at least one column (which is not guaranteed otherwise if the + matched_threshold is high). Defaults to False. See + argmax_matcher_test.testMatcherForceMatch() for an example. + use_matmul_gather: Force constructed match objects to use matrix + multiplication based gather instead of standard tf.gather. + (Default: False). + + Raises: + ValueError: if unmatched_threshold is set but matched_threshold is not set + or if unmatched_threshold > matched_threshold. + """ + super(ArgMaxMatcher, self).__init__(use_matmul_gather=use_matmul_gather) + if (matched_threshold is None) and (unmatched_threshold is not None): + raise ValueError('Need to also define matched_threshold when' + 'unmatched_threshold is defined') + self._matched_threshold = matched_threshold + if unmatched_threshold is None: + self._unmatched_threshold = matched_threshold + else: + if unmatched_threshold > matched_threshold: + raise ValueError('unmatched_threshold needs to be smaller or equal' + 'to matched_threshold') + self._unmatched_threshold = unmatched_threshold + if not negatives_lower_than_unmatched: + if self._unmatched_threshold == self._matched_threshold: + raise ValueError('When negatives are in between matched and ' + 'unmatched thresholds, these cannot be of equal ' + 'value. matched: {}, unmatched: {}'.format( + self._matched_threshold, + self._unmatched_threshold)) + self._force_match_for_each_row = force_match_for_each_row + self._negatives_lower_than_unmatched = negatives_lower_than_unmatched + + def _match(self, similarity_matrix, valid_rows): + """Tries to match each column of the similarity matrix to a row. + + Args: + similarity_matrix: tensor of shape [N, M] representing any similarity + metric. + valid_rows: a boolean tensor of shape [N] indicating valid rows. 
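# Illustrative numpy sketch of the column-wise decision rule described in the
# ArgMaxMatcher docstring above, for negatives_lower_than_unmatched=True: -1 marks
# an unmatched/negative column and -2 an ignored column, mirroring the _match()
# code below. The similarity values are taken from the accompanying unit tests.
import numpy as np

def argmax_match(similarity, matched_threshold, unmatched_threshold):
    matches = np.argmax(similarity, axis=0)   # best row per column
    best = np.max(similarity, axis=0)         # similarity of that best row
    matches = np.where(best < unmatched_threshold, -1, matches)   # negative
    matches = np.where((best >= unmatched_threshold) &
                       (best < matched_threshold), -2, matches)   # ignored
    return matches

similarity = np.array([[1, 1, 1, 3, 1],
                       [2, -1, 2, 0, 4],
                       [3, 0, -1, 0, 0]], dtype=np.float32)
print(argmax_match(similarity, matched_threshold=3., unmatched_threshold=2.))
# -> [ 2 -1 -2  0  1]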
+ + Returns: + Match object with corresponding matches for each of M columns. + """ + + def _match_when_rows_are_empty(): + """Performs matching when the rows of similarity matrix are empty. + + When the rows are empty, all detections are false positives. So we return + a tensor of -1's to indicate that the columns do not match to any rows. + + Returns: + matches: int32 tensor indicating the row each column matches to. + """ + similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape( + similarity_matrix) + return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32) + + def _match_when_rows_are_non_empty(): + """Performs matching when the rows of similarity matrix are non empty. + + Returns: + matches: int32 tensor indicating the row each column matches to. + """ + # Matches for each column + matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32) + + # Deal with matched and unmatched threshold + if self._matched_threshold is not None: + # Get logical indices of ignored and unmatched columns as tf.int64 + matched_vals = tf.reduce_max(similarity_matrix, 0) + below_unmatched_threshold = tf.greater(self._unmatched_threshold, + matched_vals) + between_thresholds = tf.logical_and( + tf.greater_equal(matched_vals, self._unmatched_threshold), + tf.greater(self._matched_threshold, matched_vals)) + + if self._negatives_lower_than_unmatched: + matches = self._set_values_using_indicator(matches, + below_unmatched_threshold, + -1) + matches = self._set_values_using_indicator(matches, + between_thresholds, + -2) + else: + matches = self._set_values_using_indicator(matches, + below_unmatched_threshold, + -2) + matches = self._set_values_using_indicator(matches, + between_thresholds, + -1) + + if self._force_match_for_each_row: + similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape( + similarity_matrix) + force_match_column_ids = tf.argmax(similarity_matrix, 1, + output_type=tf.int32) + force_match_column_indicators = ( + tf.one_hot( + force_match_column_ids, depth=similarity_matrix_shape[1]) * + tf.cast(tf.expand_dims(valid_rows, axis=-1), dtype=tf.float32)) + force_match_row_ids = tf.argmax(force_match_column_indicators, 0, + output_type=tf.int32) + force_match_column_mask = tf.cast( + tf.reduce_max(force_match_column_indicators, 0), tf.bool) + final_matches = tf.where(force_match_column_mask, + force_match_row_ids, matches) + return final_matches + else: + return matches + + if similarity_matrix.shape.is_fully_defined(): + if shape_utils.get_dim_as_int(similarity_matrix.shape[0]) == 0: + return _match_when_rows_are_empty() + else: + return _match_when_rows_are_non_empty() + else: + return tf.cond( + tf.greater(tf.shape(similarity_matrix)[0], 0), + _match_when_rows_are_non_empty, _match_when_rows_are_empty) + + def _set_values_using_indicator(self, x, indicator, val): + """Set the indicated fields of x to val. + + Args: + x: tensor. + indicator: boolean with same shape as x. + val: scalar with value to set. + + Returns: + modified tensor. 
+ """ + indicator = tf.cast(indicator, x.dtype) + return tf.add(tf.multiply(x, 1 - indicator), val * indicator) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/argmax_matcher.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/argmax_matcher.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4d9422b6e83abd90de8598032f6f81260b84e40 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/argmax_matcher.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/argmax_matcher_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/argmax_matcher_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9305f0a86c893c5265c6a204367c98e91b6e8819 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/argmax_matcher_test.py @@ -0,0 +1,235 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.matchers.argmax_matcher.""" + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.matchers import argmax_matcher +from object_detection.utils import test_case + + +class ArgMaxMatcherTest(test_case.TestCase): + + def test_return_correct_matches_with_default_thresholds(self): + + def graph_fn(similarity_matrix): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None) + match = matcher.match(similarity_matrix) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1., 1, 1, 3, 1], + [2, -1, 2, 0, 4], + [3, 0, -1, 0, 0]], dtype=np.float32) + expected_matched_rows = np.array([2, 0, 1, 0, 1]) + (res_matched_cols, res_unmatched_cols, + res_match_results) = self.execute(graph_fn, [similarity]) + + self.assertAllEqual(res_match_results[res_matched_cols], + expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], [0, 1, 2, 3, 4]) + self.assertFalse(np.all(res_unmatched_cols)) + + def test_return_correct_matches_with_empty_rows(self): + + def graph_fn(similarity_matrix): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None) + match = matcher.match(similarity_matrix) + return match.unmatched_column_indicator() + similarity = 0.2 * np.ones([0, 5], dtype=np.float32) + res_unmatched_cols = self.execute(graph_fn, [similarity]) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], np.arange(5)) + + def test_return_correct_matches_with_matched_threshold(self): + + def graph_fn(similarity): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.) 
+ match = matcher.match(similarity) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [2, -1, 2, 0, 4], + [3, 0, -1, 0, 0]], dtype=np.float32) + expected_matched_cols = np.array([0, 3, 4]) + expected_matched_rows = np.array([2, 0, 1]) + expected_unmatched_cols = np.array([1, 2]) + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_return_correct_matches_with_matched_and_unmatched_threshold(self): + + def graph_fn(similarity): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3., + unmatched_threshold=2.) + match = matcher.match(similarity) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [2, -1, 2, 0, 4], + [3, 0, -1, 0, 0]], dtype=np.float32) + expected_matched_cols = np.array([0, 3, 4]) + expected_matched_rows = np.array([2, 0, 1]) + expected_unmatched_cols = np.array([1]) # col 2 has too high maximum val + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_return_correct_matches_negatives_lower_than_unmatched_false(self): + + def graph_fn(similarity): + matcher = argmax_matcher.ArgMaxMatcher( + matched_threshold=3., + unmatched_threshold=2., + negatives_lower_than_unmatched=False) + match = matcher.match(similarity) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [2, -1, 2, 0, 4], + [3, 0, -1, 0, 0]], dtype=np.float32) + expected_matched_cols = np.array([0, 3, 4]) + expected_matched_rows = np.array([2, 0, 1]) + expected_unmatched_cols = np.array([2]) # col 1 has too low maximum val + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_return_correct_matches_unmatched_row_not_using_force_match(self): + + def graph_fn(similarity): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3., + unmatched_threshold=2.) 
+ match = matcher.match(similarity) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [-1, 0, -2, -2, -1], + [3, 0, -1, 2, 0]], dtype=np.float32) + expected_matched_cols = np.array([0, 3]) + expected_matched_rows = np.array([2, 0]) + expected_unmatched_cols = np.array([1, 2, 4]) + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_return_correct_matches_unmatched_row_while_using_force_match(self): + def graph_fn(similarity): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3., + unmatched_threshold=2., + force_match_for_each_row=True) + match = matcher.match(similarity) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [-1, 0, -2, -2, -1], + [3, 0, -1, 2, 0]], dtype=np.float32) + expected_matched_cols = np.array([0, 1, 3]) + expected_matched_rows = np.array([2, 1, 0]) + expected_unmatched_cols = np.array([2, 4]) # col 2 has too high max val + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_return_correct_matches_using_force_match_padded_groundtruth(self): + def graph_fn(similarity, valid_rows): + matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3., + unmatched_threshold=2., + force_match_for_each_row=True) + match = matcher.match(similarity, valid_rows) + matched_cols = match.matched_column_indicator() + unmatched_cols = match.unmatched_column_indicator() + match_results = match.match_results + return (matched_cols, unmatched_cols, match_results) + + similarity = np.array([[1, 1, 1, 3, 1], + [-1, 0, -2, -2, -1], + [0, 0, 0, 0, 0], + [3, 0, -1, 2, 0], + [0, 0, 0, 0, 0]], dtype=np.float32) + valid_rows = np.array([True, True, False, True, False]) + expected_matched_cols = np.array([0, 1, 3]) + expected_matched_rows = np.array([3, 1, 0]) + expected_unmatched_cols = np.array([2, 4]) # col 2 has too high max val + + (res_matched_cols, res_unmatched_cols, + match_results) = self.execute(graph_fn, [similarity, valid_rows]) + self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows) + self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols) + self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], + expected_unmatched_cols) + + def test_valid_arguments_corner_case(self): + argmax_matcher.ArgMaxMatcher(matched_threshold=1, + unmatched_threshold=1) + + def test_invalid_arguments_corner_case_negatives_lower_than_thres_false(self): + with self.assertRaises(ValueError): + argmax_matcher.ArgMaxMatcher(matched_threshold=1, + unmatched_threshold=1, + negatives_lower_than_unmatched=False) + + def test_invalid_arguments_no_matched_threshold(self): + with self.assertRaises(ValueError): + 
argmax_matcher.ArgMaxMatcher(matched_threshold=None, + unmatched_threshold=4) + + def test_invalid_arguments_unmatched_thres_larger_than_matched_thres(self): + with self.assertRaises(ValueError): + argmax_matcher.ArgMaxMatcher(matched_threshold=1, + unmatched_threshold=2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/bipartite_matcher.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/bipartite_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..f62afe0975f76397e49d06c7c86d5ff76896860b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/bipartite_matcher.py @@ -0,0 +1,70 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bipartite matcher implementation.""" + +import tensorflow.compat.v1 as tf + +from tensorflow.contrib.image.python.ops import image_ops +from object_detection.core import matcher + + +class GreedyBipartiteMatcher(matcher.Matcher): + """Wraps a Tensorflow greedy bipartite matcher.""" + + def __init__(self, use_matmul_gather=False): + """Constructs a Matcher. + + Args: + use_matmul_gather: Force constructed match objects to use matrix + multiplication based gather instead of standard tf.gather. + (Default: False). + """ + super(GreedyBipartiteMatcher, self).__init__( + use_matmul_gather=use_matmul_gather) + + def _match(self, similarity_matrix, valid_rows): + """Bipartite matches a collection rows and columns. A greedy bi-partite. + + TODO(rathodv): Add num_valid_columns options to match only that many columns + with all the rows. + + Args: + similarity_matrix: Float tensor of shape [N, M] with pairwise similarity + where higher values mean more similar. + valid_rows: A boolean tensor of shape [N] indicating the rows that are + valid. + + Returns: + match_results: int32 tensor of shape [M] with match_results[i]=-1 + meaning that column i is not matched and otherwise that it is matched to + row match_results[i]. + """ + valid_row_sim_matrix = tf.gather(similarity_matrix, + tf.squeeze(tf.where(valid_rows), axis=-1)) + invalid_row_sim_matrix = tf.gather( + similarity_matrix, + tf.squeeze(tf.where(tf.logical_not(valid_rows)), axis=-1)) + similarity_matrix = tf.concat( + [valid_row_sim_matrix, invalid_row_sim_matrix], axis=0) + # Convert similarity matrix to distance matrix as tf.image.bipartite tries + # to find minimum distance matches. 
+ distance_matrix = -1 * similarity_matrix + num_valid_rows = tf.reduce_sum(tf.cast(valid_rows, dtype=tf.float32)) + _, match_results = image_ops.bipartite_match( + distance_matrix, num_valid_rows=num_valid_rows) + match_results = tf.reshape(match_results, [-1]) + match_results = tf.cast(match_results, tf.int32) + return match_results diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/bipartite_matcher.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/bipartite_matcher.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b029f0796cf46307df6ed6b43a2ca213925278e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/bipartite_matcher.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/bipartite_matcher_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/bipartite_matcher_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..314546ad4ee507d3024746044d4d4a30bc92e85d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/bipartite_matcher_tf1_test.py @@ -0,0 +1,92 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.core.bipartite_matcher.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.utils import test_case +from object_detection.utils import tf_version + +if tf_version.is_tf1(): + from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class GreedyBipartiteMatcherTest(test_case.TestCase): + + def test_get_expected_matches_when_all_rows_are_valid(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.ones([2], dtype=np.bool) + expected_match_results = [-1, 1, 0] + def graph_fn(similarity_matrix, valid_rows): + matcher = bipartite_matcher.GreedyBipartiteMatcher() + match = matcher.match(similarity_matrix, valid_rows=valid_rows) + return match._match_results + match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows]) + self.assertAllEqual(match_results_out, expected_match_results) + + def test_get_expected_matches_with_all_rows_be_default(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + expected_match_results = [-1, 1, 0] + def graph_fn(similarity_matrix): + matcher = bipartite_matcher.GreedyBipartiteMatcher() + match = matcher.match(similarity_matrix) + return match._match_results + match_results_out = self.execute(graph_fn, [similarity_matrix]) + self.assertAllEqual(match_results_out, expected_match_results) + + def test_get_no_matches_with_zero_valid_rows(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.zeros([2], dtype=np.bool) + expected_match_results = [-1, -1, -1] + def graph_fn(similarity_matrix, valid_rows): + matcher = bipartite_matcher.GreedyBipartiteMatcher() + match = matcher.match(similarity_matrix, valid_rows=valid_rows) + return match._match_results + match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows]) + self.assertAllEqual(match_results_out, expected_match_results) + + def test_get_expected_matches_with_only_one_valid_row(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.array([True, False], dtype=np.bool) + expected_match_results = [-1, -1, 0] + def graph_fn(similarity_matrix, valid_rows): + matcher = bipartite_matcher.GreedyBipartiteMatcher() + match = matcher.match(similarity_matrix, valid_rows=valid_rows) + return match._match_results + match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows]) + self.assertAllEqual(match_results_out, expected_match_results) + + def test_get_expected_matches_with_only_one_valid_row_at_bottom(self): + similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8]], + dtype=np.float32) + valid_rows = np.array([False, True], dtype=np.bool) + expected_match_results = [-1, -1, 0] + def graph_fn(similarity_matrix, valid_rows): + matcher = bipartite_matcher.GreedyBipartiteMatcher() + match = matcher.match(similarity_matrix, valid_rows=valid_rows) + return match._match_results + match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows]) + self.assertAllEqual(match_results_out, expected_match_results) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/hungarian_matcher.py 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/hungarian_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..63ee5d9f228a94406b1b3c1707eb493572749a91 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/hungarian_matcher.py @@ -0,0 +1,58 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Hungarian bipartite matcher implementation.""" + +import numpy as np +from scipy.optimize import linear_sum_assignment + +import tensorflow.compat.v1 as tf +from object_detection.core import matcher + + +class HungarianBipartiteMatcher(matcher.Matcher): + """Wraps a Hungarian bipartite matcher into TensorFlow.""" + + def _match(self, similarity_matrix, valid_rows): + """Optimally bipartite matches a collection rows and columns. + + Args: + similarity_matrix: Float tensor of shape [N, M] with pairwise similarity + where higher values mean more similar. + valid_rows: A boolean tensor of shape [N] indicating the rows that are + valid. + + Returns: + match_results: int32 tensor of shape [M] with match_results[i]=-1 + meaning that column i is not matched and otherwise that it is matched to + row match_results[i]. + """ + valid_row_sim_matrix = tf.gather(similarity_matrix, + tf.squeeze(tf.where(valid_rows), axis=-1)) + distance_matrix = -1 * valid_row_sim_matrix + + def numpy_wrapper(inputs): + def numpy_matching(input_matrix): + row_indices, col_indices = linear_sum_assignment(input_matrix) + match_results = np.full(input_matrix.shape[1], -1) + match_results[col_indices] = row_indices + return match_results.astype(np.int32) + + return tf.numpy_function(numpy_matching, inputs, Tout=[tf.int32]) + + matching_result = tf.autograph.experimental.do_not_convert( + numpy_wrapper)([distance_matrix]) + + return tf.reshape(matching_result, [-1]) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/hungarian_matcher.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/hungarian_matcher.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c792a55cdeed3f22272462889dc6f50de6369cc Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/hungarian_matcher.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/hungarian_matcher_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/hungarian_matcher_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..bbac858a42db5ca53ab89b40d2fd95010d2d18fd --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/matchers/hungarian_matcher_tf2_test.py @@ -0,0 +1,105 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
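# Sketch of roughly the numpy/scipy core of HungarianBipartiteMatcher._match()
# above, outside TensorFlow: total similarity is maximized by minimizing its
# negation with scipy's linear_sum_assignment, and unassigned columns are marked
# with -1. The similarity matrix matches the one used in the tests below.
import numpy as np
from scipy.optimize import linear_sum_assignment

similarity = np.array([[0.50, 0.1, 0.8],
                       [0.15, 0.2, 0.3]], dtype=np.float32)
row_indices, col_indices = linear_sum_assignment(-1 * similarity)
match_results = np.full(similarity.shape[1], -1)
match_results[col_indices] = row_indices
print(match_results)  # -> [-1  1  0]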
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.core.bipartite_matcher.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.utils import test_case +from object_detection.utils import tf_version + +if tf_version.is_tf2(): + from object_detection.matchers import hungarian_matcher # pylint: disable=g-import-not-at-top + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class HungarianBipartiteMatcherTest(test_case.TestCase): + + def test_get_expected_matches_when_all_rows_are_valid(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.ones([2], dtype=np.bool) + expected_match_results = [-1, 1, 0] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) + + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) + + def test_get_expected_matches_with_all_rows_be_default(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + expected_match_results = [-1, 1, 0] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix) + + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) + + def test_get_no_matches_with_zero_valid_rows(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.zeros([2], dtype=np.bool) + expected_match_results = [-1, -1, -1] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) + + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) + + def test_get_expected_matches_with_only_one_valid_row(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.array([True, False], dtype=np.bool) + expected_match_results = [-1, -1, 0] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) + + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) + + def test_get_expected_matches_with_only_one_valid_row_at_bottom(self): + similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8]], + dtype=np.float32) + valid_rows = np.array([False, True], dtype=np.bool) + expected_match_results = [-1, -1, 0] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) + + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) + + def test_get_expected_matches_with_two_valid_rows(self): + similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8], + [0.84, 0.32, 0.2]], 
+ dtype=np.float32) + valid_rows = np.array([True, False, True], dtype=np.bool) + expected_match_results = [1, -1, 0] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) + + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab595ac87d5e91d28cbfdb8e60b830bc9722823e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..100fd28e7c3449f70739adedae0afdd41abd49cb Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/center_net_meta_arch.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/center_net_meta_arch.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5114775ed80458b7347845c63a84f7facfa290f0 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/center_net_meta_arch.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/context_rcnn_lib.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/context_rcnn_lib.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9965626c2aad36e1bd2bdf35847630b561e0e32 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/context_rcnn_lib.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/context_rcnn_lib_tf2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/context_rcnn_lib_tf2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61dde685a30e469b90b43265b9cc2c21bc0883e5 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/context_rcnn_lib_tf2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/context_rcnn_meta_arch.cpython-36.pyc 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/context_rcnn_meta_arch.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..997139b92419f46119c28742165fb8324a6d757c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/context_rcnn_meta_arch.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/faster_rcnn_meta_arch.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/faster_rcnn_meta_arch.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f579f10bfca3056f1ab4b6c31925d2c97c4de7d8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/faster_rcnn_meta_arch.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/rfcn_meta_arch.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/rfcn_meta_arch.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..465128d373abd8bd9a8054f2ecbe4bb744da5814 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/rfcn_meta_arch.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/ssd_meta_arch.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/ssd_meta_arch.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73b8cf484e93f8e9fd9e0e59425c063bac018fa0 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/__pycache__/ssd_meta_arch.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/center_net_meta_arch.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/center_net_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..fc0d5a1149eb57d12a936eb1f966e26bb5ea14f7 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/center_net_meta_arch.py @@ -0,0 +1,3505 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The CenterNet meta architecture as described in the "Objects as Points" paper [1]. 
+
+[1]: https://arxiv.org/abs/1904.07850
+
+"""
+
+import abc
+import collections
+import functools
+import numpy as np
+import tensorflow.compat.v1 as tf
+import tensorflow.compat.v2 as tf2
+
+from object_detection.core import box_list
+from object_detection.core import box_list_ops
+from object_detection.core import keypoint_ops
+from object_detection.core import model
+from object_detection.core import standard_fields as fields
+from object_detection.core import target_assigner as cn_assigner
+from object_detection.utils import shape_utils
+
+# Number of channels needed to predict size and offsets.
+NUM_OFFSET_CHANNELS = 2
+NUM_SIZE_CHANNELS = 2
+
+# Error range for detecting peaks.
+PEAK_EPSILON = 1e-6
+
+# Constants shared between all keypoint tasks.
+UNMATCHED_KEYPOINT_SCORE = 0.1
+KEYPOINT_CANDIDATE_SEARCH_SCALE = 0.3
+
+
+class CenterNetFeatureExtractor(tf.keras.Model):
+  """Base class for feature extractors for the CenterNet meta architecture.
+
+  Child classes are expected to override the _output_model property which will
+  return 1 or more tensors predicted by the feature extractor.
+
+  """
+  __metaclass__ = abc.ABCMeta
+
+  def __init__(self, name=None, channel_means=(0., 0., 0.),
+               channel_stds=(1., 1., 1.), bgr_ordering=False):
+    """Initializes a CenterNet feature extractor.
+
+    Args:
+      name: str, the name used for the underlying keras model.
+      channel_means: A tuple of floats, denoting the mean of each channel
+        which will be subtracted from it. If None or empty, we use 0s.
+      channel_stds: A tuple of floats, denoting the standard deviation of each
+        channel. Each channel will be divided by its standard deviation value.
+        If None or empty, we use 1s.
+      bgr_ordering: bool, if set will change the channel ordering to be in the
+        [blue, green, red] order.
+    """
+    super(CenterNetFeatureExtractor, self).__init__(name=name)
+
+    if channel_means is None or len(channel_means) == 0:  # pylint:disable=g-explicit-length-test
+      channel_means = [0., 0., 0.]
+
+    if channel_stds is None or len(channel_stds) == 0:  # pylint:disable=g-explicit-length-test
+      channel_stds = [1., 1., 1.]
+
+    self._channel_means = channel_means
+    self._channel_stds = channel_stds
+    self._bgr_ordering = bgr_ordering
+
+  def preprocess(self, inputs):
+    """Converts a batch of unscaled images to a scale suitable for the model.
+
+    This method normalizes the image using the `channel_means` and
+    `channel_stds` values given at initialization time, while optionally
+    flipping the channel order if `bgr_ordering` is set.
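+
+    Concretely, the output is (inputs - channel_means) / channel_stds,
+    computed per channel after the optional channel reordering.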
+ + Args: + inputs: a [batch, height, width, channels] float32 tensor + + Returns: + outputs: a [batch, height, width, channels] float32 tensor + + """ + + if self._bgr_ordering: + red, green, blue = tf.unstack(inputs, axis=3) + inputs = tf.stack([blue, green, red], axis=3) + + channel_means = tf.reshape(tf.constant(self._channel_means), + [1, 1, 1, -1]) + channel_stds = tf.reshape(tf.constant(self._channel_stds), + [1, 1, 1, -1]) + + return (inputs - channel_means)/channel_stds + + @property + @abc.abstractmethod + def out_stride(self): + """The stride in the output image of the network.""" + pass + + @property + @abc.abstractmethod + def num_feature_outputs(self): + """Ther number of feature outputs returned by the feature extractor.""" + pass + + @property + @abc.abstractmethod + def supported_sub_model_types(self): + """Valid sub model types supported by the get_sub_model function.""" + pass + + @abc.abstractmethod + def get_sub_model(self, sub_model_type): + """Returns the underlying keras model for the given sub_model_type. + + This function is useful when we only want to get a subset of weights to + be restored from a checkpoint. + + Args: + sub_model_type: string, the type of sub model. Currently, CenterNet + feature extractors support 'detection' and 'classification'. + """ + pass + + +def make_prediction_net(num_out_channels, kernel_size=3, num_filters=256, + bias_fill=None, use_depthwise=False, name=None): + """Creates a network to predict the given number of output channels. + + This function is intended to make the prediction heads for the CenterNet + meta architecture. + + Args: + num_out_channels: Number of output channels. + kernel_size: The size of the conv kernel in the intermediate layer + num_filters: The number of filters in the intermediate conv layer. + bias_fill: If not None, is used to initialize the bias in the final conv + layer. + use_depthwise: If true, use SeparableConv2D to construct the Sequential + layers instead of Conv2D. + name: Optional name for the prediction net. + + Returns: + net: A keras module which when called on an input tensor of size + [batch_size, height, width, num_in_channels] returns an output + of size [batch_size, height, width, num_out_channels] + """ + if use_depthwise: + conv_fn = tf.keras.layers.SeparableConv2D + else: + conv_fn = tf.keras.layers.Conv2D + + out_conv = tf.keras.layers.Conv2D(num_out_channels, kernel_size=1) + + if bias_fill is not None: + out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill) + + net = tf.keras.Sequential( + [conv_fn(num_filters, kernel_size=kernel_size, padding='same'), + tf.keras.layers.ReLU(), + out_conv], + name=name) + + return net + + +def _to_float32(x): + return tf.cast(x, tf.float32) + + +def _get_shape(tensor, num_dims): + assert len(tensor.shape.as_list()) == num_dims + return shape_utils.combined_static_and_dynamic_shape(tensor) + + +def _flatten_spatial_dimensions(batch_images): + batch_size, height, width, channels = _get_shape(batch_images, 4) + return tf.reshape(batch_images, [batch_size, height * width, + channels]) + + +def _multi_range(limit, + value_repetitions=1, + range_repetitions=1, + dtype=tf.int32): + """Creates a sequence with optional value duplication and range repetition. + + As an example (see the Args section for more details), + _multi_range(limit=2, value_repetitions=3, range_repetitions=4) returns: + + [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1] + + Args: + limit: A 0-D Tensor (scalar). Upper limit of sequence, exclusive. 
+ value_repetitions: Integer. The number of times a value in the sequence is + repeated. With value_repetitions=3, the result is [0, 0, 0, 1, 1, 1, ..]. + range_repetitions: Integer. The number of times the range is repeated. With + range_repetitions=3, the result is [0, 1, 2, .., 0, 1, 2, ..]. + dtype: The type of the elements of the resulting tensor. + + Returns: + A 1-D tensor of type `dtype` and size + [`limit` * `value_repetitions` * `range_repetitions`] that contains the + specified range with given repetitions. + """ + return tf.reshape( + tf.tile( + tf.expand_dims(tf.range(limit, dtype=dtype), axis=-1), + multiples=[range_repetitions, value_repetitions]), [-1]) + + +def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100, + per_channel=False): + """Returns the top k scores and their locations in a feature map. + + Given a feature map, the top k values (based on activation) are returned. If + `per_channel` is True, the top k values **per channel** are returned. Note + that when k equals to 1, ths function uses reduce_max and argmax instead of + top_k to make the logics more efficient. + + The `max_pool_kernel_size` argument allows for selecting local peaks in a + region. This filtering is done per channel, so nothing prevents two values at + the same location to be returned. + + Args: + feature_map: [batch, height, width, channels] float32 feature map. + max_pool_kernel_size: integer, the max pool kernel size to use to pull off + peak score locations in a neighborhood (independently for each channel). + For example, to make sure no two neighboring values (in the same channel) + are returned, set max_pool_kernel_size=3. If None or 1, will not apply max + pooling. + k: The number of highest scoring locations to return. + per_channel: If True, will return the top k scores and locations per + feature map channel. If False, the top k across the entire feature map + (height x width x channels) are returned. + + Returns: + Tuple of + scores: A [batch, N] float32 tensor with scores from the feature map in + descending order. If per_channel is False, N = k. Otherwise, + N = k * channels, and the first k elements correspond to channel 0, the + second k correspond to channel 1, etc. + y_indices: A [batch, N] int tensor with y indices of the top k feature map + locations. If per_channel is False, N = k. Otherwise, + N = k * channels. + x_indices: A [batch, N] int tensor with x indices of the top k feature map + locations. If per_channel is False, N = k. Otherwise, + N = k * channels. + channel_indices: A [batch, N] int tensor with channel indices of the top k + feature map locations. If per_channel is False, N = k. Otherwise, + N = k * channels. + """ + if not max_pool_kernel_size or max_pool_kernel_size == 1: + feature_map_peaks = feature_map + else: + feature_map_max_pool = tf.nn.max_pool( + feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME') + + feature_map_peak_mask = tf.math.abs( + feature_map - feature_map_max_pool) < PEAK_EPSILON + + # Zero out everything that is not a peak. 
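+    # feature_map_peak_mask is True only where a value equals the max-pooled
+    # value of its neighborhood, i.e. at per-channel local maxima, so the
+    # multiplication below keeps the peak scores and zeroes everything else.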
+ feature_map_peaks = ( + feature_map * _to_float32(feature_map_peak_mask)) + + batch_size, _, width, num_channels = _get_shape(feature_map, 4) + + if per_channel: + if k == 1: + feature_map_flattened = tf.reshape( + feature_map_peaks, [batch_size, -1, num_channels]) + scores = tf.math.reduce_max(feature_map_flattened, axis=1) + peak_flat_indices = tf.math.argmax( + feature_map_flattened, axis=1, output_type=tf.dtypes.int32) + peak_flat_indices = tf.expand_dims(peak_flat_indices, axis=-1) + else: + # Perform top k over batch and channels. + feature_map_peaks_transposed = tf.transpose(feature_map_peaks, + perm=[0, 3, 1, 2]) + feature_map_peaks_transposed = tf.reshape( + feature_map_peaks_transposed, [batch_size, num_channels, -1]) + scores, peak_flat_indices = tf.math.top_k( + feature_map_peaks_transposed, k=k) + # Convert the indices such that they represent the location in the full + # (flattened) feature map of size [batch, height * width * channels]. + channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis] + peak_flat_indices = num_channels * peak_flat_indices + channel_idx + scores = tf.reshape(scores, [batch_size, -1]) + peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1]) + else: + if k == 1: + feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1]) + scores = tf.math.reduce_max(feature_map_peaks_flat, axis=1, keepdims=True) + peak_flat_indices = tf.expand_dims(tf.math.argmax( + feature_map_peaks_flat, axis=1, output_type=tf.dtypes.int32), axis=-1) + else: + feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1]) + scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat, k=k) + + # Get x, y and channel indices corresponding to the top indices in the flat + # array. + y_indices, x_indices, channel_indices = ( + row_col_channel_indices_from_flattened_indices( + peak_flat_indices, width, num_channels)) + return scores, y_indices, x_indices, channel_indices + + +def prediction_tensors_to_boxes(detection_scores, y_indices, x_indices, + channel_indices, height_width_predictions, + offset_predictions): + """Converts CenterNet class-center, offset and size predictions to boxes. + + Args: + detection_scores: A [batch, num_boxes] float32 tensor with detection + scores in range [0, 1]. + y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to + object center locations (expressed in output coordinate frame). + x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to + object center locations (expressed in output coordinate frame). + channel_indices: A [batch, num_boxes] int32 tensor with channel indices + corresponding to object classes. + height_width_predictions: A float tensor of shape [batch_size, height, + width, 2] representing the height and width of a box centered at each + pixel. + offset_predictions: A float tensor of shape [batch_size, height, width, 2] + representing the y and x offsets of a box centered at each pixel. This + helps reduce the error from downsampling. + + Returns: + detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the + the raw bounding box coordinates of boxes. + detection_classes: An integer tensor of shape [batch_size, num_boxes] + indicating the predicted class for each box. + detection_scores: A float tensor of shape [batch_size, num_boxes] indicating + the score for each box. + num_detections: An integer tensor of shape [batch_size,] indicating the + number of boxes detected for each sample in the batch. 
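+
+  Each box is decoded, in output-stride coordinates, as
+  [y + dy - h / 2, x + dx - w / 2, y + dy + h / 2, x + dx + w / 2], where
+  (y, x) is the peak location, (dy, dx) the predicted offset and (h, w) the
+  predicted height and width at that location.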
+ + """ + batch_size, num_boxes = _get_shape(y_indices, 2) + + # TF Lite does not support tf.gather with batch_dims > 0, so we need to use + # tf_gather_nd instead and here we prepare the indices for that. + combined_indices = tf.stack([ + _multi_range(batch_size, value_repetitions=num_boxes), + tf.reshape(y_indices, [-1]), + tf.reshape(x_indices, [-1]) + ], axis=1) + new_height_width = tf.gather_nd(height_width_predictions, combined_indices) + new_height_width = tf.reshape(new_height_width, [batch_size, num_boxes, -1]) + + new_offsets = tf.gather_nd(offset_predictions, combined_indices) + offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1]) + + y_indices = _to_float32(y_indices) + x_indices = _to_float32(x_indices) + + height_width = tf.maximum(new_height_width, 0) + heights, widths = tf.unstack(height_width, axis=2) + y_offsets, x_offsets = tf.unstack(offsets, axis=2) + + detection_classes = channel_indices + + num_detections = tf.reduce_sum(tf.to_int32(detection_scores > 0), axis=1) + + boxes = tf.stack([y_indices + y_offsets - heights / 2.0, + x_indices + x_offsets - widths / 2.0, + y_indices + y_offsets + heights / 2.0, + x_indices + x_offsets + widths / 2.0], axis=2) + + return boxes, detection_classes, detection_scores, num_detections + + +def prediction_tensors_to_temporal_offsets( + y_indices, x_indices, offset_predictions): + """Converts CenterNet temporal offset map predictions to batched format. + + This function is similar to the box offset conversion function, as both + temporal offsets and box offsets are size-2 vectors. + + Args: + y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to + object center locations (expressed in output coordinate frame). + x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to + object center locations (expressed in output coordinate frame). + offset_predictions: A float tensor of shape [batch_size, height, width, 2] + representing the y and x offsets of a box's center across adjacent frames. + + Returns: + offsets: A tensor of shape [batch_size, num_boxes, 2] holding the + the object temporal offsets of (y, x) dimensions. + + """ + batch_size, num_boxes = _get_shape(y_indices, 2) + + # TF Lite does not support tf.gather with batch_dims > 0, so we need to use + # tf_gather_nd instead and here we prepare the indices for that. + combined_indices = tf.stack([ + _multi_range(batch_size, value_repetitions=num_boxes), + tf.reshape(y_indices, [-1]), + tf.reshape(x_indices, [-1]) + ], axis=1) + + new_offsets = tf.gather_nd(offset_predictions, combined_indices) + offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1]) + + return offsets + + +def prediction_tensors_to_keypoint_candidates( + keypoint_heatmap_predictions, + keypoint_heatmap_offsets, + keypoint_score_threshold=0.1, + max_pool_kernel_size=1, + max_candidates=20): + """Convert keypoint heatmap predictions and offsets to keypoint candidates. + + Args: + keypoint_heatmap_predictions: A float tensor of shape [batch_size, height, + width, num_keypoints] representing the per-keypoint heatmaps. + keypoint_heatmap_offsets: A float tensor of shape [batch_size, height, + width, 2] (or [batch_size, height, width, 2 * num_keypoints] if + 'per_keypoint_offset' is set True) representing the per-keypoint offsets. + keypoint_score_threshold: float, the threshold for considering a keypoint + a candidate. + max_pool_kernel_size: integer, the max pool kernel size to use to pull off + peak score locations in a neighborhood. 
For example, to make sure no two + neighboring values for the same keypoint are returned, set + max_pool_kernel_size=3. If None or 1, will not apply any local filtering. + max_candidates: integer, maximum number of keypoint candidates per + keypoint type. + + Returns: + keypoint_candidates: A tensor of shape + [batch_size, max_candidates, num_keypoints, 2] holding the + location of keypoint candidates in [y, x] format (expressed in absolute + coordinates in the output coordinate frame). + keypoint_scores: A float tensor of shape + [batch_size, max_candidates, num_keypoints] with the scores for each + keypoint candidate. The scores come directly from the heatmap predictions. + num_keypoint_candidates: An integer tensor of shape + [batch_size, num_keypoints] with the number of candidates for each + keypoint type, as it's possible to filter some candidates due to the score + threshold. + """ + batch_size, _, _, num_keypoints = _get_shape(keypoint_heatmap_predictions, 4) + # Get x, y and channel indices corresponding to the top indices in the + # keypoint heatmap predictions. + # Note that the top k candidates are produced for **each keypoint type**. + # Might be worth eventually trying top k in the feature map, independent of + # the keypoint type. + keypoint_scores, y_indices, x_indices, channel_indices = ( + top_k_feature_map_locations(keypoint_heatmap_predictions, + max_pool_kernel_size=max_pool_kernel_size, + k=max_candidates, + per_channel=True)) + + # TF Lite does not support tf.gather with batch_dims > 0, so we need to use + # tf_gather_nd instead and here we prepare the indices for that. + _, num_indices = _get_shape(y_indices, 2) + combined_indices = tf.stack([ + _multi_range(batch_size, value_repetitions=num_indices), + tf.reshape(y_indices, [-1]), + tf.reshape(x_indices, [-1]) + ], axis=1) + + selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets, + combined_indices) + selected_offsets = tf.reshape(selected_offsets_flat, + [batch_size, num_indices, -1]) + + y_indices = _to_float32(y_indices) + x_indices = _to_float32(x_indices) + + _, _, num_channels = _get_shape(selected_offsets, 3) + if num_channels > 2: + # Offsets are per keypoint and the last dimension of selected_offsets + # contains all those offsets, so reshape the offsets to make sure that the + # last dimension contains (y_offset, x_offset) for a single keypoint. + reshaped_offsets = tf.reshape(selected_offsets, + [batch_size, num_indices, -1, 2]) + + # TF Lite does not support tf.gather with batch_dims > 0, so we need to use + # tf_gather_nd instead and here we prepare the indices for that. In this + # case, channel_indices indicates which keypoint to use the offset from. 
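+    # Each row of the index tensor built below is
+    # (batch_index, candidate_index, keypoint_index), so the gather selects
+    # only the (y, x) offset pair that belongs to the keypoint type which
+    # produced each candidate.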
+ combined_indices = tf.stack([ + _multi_range(batch_size, value_repetitions=num_indices), + _multi_range(num_indices, range_repetitions=batch_size), + tf.reshape(channel_indices, [-1]) + ], axis=1) + + offsets = tf.gather_nd(reshaped_offsets, combined_indices) + offsets = tf.reshape(offsets, [batch_size, num_indices, -1]) + else: + offsets = selected_offsets + y_offsets, x_offsets = tf.unstack(offsets, axis=2) + + keypoint_candidates = tf.stack([y_indices + y_offsets, + x_indices + x_offsets], axis=2) + keypoint_candidates = tf.reshape( + keypoint_candidates, + [batch_size, num_keypoints, max_candidates, 2]) + keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3]) + keypoint_scores = tf.reshape( + keypoint_scores, + [batch_size, num_keypoints, max_candidates]) + keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1]) + num_candidates = tf.reduce_sum( + tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1) + + return keypoint_candidates, keypoint_scores, num_candidates + + +def regressed_keypoints_at_object_centers(regressed_keypoint_predictions, + y_indices, x_indices): + """Returns the regressed keypoints at specified object centers. + + The original keypoint predictions are regressed relative to each feature map + location. The returned keypoints are expressed in absolute coordinates in the + output frame (i.e. the center offsets are added to each individual regressed + set of keypoints). + + Args: + regressed_keypoint_predictions: A float tensor of shape + [batch_size, height, width, 2 * num_keypoints] holding regressed + keypoints. The last dimension has keypoint coordinates ordered as follows: + [y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints. + y_indices: A [batch, num_instances] int tensor holding y indices for object + centers. These indices correspond to locations in the output feature map. + x_indices: A [batch, num_instances] int tensor holding x indices for object + centers. These indices correspond to locations in the output feature map. + + Returns: + A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where + regressed keypoints are gathered at the provided locations, and converted + to absolute coordinates in the output coordinate frame. + """ + batch_size, num_instances = _get_shape(y_indices, 2) + + # TF Lite does not support tf.gather with batch_dims > 0, so we need to use + # tf_gather_nd instead and here we prepare the indices for that. 
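+  # Each row of combined_indices is (batch_index, center_y, center_x), so the
+  # gather below reads the full regressed keypoint vector at every object
+  # center location.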
+ combined_indices = tf.stack([ + _multi_range(batch_size, value_repetitions=num_instances), + tf.reshape(y_indices, [-1]), + tf.reshape(x_indices, [-1]) + ], axis=1) + + relative_regressed_keypoints = tf.gather_nd(regressed_keypoint_predictions, + combined_indices) + relative_regressed_keypoints = tf.reshape( + relative_regressed_keypoints, + [batch_size, num_instances, -1, 2]) + relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack( + relative_regressed_keypoints, axis=3) + y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1)) + x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1)) + absolute_regressed_keypoints = tf.stack( + [y_indices + relative_regressed_keypoints_y, + x_indices + relative_regressed_keypoints_x], + axis=3) + return tf.reshape(absolute_regressed_keypoints, + [batch_size, num_instances, -1]) + + +def refine_keypoints(regressed_keypoints, keypoint_candidates, keypoint_scores, + num_keypoint_candidates, bboxes=None, + unmatched_keypoint_score=0.1, box_scale=1.2, + candidate_search_scale=0.3, + candidate_ranking_mode='min_distance'): + """Refines regressed keypoints by snapping to the nearest candidate keypoints. + + The initial regressed keypoints represent a full set of keypoints regressed + from the centers of the objects. The keypoint candidates are estimated + independently from heatmaps, and are not associated with any object instances. + This function refines the regressed keypoints by "snapping" to the + nearest/highest score/highest score-distance ratio (depending on the + candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose"). + If no candidates are nearby, the regressed keypoint remains unchanged. + + In order to snap a regressed keypoint to a candidate keypoint, the following + must be satisfied: + - the candidate keypoint must be of the same type as the regressed keypoint + - the candidate keypoint must not lie outside the predicted boxes (or the + boxes which encloses the regressed keypoints for the instance if `bboxes` is + not provided). Note that the box is scaled by + `regressed_box_scale` in height and width, to provide some margin around the + keypoints + - the distance to the closest candidate keypoint cannot exceed + candidate_search_scale * max(height, width), where height and width refer to + the bounding box for the instance. + + Note that the same candidate keypoint is allowed to snap to regressed + keypoints in difference instances. + + Args: + regressed_keypoints: A float tensor of shape + [batch_size, num_instances, num_keypoints, 2] with the initial regressed + keypoints. + keypoint_candidates: A tensor of shape + [batch_size, max_candidates, num_keypoints, 2] holding the location of + keypoint candidates in [y, x] format (expressed in absolute coordinates in + the output coordinate frame). + keypoint_scores: A float tensor of shape + [batch_size, max_candidates, num_keypoints] indicating the scores for + keypoint candidates. + num_keypoint_candidates: An integer tensor of shape + [batch_size, num_keypoints] indicating the number of valid candidates for + each keypoint type, as there may be padding (dim 1) of + `keypoint_candidates` and `keypoint_scores`. + bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted + bounding boxes for each instance, expressed in the output coordinate + frame. If not provided, boxes will be computed from regressed keypoints. 
+ unmatched_keypoint_score: float, the default score to use for regressed + keypoints that are not successfully snapped to a nearby candidate. + box_scale: float, the multiplier to expand the bounding boxes (either the + provided boxes or those which tightly cover the regressed keypoints) for + an instance. This scale is typically larger than 1.0 when not providing + `bboxes`. + candidate_search_scale: float, the scale parameter that multiplies the + largest dimension of a bounding box. The resulting distance becomes a + search radius for candidates in the vicinity of each regressed keypoint. + candidate_ranking_mode: A string as one of ['min_distance', + 'score_distance_ratio'] indicating how to select the candidate. If invalid + value is provided, an ValueError will be raised. + + Returns: + A tuple with: + refined_keypoints: A float tensor of shape + [batch_size, num_instances, num_keypoints, 2] with the final, refined + keypoints. + refined_scores: A float tensor of shape + [batch_size, num_instances, num_keypoints] with scores associated with all + instances and keypoints in `refined_keypoints`. + + Raises: + ValueError: if provided candidate_ranking_mode is not one of + ['min_distance', 'score_distance_ratio'] + """ + batch_size, num_instances, num_keypoints, _ = ( + shape_utils.combined_static_and_dynamic_shape(regressed_keypoints)) + max_candidates = keypoint_candidates.shape[1] + + # Replace all invalid (i.e. padded) keypoint candidates with NaN. + # This will prevent them from being considered. + range_tiled = tf.tile( + tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]), + [batch_size, 1, num_keypoints]) + num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1), + [1, max_candidates, 1]) + invalid_candidates = range_tiled >= num_candidates_tiled + nan_mask = tf.where( + invalid_candidates, + np.nan * tf.ones_like(invalid_candidates, dtype=tf.float32), + tf.ones_like(invalid_candidates, dtype=tf.float32)) + keypoint_candidates_with_nans = tf.math.multiply( + keypoint_candidates, tf.expand_dims(nan_mask, -1)) + + # Pairwise squared distances between regressed keypoints and candidate + # keypoints (for a single keypoint type). + # Shape [batch_size, num_instances, 1, num_keypoints, 2]. + regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints, + axis=2) + # Shape [batch_size, 1, max_candidates, num_keypoints, 2]. + keypoint_candidates_expanded = tf.expand_dims( + keypoint_candidates_with_nans, axis=1) + # Use explicit tensor shape broadcasting (since the tensor dimensions are + # expanded to 5D) to make it tf.lite compatible. + regressed_keypoint_expanded = tf.tile( + regressed_keypoint_expanded, multiples=[1, 1, max_candidates, 1, 1]) + keypoint_candidates_expanded = tf.tile( + keypoint_candidates_expanded, multiples=[1, num_instances, 1, 1, 1]) + # Replace tf.math.squared_difference by "-" operator and tf.multiply ops since + # tf.lite convert doesn't support squared_difference with undetermined + # dimension. + diff = regressed_keypoint_expanded - keypoint_candidates_expanded + sqrd_distances = tf.math.reduce_sum(tf.multiply(diff, diff), axis=-1) + distances = tf.math.sqrt(sqrd_distances) + + # Replace the NaNs with Infs to make sure the following reduce_min/argmin + # behaves properly. + distances = tf.where( + tf.math.is_nan(distances), np.inf * tf.ones_like(distances), distances) + + # Determine the candidates that have the minimum distance to the regressed + # keypoints. Shape [batch_size, num_instances, num_keypoints]. 
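+  # With 'min_distance' the closest candidate wins; with 'score_distance_ratio'
+  # candidates are ranked by heatmap_score / (distance + 1e-6), so a confident
+  # candidate can win over a slightly closer but weaker one.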
+ min_distances = tf.math.reduce_min(distances, axis=2) + if candidate_ranking_mode == 'min_distance': + nearby_candidate_inds = tf.math.argmin(distances, axis=2) + elif candidate_ranking_mode == 'score_distance_ratio': + # tiled_keypoint_scores: + # Shape [batch_size, num_instances, max_candidates, num_keypoints]. + tiled_keypoint_scores = tf.tile( + tf.expand_dims(keypoint_scores, axis=1), + multiples=[1, num_instances, 1, 1]) + ranking_scores = tiled_keypoint_scores / (distances + 1e-6) + nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2) + else: + raise ValueError('Not recognized candidate_ranking_mode: %s' % + candidate_ranking_mode) + + # Gather the coordinates and scores corresponding to the closest candidates. + # Shape of tensors are [batch_size, num_instances, num_keypoints, 2] and + # [batch_size, num_instances, num_keypoints], respectively. + nearby_candidate_coords, nearby_candidate_scores = ( + _gather_candidates_at_indices(keypoint_candidates, keypoint_scores, + nearby_candidate_inds)) + + if bboxes is None: + # Create bboxes from regressed keypoints. + # Shape [batch_size * num_instances, 4]. + regressed_keypoints_flattened = tf.reshape( + regressed_keypoints, [-1, num_keypoints, 2]) + bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes( + regressed_keypoints_flattened) + else: + bboxes_flattened = tf.reshape(bboxes, [-1, 4]) + + # Scale the bounding boxes. + # Shape [batch_size, num_instances, 4]. + boxlist = box_list.BoxList(bboxes_flattened) + boxlist_scaled = box_list_ops.scale_height_width( + boxlist, box_scale, box_scale) + bboxes_scaled = boxlist_scaled.get() + bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4]) + + # Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint. + # Shape [batch_size, num_instances, num_keypoints]. + bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1]) + ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3) + + # Produce a mask that indicates whether the original regressed keypoint + # should be used instead of a candidate keypoint. + # Shape [batch_size, num_instances, num_keypoints]. + search_radius = ( + tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale) + mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) + + tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) + + tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) + + tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) + + # Filter out the chosen candidate with score lower than unmatched + # keypoint score. + tf.cast(nearby_candidate_scores < + unmatched_keypoint_score, tf.int32) + + tf.cast(min_distances > search_radius, tf.int32)) + mask = mask > 0 + + # Create refined keypoints where candidate keypoints replace original + # regressed keypoints if they are in the vicinity of the regressed keypoints. + # Shape [batch_size, num_instances, num_keypoints, 2]. + refined_keypoints = tf.where( + tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]), + regressed_keypoints, + nearby_candidate_coords) + + # Update keypoints scores. In the case where we use the original regressed + # keypoints, we use a default score of `unmatched_keypoint_score`. + # Shape [batch_size, num_instances, num_keypoints]. 
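+  # `mask` is True wherever the candidate was rejected (outside the scaled box,
+  # scored below `unmatched_keypoint_score`, or farther than the search
+  # radius), so those keypoints keep their regressed coordinates and receive
+  # the fallback score.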
+ refined_scores = tf.where( + mask, + unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores), + nearby_candidate_scores) + + return refined_keypoints, refined_scores + + +def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds, + num_total_keypoints): + """Scatter keypoint elements into tensors with full keypoints dimension. + + Args: + keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32 + tensor. + keypoint_scores: a [batch_size, num_instances, num_keypoints] float32 + tensor. + keypoint_inds: a list of integers that indicate the keypoint indices for + this specific keypoint class. These indices are used to scatter into + tensors that have a `num_total_keypoints` dimension. + num_total_keypoints: The total number of keypoints that this model predicts. + + Returns: + A tuple with + keypoint_coords_padded: a + [batch_size, num_instances, num_total_keypoints,2] float32 tensor. + keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints] + float32 tensor. + """ + batch_size, num_instances, _, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) + kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3]) + kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1]) + kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1) + kpt_coords_scattered = tf.scatter_nd( + indices=kpt_inds_tensor, + updates=kpt_coords_transposed, + shape=[num_total_keypoints, batch_size, num_instances, 2]) + kpt_scores_scattered = tf.scatter_nd( + indices=kpt_inds_tensor, + updates=kpt_scores_transposed, + shape=[num_total_keypoints, batch_size, num_instances]) + keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3]) + keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0]) + return keypoint_coords_padded, keypoint_scores_padded + + +def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds, + max_instances): + """Scatter keypoint elements into tensors with full instance dimension. + + Args: + keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32 + tensor. + keypoint_scores: a [batch_size, num_instances, num_keypoints] float32 + tensor. + instance_inds: a list of integers that indicate the instance indices for + these keypoints. These indices are used to scatter into tensors + that have a `max_instances` dimension. + max_instances: The maximum number of instances detected by the model. + + Returns: + A tuple with + keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2] + float32 tensor. + keypoint_scores_padded: a [batch_size, max_instances, num_keypoints] + float32 tensor. 
+ """ + batch_size, _, num_keypoints, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) + kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3]) + kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2]) + instance_inds = tf.expand_dims(instance_inds, axis=-1) + kpt_coords_scattered = tf.scatter_nd( + indices=instance_inds, + updates=kpt_coords_transposed, + shape=[max_instances, batch_size, num_keypoints, 2]) + kpt_scores_scattered = tf.scatter_nd( + indices=instance_inds, + updates=kpt_scores_transposed, + shape=[max_instances, batch_size, num_keypoints]) + keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3]) + keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2]) + return keypoint_coords_padded, keypoint_scores_padded + + +def _gather_candidates_at_indices(keypoint_candidates, keypoint_scores, + indices): + """Gathers keypoint candidate coordinates and scores at indices. + + Args: + keypoint_candidates: a float tensor of shape [batch_size, max_candidates, + num_keypoints, 2] with candidate coordinates. + keypoint_scores: a float tensor of shape [batch_size, max_candidates, + num_keypoints] with keypoint scores. + indices: an integer tensor of shape [batch_size, num_indices, num_keypoints] + with indices. + + Returns: + A tuple with + gathered_keypoint_candidates: a float tensor of shape [batch_size, + num_indices, num_keypoints, 2] with gathered coordinates. + gathered_keypoint_scores: a float tensor of shape [batch_size, + num_indices, num_keypoints, 2]. + """ + batch_size, num_indices, num_keypoints = _get_shape(indices, 3) + + # Transpose tensors so that all batch dimensions are up front. + keypoint_candidates_transposed = tf.transpose(keypoint_candidates, + [0, 2, 1, 3]) + keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1]) + nearby_candidate_inds_transposed = tf.transpose(indices, [0, 2, 1]) + + # TF Lite does not support tf.gather with batch_dims > 0, so we need to use + # tf_gather_nd instead and here we prepare the indices for that. + combined_indices = tf.stack([ + _multi_range( + batch_size, + value_repetitions=num_keypoints * num_indices, + dtype=tf.int64), + _multi_range( + num_keypoints, + value_repetitions=num_indices, + range_repetitions=batch_size, + dtype=tf.int64), + tf.reshape(nearby_candidate_inds_transposed, [-1]) + ], axis=1) + + nearby_candidate_coords_transposed = tf.gather_nd( + keypoint_candidates_transposed, combined_indices) + nearby_candidate_coords_transposed = tf.reshape( + nearby_candidate_coords_transposed, + [batch_size, num_keypoints, num_indices, -1]) + + nearby_candidate_scores_transposed = tf.gather_nd(keypoint_scores_transposed, + combined_indices) + nearby_candidate_scores_transposed = tf.reshape( + nearby_candidate_scores_transposed, + [batch_size, num_keypoints, num_indices]) + + gathered_keypoint_candidates = tf.transpose( + nearby_candidate_coords_transposed, [0, 2, 1, 3]) + gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed, + [0, 2, 1]) + + return gathered_keypoint_candidates, gathered_keypoint_scores + + +def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols): + """Get the index in a flattened array given row and column indices.""" + return (row_indices * num_cols) + col_indices + + +def row_col_channel_indices_from_flattened_indices(indices, num_cols, + num_channels): + """Computes row, column and channel indices from flattened indices. 
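+
+  For an array flattened in row-major order with `num_cols` columns and
+  `num_channels` channels, a flat index i maps back to
+  row = (i // num_channels) // num_cols, col = (i // num_channels) % num_cols
+  and channel = i % num_channels. For example, with num_cols=4 and
+  num_channels=3, index 23 maps to (row, col, channel) = (1, 3, 2).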
+ + Args: + indices: An integer tensor of any shape holding the indices in the flattened + space. + num_cols: Number of columns in the image (width). + num_channels: Number of channels in the image. + + Returns: + row_indices: The row indices corresponding to each of the input indices. + Same shape as indices. + col_indices: The column indices corresponding to each of the input indices. + Same shape as indices. + channel_indices. The channel indices corresponding to each of the input + indices. + + """ + row_indices = (indices // num_channels) // num_cols + col_indices = (indices // num_channels) % num_cols + channel_indices = indices % num_channels + + return row_indices, col_indices, channel_indices + + +def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height, + width): + """Computes valid anchor weights for an image assuming pixels will be flattened. + + This function is useful when we only want to penalize valid areas in the + image in the case when padding is used. The function assumes that the loss + function will be applied after flattening the spatial dimensions and returns + anchor weights accordingly. + + Args: + true_image_shapes: An integer tensor of shape [batch_size, 3] representing + the true image shape (without padding) for each sample in the batch. + height: height of the prediction from the network. + width: width of the prediction from the network. + + Returns: + valid_anchor_weights: a float tensor of shape [batch_size, height * width] + with 1s in locations where the spatial coordinates fall within the height + and width in true_image_shapes. + """ + + indices = tf.reshape(tf.range(height * width), [1, -1]) + batch_size = tf.shape(true_image_shapes)[0] + batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices + + y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices( + batch_indices, width, 1) + + max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1] + max_x = _to_float32(tf.expand_dims(max_x, 1)) + max_y = _to_float32(tf.expand_dims(max_y, 1)) + + x_coords = _to_float32(x_coords) + y_coords = _to_float32(y_coords) + + valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y) + + return _to_float32(valid_mask) + + +def convert_strided_predictions_to_normalized_boxes(boxes, stride, + true_image_shapes): + """Converts predictions in the output space to normalized boxes. + + Boxes falling outside the valid image boundary are clipped to be on the + boundary. + + Args: + boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw + coordinates of boxes in the model's output space. + stride: The stride in the output space. + true_image_shapes: A tensor of shape [batch_size, 3] representing the true + shape of the input not considering padding. + + Returns: + boxes: A tensor of shape [batch_size, num_boxes, 4] representing the + coordinates of the normalized boxes. 
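+
+  Each box is first multiplied by `stride` to move from the feature-map grid
+  back to input pixels, then normalized by the true (unpadded) image height
+  and width, and finally clipped to the [0, 1] x [0, 1] window.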
+ """ + + def _normalize_boxlist(args): + + boxes, height, width = args + boxes = box_list_ops.scale(boxes, stride, stride) + boxes = box_list_ops.to_normalized_coordinates(boxes, height, width) + boxes = box_list_ops.clip_to_window(boxes, [0., 0., 1., 1.], + filter_nonoverlapping=False) + return boxes + + box_lists = [box_list.BoxList(boxes) for boxes in tf.unstack(boxes, axis=0)] + true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1) + + true_heights_list = tf.unstack(true_heights, axis=0) + true_widths_list = tf.unstack(true_widths, axis=0) + + box_lists = list(map(_normalize_boxlist, + zip(box_lists, true_heights_list, true_widths_list))) + boxes = tf.stack([box_list_instance.get() for + box_list_instance in box_lists], axis=0) + + return boxes + + +def convert_strided_predictions_to_normalized_keypoints( + keypoint_coords, keypoint_scores, stride, true_image_shapes, + clip_out_of_frame_keypoints=False): + """Converts predictions in the output space to normalized keypoints. + + If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside + the valid image boundary are normalized but not clipped; If + clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the + valid image boundary are clipped to the closest image boundary and the scores + will be set to 0.0. + + Args: + keypoint_coords: A tensor of shape + [batch_size, num_instances, num_keypoints, 2] holding the raw coordinates + of keypoints in the model's output space. + keypoint_scores: A tensor of shape + [batch_size, num_instances, num_keypoints] holding the keypoint scores. + stride: The stride in the output space. + true_image_shapes: A tensor of shape [batch_size, 3] representing the true + shape of the input not considering padding. + clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside + the image boundary should be clipped. If True, keypoint coords will be + clipped to image boundary. If False, keypoints are normalized but not + filtered based on their location. + + Returns: + keypoint_coords_normalized: A tensor of shape + [batch_size, num_instances, num_keypoints, 2] representing the coordinates + of the normalized keypoints. + keypoint_scores: A tensor of shape + [batch_size, num_instances, num_keypoints] representing the updated + keypoint scores. + """ + # Flatten keypoints and scores. + batch_size, _, _, _ = ( + shape_utils.combined_static_and_dynamic_shape(keypoint_coords)) + + # Scale and normalize keypoints. + true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1) + yscale = float(stride) / tf.cast(true_heights, tf.float32) + xscale = float(stride) / tf.cast(true_widths, tf.float32) + yx_scale = tf.stack([yscale, xscale], axis=1) + keypoint_coords_normalized = keypoint_coords * tf.reshape( + yx_scale, [batch_size, 1, 1, 2]) + + if clip_out_of_frame_keypoints: + # Determine the keypoints that are in the true image regions. + valid_indices = tf.logical_and( + tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0, + keypoint_coords_normalized[:, :, :, 0] <= 1.0), + tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0, + keypoint_coords_normalized[:, :, :, 1] <= 1.0)) + batch_window = tf.tile( + tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32), + multiples=[batch_size, 1]) + def clip_to_window(inputs): + keypoints, window = inputs + return keypoint_ops.clip_to_window(keypoints, window) + + # Specify the TensorSpec explicitly in the tf.map_fn to make it tf.lite + # compatible. 
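+    # The map_fn clips every keypoint to the [0, 1] x [0, 1] window; the scores
+    # of keypoints that fell outside that window (per `valid_indices` above)
+    # are zeroed out afterwards.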
+ kpts_dims = _get_shape(keypoint_coords_normalized, 4) + output_spec = tf.TensorSpec( + shape=[kpts_dims[1], kpts_dims[2], kpts_dims[3]], dtype=tf.float32) + keypoint_coords_normalized = tf.map_fn( + clip_to_window, (keypoint_coords_normalized, batch_window), + dtype=tf.float32, back_prop=False, + fn_output_signature=output_spec) + keypoint_scores = tf.where(valid_indices, keypoint_scores, + tf.zeros_like(keypoint_scores)) + return keypoint_coords_normalized, keypoint_scores + + +def convert_strided_predictions_to_instance_masks( + boxes, classes, masks, true_image_shapes, + densepose_part_heatmap=None, densepose_surface_coords=None, stride=4, + mask_height=256, mask_width=256, score_threshold=0.5, + densepose_class_index=-1): + """Converts predicted full-image masks into instance masks. + + For each predicted detection box: + * Crop and resize the predicted mask (and optionally DensePose coordinates) + based on the detected bounding box coordinates and class prediction. Uses + bilinear resampling. + * Binarize the mask using the provided score threshold. + + Args: + boxes: A tensor of shape [batch, max_detections, 4] holding the predicted + boxes, in normalized coordinates (relative to the true image dimensions). + classes: An integer tensor of shape [batch, max_detections] containing the + detected class for each box (0-indexed). + masks: A [batch, output_height, output_width, num_classes] float32 + tensor with class probabilities. + true_image_shapes: A tensor of shape [batch, 3] representing the true + shape of the inputs not considering padding. + densepose_part_heatmap: (Optional) A [batch, output_height, output_width, + num_parts] float32 tensor with part scores (i.e. logits). + densepose_surface_coords: (Optional) A [batch, output_height, output_width, + 2 * num_parts] float32 tensor with predicted part coordinates (in + vu-format). + stride: The stride in the output space. + mask_height: The desired resized height for instance masks. + mask_width: The desired resized width for instance masks. + score_threshold: The threshold at which to convert predicted mask + into foreground pixels. + densepose_class_index: The class index (0-indexed) corresponding to the + class which has DensePose labels (e.g. person class). + + Returns: + A tuple of masks and surface_coords. + instance_masks: A [batch_size, max_detections, mask_height, mask_width] + uint8 tensor with predicted foreground mask for each + instance. If DensePose tensors are provided, then each pixel value in the + mask encodes the 1-indexed part. + surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2] + float32 tensor with (v, u) coordinates. Note that v, u coordinates are + only defined on instance masks, and the coordinates at each location of + the foreground mask correspond to coordinates on a local part coordinate + system (the specific part can be inferred from the `instance_masks` + output. If DensePose feature maps are not passed to this function, this + output will be None. + + Raises: + ValueError: If one but not both of `densepose_part_heatmap` and + `densepose_surface_coords` is provided. + """ + batch_size, output_height, output_width, _ = ( + shape_utils.combined_static_and_dynamic_shape(masks)) + input_height = stride * output_height + input_width = stride * output_width + + true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1) + # If necessary, create dummy DensePose tensors to simplify the map function. 
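+  # Zero-filled placeholders are used when the DensePose heads are absent so
+  # that a single per-image map function signature works in both cases; the
+  # surface coordinate output is then dropped again and returned as None.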
+ densepose_present = True + if ((densepose_part_heatmap is not None) ^ + (densepose_surface_coords is not None)): + raise ValueError('To use DensePose, both `densepose_part_heatmap` and ' + '`densepose_surface_coords` must be provided') + if densepose_part_heatmap is None and densepose_surface_coords is None: + densepose_present = False + densepose_part_heatmap = tf.zeros( + (batch_size, output_height, output_width, 1), dtype=tf.float32) + densepose_surface_coords = tf.zeros( + (batch_size, output_height, output_width, 2), dtype=tf.float32) + crop_and_threshold_fn = functools.partial( + crop_and_threshold_masks, input_height=input_height, + input_width=input_width, mask_height=mask_height, mask_width=mask_width, + score_threshold=score_threshold, + densepose_class_index=densepose_class_index) + + instance_masks, surface_coords = shape_utils.static_or_dynamic_map_fn( + crop_and_threshold_fn, + elems=[boxes, classes, masks, densepose_part_heatmap, + densepose_surface_coords, true_heights, true_widths], + dtype=[tf.uint8, tf.float32], + back_prop=False) + surface_coords = surface_coords if densepose_present else None + return instance_masks, surface_coords + + +def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256, + mask_width=256, score_threshold=0.5, + densepose_class_index=-1): + """Crops and thresholds masks based on detection boxes. + + Args: + elems: A tuple of + boxes - float32 tensor of shape [max_detections, 4] + classes - int32 tensor of shape [max_detections] (0-indexed) + masks - float32 tensor of shape [output_height, output_width, num_classes] + part_heatmap - float32 tensor of shape [output_height, output_width, + num_parts] + surf_coords - float32 tensor of shape [output_height, output_width, + 2 * num_parts] + true_height - scalar int tensor + true_width - scalar int tensor + input_height: Input height to network. + input_width: Input width to network. + mask_height: Height for resizing mask crops. + mask_width: Width for resizing mask crops. + score_threshold: The threshold at which to convert predicted mask + into foreground pixels. + densepose_class_index: scalar int tensor with the class index (0-indexed) + for DensePose. + + Returns: + A tuple of + all_instances: A [max_detections, mask_height, mask_width] uint8 tensor + with a predicted foreground mask for each instance. Background is encoded + as 0, and foreground is encoded as a positive integer. Specific part + indices are encoded as 1-indexed parts (for classes that have part + information). + surface_coords: A [max_detections, mask_height, mask_width, 2] + float32 tensor with (v, u) coordinates. for each part. + """ + (boxes, classes, masks, part_heatmap, surf_coords, true_height, + true_width) = elems + # Boxes are in normalized coordinates relative to true image shapes. Convert + # coordinates to be normalized relative to input image shapes (since masks + # may still have padding). + boxlist = box_list.BoxList(boxes) + y_scale = true_height / input_height + x_scale = true_width / input_width + boxlist = box_list_ops.scale(boxlist, y_scale, x_scale) + boxes = boxlist.get() + # Convert masks from [output_height, output_width, num_classes] to + # [num_classes, output_height, output_width, 1]. + num_classes = tf.shape(masks)[-1] + masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis] + # Tile part and surface coordinate masks for all classes. 
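+  # Tiling lets a single crop_and_resize call (indexed by the predicted class)
+  # crop the instance mask, the part heatmap and the surface coordinates
+  # together; the three pieces are split apart again right after the crop.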
+ part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :], + multiples=[num_classes, 1, 1, 1]) + surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :], + multiples=[num_classes, 1, 1, 1]) + feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d], + axis=-1) + # The following tensor has shape + # [max_detections, mask_height, mask_width, 1 + 3 * num_parts]. + cropped_masks = tf2.image.crop_and_resize( + feature_maps_concat, + boxes=boxes, + box_indices=classes, + crop_size=[mask_height, mask_width], + method='bilinear') + + # Split the cropped masks back into instance masks, part masks, and surface + # coordinates. + num_parts = tf.shape(part_heatmap)[-1] + instance_masks, part_heatmap_cropped, surface_coords_cropped = tf.split( + cropped_masks, [1, num_parts, 2 * num_parts], axis=-1) + + # Threshold the instance masks. Resulting tensor has shape + # [max_detections, mask_height, mask_width, 1]. + instance_masks_int = tf.cast( + tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32) + + # Produce a binary mask that is 1.0 only: + # - in the foreground region for an instance + # - in detections corresponding to the DensePose class + det_with_parts = tf.equal(classes, densepose_class_index) + det_with_parts = tf.cast( + tf.reshape(det_with_parts, [-1, 1, 1, 1]), dtype=tf.int32) + instance_masks_with_parts = tf.math.multiply(instance_masks_int, + det_with_parts) + + # Similarly, produce a binary mask that holds the foreground masks only for + # instances without parts (i.e. non-DensePose classes). + det_without_parts = 1 - det_with_parts + instance_masks_without_parts = tf.math.multiply(instance_masks_int, + det_without_parts) + + # Assemble a tensor that has standard instance segmentation masks for + # non-DensePose classes (with values in [0, 1]), and part segmentation masks + # for DensePose classes (with vaues in [0, 1, ..., num_parts]). + part_mask_int_zero_indexed = tf.math.argmax( + part_heatmap_cropped, axis=-1, output_type=tf.int32)[:, :, :, tf.newaxis] + part_mask_int_one_indexed = part_mask_int_zero_indexed + 1 + all_instances = (instance_masks_without_parts + + instance_masks_with_parts * part_mask_int_one_indexed) + + # Gather the surface coordinates for the parts. + surface_coords_cropped = tf.reshape( + surface_coords_cropped, [-1, mask_height, mask_width, num_parts, 2]) + surface_coords = gather_surface_coords_for_parts(surface_coords_cropped, + part_mask_int_zero_indexed) + surface_coords = ( + surface_coords * tf.cast(instance_masks_with_parts, tf.float32)) + + return [tf.squeeze(all_instances, axis=3), surface_coords] + + +def gather_surface_coords_for_parts(surface_coords_cropped, + highest_scoring_part): + """Gathers the (v, u) coordinates for the highest scoring DensePose parts. + + Args: + surface_coords_cropped: A [max_detections, height, width, num_parts, 2] + float32 tensor with (v, u) surface coordinates. + highest_scoring_part: A [max_detections, height, width] integer tensor with + the highest scoring part (0-indexed) indices for each location. + + Returns: + A [max_detections, height, width, 2] float32 tensor with the (v, u) + coordinates selected from the highest scoring parts. 
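+
+    For intuition (illustrative, assuming row-major flattening): the (v, u)
+    pair of part p at flattened pixel i sits at row num_parts * i + p of the
+    flattened coordinate tensor, so the lookup index
+    num_parts * i + highest_scoring_part[i] picks out the winning part's
+    coordinates at that pixel.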
+ """ + max_detections, height, width, num_parts, _ = ( + shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped)) + flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2]) + flattened_part_ids = tf.reshape(highest_scoring_part, [-1]) + + # Produce lookup indices that represent the locations of the highest scoring + # parts in the `flattened_surface_coords` tensor. + flattened_lookup_indices = ( + num_parts * tf.range(max_detections * height * width) + + flattened_part_ids) + + vu_coords_flattened = tf.gather(flattened_surface_coords, + flattened_lookup_indices, axis=0) + return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2]) + + +def predicted_embeddings_at_object_centers(embedding_predictions, + y_indices, x_indices): + """Returns the predicted embeddings at specified object centers. + + Args: + embedding_predictions: A float tensor of shape [batch_size, height, width, + reid_embed_size] holding predicted embeddings. + y_indices: A [batch, num_instances] int tensor holding y indices for object + centers. These indices correspond to locations in the output feature map. + x_indices: A [batch, num_instances] int tensor holding x indices for object + centers. These indices correspond to locations in the output feature map. + + Returns: + A float tensor of shape [batch_size, num_objects, reid_embed_size] where + predicted embeddings are gathered at the provided locations. + """ + batch_size, _, width, _ = _get_shape(embedding_predictions, 4) + flattened_indices = flattened_indices_from_row_col_indices( + y_indices, x_indices, width) + _, num_instances = _get_shape(flattened_indices, 2) + embeddings_flat = _flatten_spatial_dimensions(embedding_predictions) + embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1) + embeddings = tf.reshape(embeddings, [batch_size, num_instances, -1]) + + return embeddings + + +class ObjectDetectionParams( + collections.namedtuple('ObjectDetectionParams', [ + 'localization_loss', 'scale_loss_weight', 'offset_loss_weight', + 'task_loss_weight' + ])): + """Namedtuple to host object detection related parameters. + + This is a wrapper class over the fields that are either the hyper-parameters + or the loss functions needed for the object detection task. The class is + immutable after constructed. Please see the __new__ function for detailed + information for each fields. + """ + + __slots__ = () + + def __new__(cls, + localization_loss, + scale_loss_weight, + offset_loss_weight, + task_loss_weight=1.0): + """Constructor with default values for ObjectDetectionParams. + + Args: + localization_loss: a object_detection.core.losses.Loss object to compute + the loss for the center offset and height/width predictions in + CenterNet. + scale_loss_weight: float, The weight for localizing box size. Note that + the scale loss is dependent on the input image size, since we penalize + the raw height and width. This constant may need to be adjusted + depending on the input size. + offset_loss_weight: float, The weight for localizing center offsets. + task_loss_weight: float, the weight of the object detection loss. + + Returns: + An initialized ObjectDetectionParams namedtuple. 
+ """ + return super(ObjectDetectionParams, + cls).__new__(cls, localization_loss, scale_loss_weight, + offset_loss_weight, task_loss_weight) + + +class KeypointEstimationParams( + collections.namedtuple('KeypointEstimationParams', [ + 'task_name', 'class_id', 'keypoint_indices', 'classification_loss', + 'localization_loss', 'keypoint_labels', 'keypoint_std_dev', + 'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight', + 'keypoint_regression_loss_weight', 'keypoint_candidate_score_threshold', + 'heatmap_bias_init', 'num_candidates_per_keypoint', 'task_loss_weight', + 'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale', + 'candidate_search_scale', 'candidate_ranking_mode', + 'offset_peak_radius', 'per_keypoint_offset' + ])): + """Namedtuple to host object detection related parameters. + + This is a wrapper class over the fields that are either the hyper-parameters + or the loss functions needed for the keypoint estimation task. The class is + immutable after constructed. Please see the __new__ function for detailed + information for each fields. + """ + + __slots__ = () + + def __new__(cls, + task_name, + class_id, + keypoint_indices, + classification_loss, + localization_loss, + keypoint_labels=None, + keypoint_std_dev=None, + keypoint_heatmap_loss_weight=1.0, + keypoint_offset_loss_weight=1.0, + keypoint_regression_loss_weight=1.0, + keypoint_candidate_score_threshold=0.1, + heatmap_bias_init=-2.19, + num_candidates_per_keypoint=100, + task_loss_weight=1.0, + peak_max_pool_kernel_size=3, + unmatched_keypoint_score=0.1, + box_scale=1.2, + candidate_search_scale=0.3, + candidate_ranking_mode='min_distance', + offset_peak_radius=0, + per_keypoint_offset=False): + """Constructor with default values for KeypointEstimationParams. + + Args: + task_name: string, the name of the task this namedtuple corresponds to. + Note that it should be an unique identifier of the task. + class_id: int, the ID of the class that contains the target keypoints to + considered in this task. For example, if the task is human pose + estimation, the class id should correspond to the "human" class. Note + that the ID is 0-based, meaning that class 0 corresponds to the first + non-background object class. + keypoint_indices: A list of integers representing the indicies of the + keypoints to be considered in this task. This is used to retrieve the + subset of the keypoints from gt_keypoints that should be considered in + this task. + classification_loss: an object_detection.core.losses.Loss object to + compute the loss for the class predictions in CenterNet. + localization_loss: an object_detection.core.losses.Loss object to compute + the loss for the center offset and height/width predictions in + CenterNet. + keypoint_labels: A list of strings representing the label text of each + keypoint, e.g. "nose", 'left_shoulder". Note that the length of this + list should be equal to keypoint_indices. + keypoint_std_dev: A list of float represent the standard deviation of the + Gaussian kernel used to generate the keypoint heatmap. It is to provide + the flexibility of using different sizes of Gaussian kernel for each + keypoint class. + keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap. + keypoint_offset_loss_weight: float, The weight for the keypoint offsets + loss. + keypoint_regression_loss_weight: float, The weight for keypoint regression + loss. Note that the loss is dependent on the input image size, since we + penalize the raw height and width. 
This constant may need to be adjusted + depending on the input size. + keypoint_candidate_score_threshold: float, The heatmap score threshold for + a keypoint to become a valid candidate. + heatmap_bias_init: float, the initial value of bias in the convolutional + kernel of the class prediction head. If set to None, the bias is + initialized with zeros. + num_candidates_per_keypoint: The maximum number of candidates to retrieve + for each keypoint. + task_loss_weight: float, the weight of the keypoint estimation loss. + peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak + score locations in a neighborhood (independently for each keypoint + types). + unmatched_keypoint_score: The default score to use for regressed keypoints + that are not successfully snapped to a nearby candidate. + box_scale: The multiplier to expand the bounding boxes (either the + provided boxes or those which tightly cover the regressed keypoints). + candidate_search_scale: The scale parameter that multiplies the largest + dimension of a bounding box. The resulting distance becomes a search + radius for candidates in the vicinity of each regressed keypoint. + candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio'] + indicating how to select the keypoint candidate. + offset_peak_radius: The radius (in the unit of output pixel) around + groundtruth heatmap peak to assign the offset targets. If set 0, then + the offset target will only be assigned to the heatmap peak (same + behavior as the original paper). + per_keypoint_offset: A bool indicates whether to assign offsets for each + keypoint channel separately. If set False, the output offset target has + the shape [batch_size, out_height, out_width, 2] (same behavior as the + original paper). If set True, the output offset target has the shape + [batch_size, out_height, out_width, 2 * num_keypoints] (recommended when + the offset_peak_radius is not zero). + + Returns: + An initialized KeypointEstimationParams namedtuple. + """ + return super(KeypointEstimationParams, cls).__new__( + cls, task_name, class_id, keypoint_indices, classification_loss, + localization_loss, keypoint_labels, keypoint_std_dev, + keypoint_heatmap_loss_weight, keypoint_offset_loss_weight, + keypoint_regression_loss_weight, keypoint_candidate_score_threshold, + heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight, + peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale, + candidate_search_scale, candidate_ranking_mode, offset_peak_radius, + per_keypoint_offset) + + +class ObjectCenterParams( + collections.namedtuple('ObjectCenterParams', [ + 'classification_loss', 'object_center_loss_weight', 'heatmap_bias_init', + 'min_box_overlap_iou', 'max_box_predictions', 'use_only_known_classes' + ])): + """Namedtuple to store object center prediction related parameters.""" + + __slots__ = () + + def __new__(cls, + classification_loss, + object_center_loss_weight, + heatmap_bias_init=-2.19, + min_box_overlap_iou=0.7, + max_box_predictions=100, + use_labeled_classes=False): + """Constructor with default values for ObjectCenterParams. + + Args: + classification_loss: an object_detection.core.losses.Loss object to + compute the loss for the class predictions in CenterNet. + object_center_loss_weight: float, The weight for the object center loss. + heatmap_bias_init: float, the initial value of bias in the convolutional + kernel of the object center prediction head. If set to None, the bias is + initialized with zeros. 
+ min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes + need have with groundtruth boxes to not be penalized. This is used for + computing the class specific center heatmaps. + max_box_predictions: int, the maximum number of boxes to predict. + use_labeled_classes: boolean, compute the loss only labeled classes. + + Returns: + An initialized ObjectCenterParams namedtuple. + """ + return super(ObjectCenterParams, + cls).__new__(cls, classification_loss, + object_center_loss_weight, heatmap_bias_init, + min_box_overlap_iou, max_box_predictions, + use_labeled_classes) + + +class MaskParams( + collections.namedtuple('MaskParams', [ + 'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width', + 'score_threshold', 'heatmap_bias_init' + ])): + """Namedtuple to store mask prediction related parameters.""" + + __slots__ = () + + def __new__(cls, + classification_loss, + task_loss_weight=1.0, + mask_height=256, + mask_width=256, + score_threshold=0.5, + heatmap_bias_init=-2.19): + """Constructor with default values for MaskParams. + + Args: + classification_loss: an object_detection.core.losses.Loss object to + compute the loss for the semantic segmentation predictions in CenterNet. + task_loss_weight: float, The loss weight for the segmentation task. + mask_height: The height of the resized instance segmentation mask. + mask_width: The width of the resized instance segmentation mask. + score_threshold: The threshold at which to convert predicted mask + probabilities (after passing through sigmoid) into foreground pixels. + heatmap_bias_init: float, the initial value of bias in the convolutional + kernel of the semantic segmentation prediction head. If set to None, the + bias is initialized with zeros. + + Returns: + An initialized MaskParams namedtuple. + """ + return super(MaskParams, + cls).__new__(cls, classification_loss, + task_loss_weight, mask_height, mask_width, + score_threshold, heatmap_bias_init) + + +class DensePoseParams( + collections.namedtuple('DensePoseParams', [ + 'class_id', 'classification_loss', 'localization_loss', + 'part_loss_weight', 'coordinate_loss_weight', 'num_parts', + 'task_loss_weight', 'upsample_to_input_res', 'upsample_method', + 'heatmap_bias_init' + ])): + """Namedtuple to store DensePose prediction related parameters.""" + + __slots__ = () + + def __new__(cls, + class_id, + classification_loss, + localization_loss, + part_loss_weight=1.0, + coordinate_loss_weight=1.0, + num_parts=24, + task_loss_weight=1.0, + upsample_to_input_res=True, + upsample_method='bilinear', + heatmap_bias_init=-2.19): + """Constructor with default values for DensePoseParams. + + Args: + class_id: the ID of the class that contains the DensePose groundtruth. + This should typically correspond to the "person" class. Note that the ID + is 0-based, meaning that class 0 corresponds to the first non-background + object class. + classification_loss: an object_detection.core.losses.Loss object to + compute the loss for the body part predictions in CenterNet. + localization_loss: an object_detection.core.losses.Loss object to compute + the loss for the surface coordinate regression in CenterNet. + part_loss_weight: The loss weight to apply to part prediction. + coordinate_loss_weight: The loss weight to apply to surface coordinate + prediction. + num_parts: The number of DensePose parts to predict. + task_loss_weight: float, the loss weight for the DensePose task. 
+ upsample_to_input_res: Whether to upsample the DensePose feature maps to + the input resolution before applying loss. Note that the prediction + outputs are still at the standard CenterNet output stride. + upsample_method: Method for upsampling DensePose feature maps. Options are + either 'bilinear' or 'nearest'). This takes no effect when + `upsample_to_input_res` is False. + heatmap_bias_init: float, the initial value of bias in the convolutional + kernel of the part prediction head. If set to None, the + bias is initialized with zeros. + + Returns: + An initialized DensePoseParams namedtuple. + """ + return super(DensePoseParams, + cls).__new__(cls, class_id, classification_loss, + localization_loss, part_loss_weight, + coordinate_loss_weight, num_parts, + task_loss_weight, upsample_to_input_res, + upsample_method, heatmap_bias_init) + + +class TrackParams( + collections.namedtuple('TrackParams', [ + 'num_track_ids', 'reid_embed_size', 'num_fc_layers', + 'classification_loss', 'task_loss_weight' + ])): + """Namedtuple to store tracking prediction related parameters.""" + + __slots__ = () + + def __new__(cls, + num_track_ids, + reid_embed_size, + num_fc_layers, + classification_loss, + task_loss_weight=1.0): + """Constructor with default values for TrackParams. + + Args: + num_track_ids: int. The maximum track ID in the dataset. Used for ReID + embedding classification task. + reid_embed_size: int. The embedding size for ReID task. + num_fc_layers: int. The number of (fully-connected, batch-norm, relu) + layers for track ID classification head. + classification_loss: an object_detection.core.losses.Loss object to + compute the loss for the ReID embedding in CenterNet. + task_loss_weight: float, the loss weight for the tracking task. + + Returns: + An initialized TrackParams namedtuple. + """ + return super(TrackParams, + cls).__new__(cls, num_track_ids, reid_embed_size, + num_fc_layers, classification_loss, + task_loss_weight) + + +class TemporalOffsetParams( + collections.namedtuple('TemporalOffsetParams', [ + 'localization_loss', 'task_loss_weight' + ])): + """Namedtuple to store temporal offset related parameters.""" + + __slots__ = () + + def __new__(cls, + localization_loss, + task_loss_weight=1.0): + """Constructor with default values for TrackParams. + + Args: + localization_loss: an object_detection.core.losses.Loss object to + compute the loss for the temporal offset in CenterNet. + task_loss_weight: float, the loss weight for the temporal offset + task. + + Returns: + An initialized TemporalOffsetParams namedtuple. + """ + return super(TemporalOffsetParams, + cls).__new__(cls, localization_loss, task_loss_weight) + +# The following constants are used to generate the keys of the +# (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch +# class. 
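+# Keypoint-task heads are additionally namespaced per task through
+# get_keypoint_name below, e.g. (hypothetical task name)
+# 'human_pose/keypoint/heatmap'.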
+DETECTION_TASK = 'detection_task' +OBJECT_CENTER = 'object_center' +BOX_SCALE = 'box/scale' +BOX_OFFSET = 'box/offset' +KEYPOINT_REGRESSION = 'keypoint/regression' +KEYPOINT_HEATMAP = 'keypoint/heatmap' +KEYPOINT_OFFSET = 'keypoint/offset' +SEGMENTATION_TASK = 'segmentation_task' +SEGMENTATION_HEATMAP = 'segmentation/heatmap' +DENSEPOSE_TASK = 'densepose_task' +DENSEPOSE_HEATMAP = 'densepose/heatmap' +DENSEPOSE_REGRESSION = 'densepose/regression' +LOSS_KEY_PREFIX = 'Loss' +TRACK_TASK = 'track_task' +TRACK_REID = 'track/reid' +TEMPORALOFFSET_TASK = 'temporal_offset_task' +TEMPORAL_OFFSET = 'track/offset' + + +def get_keypoint_name(task_name, head_name): + return '%s/%s' % (task_name, head_name) + + +def get_num_instances_from_weights(groundtruth_weights_list): + """Computes the number of instances/boxes from the weights in a batch. + + Args: + groundtruth_weights_list: A list of float tensors with shape + [max_num_instances] representing whether there is an actual instance in + the image (with non-zero value) or is padded to match the + max_num_instances (with value 0.0). The list represents the batch + dimension. + + Returns: + A scalar integer tensor incidating how many instances/boxes are in the + images in the batch. Note that this function is usually used to normalize + the loss so the minimum return value is 1 to avoid weird behavior. + """ + num_instances = tf.reduce_sum( + [tf.math.count_nonzero(w) for w in groundtruth_weights_list]) + num_instances = tf.maximum(num_instances, 1) + return num_instances + + +class CenterNetMetaArch(model.DetectionModel): + """The CenterNet meta architecture [1]. + + [1]: https://arxiv.org/abs/1904.07850 + """ + + def __init__(self, + is_training, + add_summaries, + num_classes, + feature_extractor, + image_resizer_fn, + object_center_params, + object_detection_params=None, + keypoint_params_dict=None, + mask_params=None, + densepose_params=None, + track_params=None, + temporal_offset_params=None, + use_depthwise=False, + compute_heatmap_sparse=False): + """Initializes a CenterNet model. + + Args: + is_training: Set to True if this model is being built for training. + add_summaries: Whether to add tf summaries in the model. + num_classes: int, The number of classes that the model should predict. + feature_extractor: A CenterNetFeatureExtractor to use to extract features + from an image. + image_resizer_fn: a callable for image resizing. This callable always + takes a rank-3 image tensor (corresponding to a single image) and + returns a rank-3 image tensor, possibly with new spatial dimensions and + a 1-D tensor of shape [3] indicating shape of true image within the + resized image tensor as the resized image tensor could be padded. See + builders/image_resizer_builder.py. + object_center_params: An ObjectCenterParams namedtuple. This object holds + the hyper-parameters for object center prediction. This is required by + either object detection or keypoint estimation tasks. + object_detection_params: An ObjectDetectionParams namedtuple. This object + holds the hyper-parameters necessary for object detection. Please see + the class definition for more details. + keypoint_params_dict: A dictionary that maps from task name to the + corresponding KeypointEstimationParams namedtuple. This object holds the + hyper-parameters necessary for multiple keypoint estimations. Please + see the class definition for more details. + mask_params: A MaskParams namedtuple. This object + holds the hyper-parameters for segmentation. 
Please see the class + definition for more details. + densepose_params: A DensePoseParams namedtuple. This object holds the + hyper-parameters for DensePose prediction. Please see the class + definition for more details. Note that if this is provided, it is + expected that `mask_params` is also provided. + track_params: A TrackParams namedtuple. This object + holds the hyper-parameters for tracking. Please see the class + definition for more details. + temporal_offset_params: A TemporalOffsetParams namedtuple. This object + holds the hyper-parameters for offset prediction based tracking. + use_depthwise: If true, all task heads will be constructed using + separable_conv. Otherwise, standard convoltuions will be used. + compute_heatmap_sparse: bool, whether or not to use the sparse version of + the Op that computes the center heatmaps. The sparse version scales + better with number of channels in the heatmap, but in some cases is + known to cause an OOM error. See b/170989061. + """ + assert object_detection_params or keypoint_params_dict + # Shorten the name for convenience and better formatting. + self._is_training = is_training + # The Objects as Points paper attaches loss functions to multiple + # (`num_feature_outputs`) feature maps in the the backbone. E.g. + # for the hourglass backbone, `num_feature_outputs` is 2. + self._num_classes = num_classes + self._feature_extractor = feature_extractor + self._num_feature_outputs = feature_extractor.num_feature_outputs + self._stride = self._feature_extractor.out_stride + self._image_resizer_fn = image_resizer_fn + self._center_params = object_center_params + self._od_params = object_detection_params + self._kp_params_dict = keypoint_params_dict + self._mask_params = mask_params + if densepose_params is not None and mask_params is None: + raise ValueError('To run DensePose prediction, `mask_params` must also ' + 'be supplied.') + self._densepose_params = densepose_params + self._track_params = track_params + self._temporal_offset_params = temporal_offset_params + + self._use_depthwise = use_depthwise + self._compute_heatmap_sparse = compute_heatmap_sparse + + # Construct the prediction head nets. + self._prediction_head_dict = self._construct_prediction_heads( + num_classes, + self._num_feature_outputs, + class_prediction_bias_init=self._center_params.heatmap_bias_init) + # Initialize the target assigners. + self._target_assigner_dict = self._initialize_target_assigners( + stride=self._stride, + min_box_overlap_iou=self._center_params.min_box_overlap_iou) + + # Will be used in VOD single_frame_meta_arch for tensor reshape. + self._batched_prediction_tensor_names = [] + + super(CenterNetMetaArch, self).__init__(num_classes) + + @property + def batched_prediction_tensor_names(self): + if not self._batched_prediction_tensor_names: + raise RuntimeError('Must call predict() method to get batched prediction ' + 'tensor names.') + return self._batched_prediction_tensor_names + + def _construct_prediction_heads(self, num_classes, num_feature_outputs, + class_prediction_bias_init): + """Constructs the prediction heads based on the specific parameters. + + Args: + num_classes: An integer indicating how many classes in total to predict. + num_feature_outputs: An integer indicating how many feature outputs to use + for calculating the loss. The Objects as Points paper attaches loss + functions to multiple (`num_feature_outputs`) feature maps in the the + backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2. 
+ class_prediction_bias_init: float, the initial value of bias in the + convolutional kernel of the class prediction head. If set to None, the + bias is initialized with zeros. + + Returns: + A dictionary of keras modules generated by calling make_prediction_net + function. It will also create and set a private member of the class when + learning the tracking task. + """ + prediction_heads = {} + prediction_heads[OBJECT_CENTER] = [ + make_prediction_net(num_classes, bias_fill=class_prediction_bias_init, + use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs) + ] + if self._od_params is not None: + prediction_heads[BOX_SCALE] = [ + make_prediction_net( + NUM_SIZE_CHANNELS, use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs) + ] + prediction_heads[BOX_OFFSET] = [ + make_prediction_net( + NUM_OFFSET_CHANNELS, use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs) + ] + if self._kp_params_dict is not None: + for task_name, kp_params in self._kp_params_dict.items(): + num_keypoints = len(kp_params.keypoint_indices) + # pylint: disable=g-complex-comprehension + prediction_heads[get_keypoint_name(task_name, KEYPOINT_HEATMAP)] = [ + make_prediction_net( + num_keypoints, + bias_fill=kp_params.heatmap_bias_init, + use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs) + ] + # pylint: enable=g-complex-comprehension + prediction_heads[get_keypoint_name(task_name, KEYPOINT_REGRESSION)] = [ + make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints, + use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs) + ] + if kp_params.per_keypoint_offset: + prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [ + make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints, + use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs) + ] + else: + prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [ + make_prediction_net(NUM_OFFSET_CHANNELS, + use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs) + ] + # pylint: disable=g-complex-comprehension + if self._mask_params is not None: + prediction_heads[SEGMENTATION_HEATMAP] = [ + make_prediction_net( + num_classes, + bias_fill=self._mask_params.heatmap_bias_init, + use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs)] + if self._densepose_params is not None: + prediction_heads[DENSEPOSE_HEATMAP] = [ + make_prediction_net( + self._densepose_params.num_parts, + bias_fill=self._densepose_params.heatmap_bias_init, + use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs)] + prediction_heads[DENSEPOSE_REGRESSION] = [ + make_prediction_net(2 * self._densepose_params.num_parts, + use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs) + ] + # pylint: enable=g-complex-comprehension + if self._track_params is not None: + prediction_heads[TRACK_REID] = [ + make_prediction_net(self._track_params.reid_embed_size, + use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs)] + + # Creates a classification network to train object embeddings by learning + # a projection from embedding space to object track ID space. 
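+      # The classifier below stacks (num_fc_layers - 1) Dense/BatchNorm/ReLU
+      # blocks at reid_embed_size and ends with a Dense layer projecting the
+      # embedding to num_track_ids logits.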
+ self.track_reid_classification_net = tf.keras.Sequential() + for _ in range(self._track_params.num_fc_layers - 1): + self.track_reid_classification_net.add( + tf.keras.layers.Dense(self._track_params.reid_embed_size, + input_shape=( + self._track_params.reid_embed_size,))) + self.track_reid_classification_net.add( + tf.keras.layers.BatchNormalization()) + self.track_reid_classification_net.add(tf.keras.layers.ReLU()) + self.track_reid_classification_net.add( + tf.keras.layers.Dense(self._track_params.num_track_ids, + input_shape=( + self._track_params.reid_embed_size,))) + if self._temporal_offset_params is not None: + prediction_heads[TEMPORAL_OFFSET] = [ + make_prediction_net(NUM_OFFSET_CHANNELS, + use_depthwise=self._use_depthwise) + for _ in range(num_feature_outputs) + ] + return prediction_heads + + def _initialize_target_assigners(self, stride, min_box_overlap_iou): + """Initializes the target assigners and puts them in a dictionary. + + Args: + stride: An integer indicating the stride of the image. + min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes + need have with groundtruth boxes to not be penalized. This is used for + computing the class specific center heatmaps. + + Returns: + A dictionary of initialized target assigners for each task. + """ + target_assigners = {} + target_assigners[OBJECT_CENTER] = ( + cn_assigner.CenterNetCenterHeatmapTargetAssigner( + stride, min_box_overlap_iou, self._compute_heatmap_sparse)) + if self._od_params is not None: + target_assigners[DETECTION_TASK] = ( + cn_assigner.CenterNetBoxTargetAssigner(stride)) + if self._kp_params_dict is not None: + for task_name, kp_params in self._kp_params_dict.items(): + target_assigners[task_name] = ( + cn_assigner.CenterNetKeypointTargetAssigner( + stride=stride, + class_id=kp_params.class_id, + keypoint_indices=kp_params.keypoint_indices, + keypoint_std_dev=kp_params.keypoint_std_dev, + peak_radius=kp_params.offset_peak_radius, + per_keypoint_offset=kp_params.per_keypoint_offset, + compute_heatmap_sparse=self._compute_heatmap_sparse)) + if self._mask_params is not None: + target_assigners[SEGMENTATION_TASK] = ( + cn_assigner.CenterNetMaskTargetAssigner(stride)) + if self._densepose_params is not None: + dp_stride = 1 if self._densepose_params.upsample_to_input_res else stride + target_assigners[DENSEPOSE_TASK] = ( + cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride)) + if self._track_params is not None: + target_assigners[TRACK_TASK] = ( + cn_assigner.CenterNetTrackTargetAssigner( + stride, self._track_params.num_track_ids)) + if self._temporal_offset_params is not None: + target_assigners[TEMPORALOFFSET_TASK] = ( + cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride)) + + return target_assigners + + def _compute_object_center_loss(self, input_height, input_width, + object_center_predictions, per_pixel_weights): + """Computes the object center loss. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + object_center_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, num_classes] representing the object center + feature maps. + per_pixel_weights: A float tensor of shape [batch_size, + out_height * out_width, 1] with 1s in locations where the spatial + coordinates fall within the height and width in true_image_shapes. + + Returns: + A float scalar tensor representing the object center loss per instance. 
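+      Concretely, the loss below is summed over all feature output heads and
+      divided by (number of heads * max(number of groundtruth boxes, 1)).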
+ """ + gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) + gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + + if self._center_params.use_only_known_classes: + gt_labeled_classes_list = self.groundtruth_lists( + fields.InputDataFields.groundtruth_labeled_classes) + batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0) + batch_labeled_classes_shape = tf.shape(batch_labeled_classes) + batch_labeled_classes = tf.reshape( + batch_labeled_classes, + [batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]]) + per_pixel_weights = per_pixel_weights * batch_labeled_classes + + # Convert the groundtruth to targets. + assigner = self._target_assigner_dict[OBJECT_CENTER] + heatmap_targets = assigner.assign_center_targets_from_boxes( + height=input_height, + width=input_width, + gt_boxes_list=gt_boxes_list, + gt_classes_list=gt_classes_list, + gt_weights_list=gt_weights_list) + + flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets) + num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) + + loss = 0.0 + object_center_loss = self._center_params.classification_loss + # Loop through each feature output head. + for pred in object_center_predictions: + pred = _flatten_spatial_dimensions(pred) + loss += object_center_loss( + pred, flattened_heatmap_targets, weights=per_pixel_weights) + loss_per_instance = tf.reduce_sum(loss) / ( + float(len(object_center_predictions)) * num_boxes) + return loss_per_instance + + def _compute_object_detection_losses(self, input_height, input_width, + prediction_dict, per_pixel_weights): + """Computes the weighted object detection losses. + + This wrapper function calls the function which computes the losses for + object detection task and applies corresponding weights to the losses. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + prediction_dict: A dictionary holding predicted tensors output by + "predict" function. See "predict" function for more detailed + description. + per_pixel_weights: A float tensor of shape [batch_size, + out_height * out_width, 1] with 1s in locations where the spatial + coordinates fall within the height and width in true_image_shapes. + + Returns: + A dictionary of scalar float tensors representing the weighted losses for + object detection task: + BOX_SCALE: the weighted scale (height/width) loss. + BOX_OFFSET: the weighted object offset loss. + """ + od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss( + scale_predictions=prediction_dict[BOX_SCALE], + offset_predictions=prediction_dict[BOX_OFFSET], + input_height=input_height, + input_width=input_width) + loss_dict = {} + loss_dict[BOX_SCALE] = ( + self._od_params.scale_loss_weight * od_scale_loss) + loss_dict[BOX_OFFSET] = ( + self._od_params.offset_loss_weight * od_offset_loss) + return loss_dict + + def _compute_box_scale_and_offset_loss(self, input_height, input_width, + scale_predictions, offset_predictions): + """Computes the scale loss of the object detection task. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. 
+ scale_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, 2] representing the prediction heads of the model + for object scale (i.e height and width). + offset_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, 2] representing the prediction heads of the model + for object offset. + + Returns: + A tuple of two losses: + scale_loss: A float scalar tensor representing the object height/width + loss normalized by total number of boxes. + offset_loss: A float scalar tensor representing the object offset loss + normalized by total number of boxes + """ + # TODO(vighneshb) Explore a size invariant version of scale loss. + gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) + num_predictions = float(len(scale_predictions)) + + assigner = self._target_assigner_dict[DETECTION_TASK] + (batch_indices, batch_height_width_targets, batch_offset_targets, + batch_weights) = assigner.assign_size_and_offset_targets( + height=input_height, + width=input_width, + gt_boxes_list=gt_boxes_list, + gt_weights_list=gt_weights_list) + batch_weights = tf.expand_dims(batch_weights, -1) + + scale_loss = 0 + offset_loss = 0 + localization_loss_fn = self._od_params.localization_loss + for scale_pred, offset_pred in zip(scale_predictions, offset_predictions): + # Compute the scale loss. + scale_pred = cn_assigner.get_batch_predictions_from_indices( + scale_pred, batch_indices) + scale_loss += localization_loss_fn( + scale_pred, batch_height_width_targets, weights=batch_weights) + # Compute the offset loss. + offset_pred = cn_assigner.get_batch_predictions_from_indices( + offset_pred, batch_indices) + offset_loss += localization_loss_fn( + offset_pred, batch_offset_targets, weights=batch_weights) + scale_loss = tf.reduce_sum(scale_loss) / ( + num_predictions * num_boxes) + offset_loss = tf.reduce_sum(offset_loss) / ( + num_predictions * num_boxes) + return scale_loss, offset_loss + + def _compute_keypoint_estimation_losses(self, task_name, input_height, + input_width, prediction_dict, + per_pixel_weights): + """Computes the weighted keypoint losses.""" + kp_params = self._kp_params_dict[task_name] + heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP) + offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET) + regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION) + heatmap_loss = self._compute_kp_heatmap_loss( + input_height=input_height, + input_width=input_width, + task_name=task_name, + heatmap_predictions=prediction_dict[heatmap_key], + classification_loss_fn=kp_params.classification_loss, + per_pixel_weights=per_pixel_weights) + offset_loss = self._compute_kp_offset_loss( + input_height=input_height, + input_width=input_width, + task_name=task_name, + offset_predictions=prediction_dict[offset_key], + localization_loss_fn=kp_params.localization_loss) + reg_loss = self._compute_kp_regression_loss( + input_height=input_height, + input_width=input_width, + task_name=task_name, + regression_predictions=prediction_dict[regression_key], + localization_loss_fn=kp_params.localization_loss) + + loss_dict = {} + loss_dict[heatmap_key] = ( + kp_params.keypoint_heatmap_loss_weight * heatmap_loss) + loss_dict[offset_key] = ( + kp_params.keypoint_offset_loss_weight * offset_loss) + loss_dict[regression_key] = ( + kp_params.keypoint_regression_loss_weight * reg_loss) + return 
loss_dict + + def _compute_kp_heatmap_loss(self, input_height, input_width, task_name, + heatmap_predictions, classification_loss_fn, + per_pixel_weights): + """Computes the heatmap loss of the keypoint estimation task. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + task_name: A string representing the name of the keypoint task. + heatmap_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, num_keypoints] representing the prediction heads + of the model for keypoint heatmap. + classification_loss_fn: An object_detection.core.losses.Loss object to + compute the loss for the class predictions in CenterNet. + per_pixel_weights: A float tensor of shape [batch_size, + out_height * out_width, 1] with 1s in locations where the spatial + coordinates fall within the height and width in true_image_shapes. + + Returns: + loss: A float scalar tensor representing the object keypoint heatmap loss + normalized by number of instances. + """ + gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) + gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) + + assigner = self._target_assigner_dict[task_name] + (keypoint_heatmap, num_instances_per_kp_type, + valid_mask_batch) = assigner.assign_keypoint_heatmap_targets( + height=input_height, + width=input_width, + gt_keypoints_list=gt_keypoints_list, + gt_weights_list=gt_weights_list, + gt_classes_list=gt_classes_list, + gt_boxes_list=gt_boxes_list) + flattened_valid_mask = _flatten_spatial_dimensions( + tf.expand_dims(valid_mask_batch, axis=-1)) + flattened_heapmap_targets = _flatten_spatial_dimensions(keypoint_heatmap) + # Sum over the number of instances per keypoint types to get the total + # number of keypoints. Note that this is used to normalized the loss and we + # keep the minimum value to be 1 to avoid generating weird loss value when + # no keypoint is in the image batch. + num_instances = tf.maximum( + tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32), + 1.0) + loss = 0.0 + # Loop through each feature output head. + for pred in heatmap_predictions: + pred = _flatten_spatial_dimensions(pred) + unweighted_loss = classification_loss_fn( + pred, + flattened_heapmap_targets, + weights=tf.ones_like(per_pixel_weights)) + # Apply the weights after the loss function to have full control over it. + loss += unweighted_loss * per_pixel_weights * flattened_valid_mask + loss = tf.reduce_sum(loss) / ( + float(len(heatmap_predictions)) * num_instances) + return loss + + def _compute_kp_offset_loss(self, input_height, input_width, task_name, + offset_predictions, localization_loss_fn): + """Computes the offset loss of the keypoint estimation task. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + task_name: A string representing the name of the keypoint task. + offset_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, 2] representing the prediction heads of the model + for keypoint offset. + localization_loss_fn: An object_detection.core.losses.Loss object to + compute the loss for the keypoint offset predictions in CenterNet. 
+ + Returns: + loss: A float scalar tensor representing the keypoint offset loss + normalized by number of total keypoints. + """ + gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) + gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + + assigner = self._target_assigner_dict[task_name] + (batch_indices, batch_offsets, + batch_weights) = assigner.assign_keypoints_offset_targets( + height=input_height, + width=input_width, + gt_keypoints_list=gt_keypoints_list, + gt_weights_list=gt_weights_list, + gt_classes_list=gt_classes_list) + + # Keypoint offset loss. + loss = 0.0 + for prediction in offset_predictions: + batch_size, out_height, out_width, channels = _get_shape(prediction, 4) + if channels > 2: + prediction = tf.reshape( + prediction, shape=[batch_size, out_height, out_width, -1, 2]) + prediction = cn_assigner.get_batch_predictions_from_indices( + prediction, batch_indices) + # The dimensions passed are not as per the doc string but the loss + # still computes the correct value. + unweighted_loss = localization_loss_fn( + prediction, + batch_offsets, + weights=tf.expand_dims(tf.ones_like(batch_weights), -1)) + # Apply the weights after the loss function to have full control over it. + loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1) + + loss = tf.reduce_sum(loss) / ( + float(len(offset_predictions)) * + tf.maximum(tf.reduce_sum(batch_weights), 1.0)) + return loss + + def _compute_kp_regression_loss(self, input_height, input_width, task_name, + regression_predictions, localization_loss_fn): + """Computes the keypoint regression loss of the keypoint estimation task. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + task_name: A string representing the name of the keypoint task. + regression_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, 2 * num_keypoints] representing the prediction + heads of the model for keypoint regression offset. + localization_loss_fn: An object_detection.core.losses.Loss object to + compute the loss for the keypoint regression offset predictions in + CenterNet. + + Returns: + loss: A float scalar tensor representing the keypoint regression offset + loss normalized by number of total keypoints. + """ + gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) + gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints) + gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + # keypoint regression offset loss. 
+ assigner = self._target_assigner_dict[task_name] + (batch_indices, batch_regression_offsets, + batch_weights) = assigner.assign_joint_regression_targets( + height=input_height, + width=input_width, + gt_keypoints_list=gt_keypoints_list, + gt_classes_list=gt_classes_list, + gt_weights_list=gt_weights_list, + gt_boxes_list=gt_boxes_list) + + loss = 0.0 + for prediction in regression_predictions: + batch_size, out_height, out_width, _ = _get_shape(prediction, 4) + reshaped_prediction = tf.reshape( + prediction, shape=[batch_size, out_height, out_width, -1, 2]) + reg_prediction = cn_assigner.get_batch_predictions_from_indices( + reshaped_prediction, batch_indices) + unweighted_loss = localization_loss_fn( + reg_prediction, + batch_regression_offsets, + weights=tf.expand_dims(tf.ones_like(batch_weights), -1)) + # Apply the weights after the loss function to have full control over it. + loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1) + + loss = tf.reduce_sum(loss) / ( + float(len(regression_predictions)) * + tf.maximum(tf.reduce_sum(batch_weights), 1.0)) + return loss + + def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights): + """Computes all the losses associated with segmentation. + + Args: + prediction_dict: The dictionary returned from the predict() method. + per_pixel_weights: A float tensor of shape [batch_size, + out_height * out_width, 1] with 1s in locations where the spatial + coordinates fall within the height and width in true_image_shapes. + + Returns: + A dictionary with segmentation losses. + """ + segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP] + mask_loss = self._compute_mask_loss( + segmentation_heatmap, per_pixel_weights) + losses = { + SEGMENTATION_HEATMAP: mask_loss + } + return losses + + def _compute_mask_loss(self, segmentation_predictions, + per_pixel_weights): + """Computes the mask loss. + + Args: + segmentation_predictions: A list of float32 tensors of shape [batch_size, + out_height, out_width, num_classes]. + per_pixel_weights: A float tensor of shape [batch_size, + out_height * out_width, 1] with 1s in locations where the spatial + coordinates fall within the height and width in true_image_shapes. + + Returns: + A float scalar tensor representing the mask loss. + """ + gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks) + gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes) + + # Convert the groundtruth to targets. + assigner = self._target_assigner_dict[SEGMENTATION_TASK] + heatmap_targets = assigner.assign_segmentation_targets( + gt_masks_list=gt_masks_list, + gt_classes_list=gt_classes_list) + + flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets) + + loss = 0.0 + mask_loss_fn = self._mask_params.classification_loss + total_pixels_in_loss = tf.reduce_sum(per_pixel_weights) + + # Loop through each feature output head. + for pred in segmentation_predictions: + pred = _flatten_spatial_dimensions(pred) + loss += mask_loss_fn( + pred, flattened_heatmap_targets, weights=per_pixel_weights) + # TODO(ronnyvotel): Consider other ways to normalize loss. + total_loss = tf.reduce_sum(loss) / ( + float(len(segmentation_predictions)) * total_pixels_in_loss) + return total_loss + + def _compute_densepose_losses(self, input_height, input_width, + prediction_dict): + """Computes the weighted DensePose losses. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. 
+ prediction_dict: A dictionary holding predicted tensors output by the + "predict" function. See the "predict" function for more detailed + description. + + Returns: + A dictionary of scalar float tensors representing the weighted losses for + the DensePose task: + DENSEPOSE_HEATMAP: the weighted part segmentation loss. + DENSEPOSE_REGRESSION: the weighted part surface coordinate loss. + """ + dp_heatmap_loss, dp_regression_loss = ( + self._compute_densepose_part_and_coordinate_losses( + input_height=input_height, + input_width=input_width, + part_predictions=prediction_dict[DENSEPOSE_HEATMAP], + surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION])) + loss_dict = {} + loss_dict[DENSEPOSE_HEATMAP] = ( + self._densepose_params.part_loss_weight * dp_heatmap_loss) + loss_dict[DENSEPOSE_REGRESSION] = ( + self._densepose_params.coordinate_loss_weight * dp_regression_loss) + return loss_dict + + def _compute_densepose_part_and_coordinate_losses( + self, input_height, input_width, part_predictions, + surface_coord_predictions): + """Computes the individual losses for the DensePose task. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + part_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, num_parts]. + surface_coord_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, 2 * num_parts]. + + Returns: + A tuple with two scalar loss tensors: part_prediction_loss and + surface_coord_loss. + """ + gt_dp_num_points_list = self.groundtruth_lists( + fields.BoxListFields.densepose_num_points) + gt_dp_part_ids_list = self.groundtruth_lists( + fields.BoxListFields.densepose_part_ids) + gt_dp_surface_coords_list = self.groundtruth_lists( + fields.BoxListFields.densepose_surface_coords) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + + assigner = self._target_assigner_dict[DENSEPOSE_TASK] + batch_indices, batch_part_ids, batch_surface_coords, batch_weights = ( + assigner.assign_part_and_coordinate_targets( + height=input_height, + width=input_width, + gt_dp_num_points_list=gt_dp_num_points_list, + gt_dp_part_ids_list=gt_dp_part_ids_list, + gt_dp_surface_coords_list=gt_dp_surface_coords_list, + gt_weights_list=gt_weights_list)) + + part_prediction_loss = 0 + surface_coord_loss = 0 + classification_loss_fn = self._densepose_params.classification_loss + localization_loss_fn = self._densepose_params.localization_loss + num_predictions = float(len(part_predictions)) + num_valid_points = tf.math.count_nonzero(batch_weights) + num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32) + for part_pred, surface_coord_pred in zip(part_predictions, + surface_coord_predictions): + # Potentially upsample the feature maps, so that better quality (i.e. + # higher res) groundtruth can be applied. + if self._densepose_params.upsample_to_input_res: + part_pred = tf.keras.layers.UpSampling2D( + self._stride, interpolation=self._densepose_params.upsample_method)( + part_pred) + surface_coord_pred = tf.keras.layers.UpSampling2D( + self._stride, interpolation=self._densepose_params.upsample_method)( + surface_coord_pred) + # Compute the part prediction loss. 
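+      # Part logits are gathered with batch_indices[:, 0:3] (batch, y, x); the
+      # trailing part-index column of batch_indices is only needed for the
+      # surface-coordinate gather further below.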
+ part_pred = cn_assigner.get_batch_predictions_from_indices( + part_pred, batch_indices[:, 0:3]) + part_prediction_loss += classification_loss_fn( + part_pred[:, tf.newaxis, :], + batch_part_ids[:, tf.newaxis, :], + weights=batch_weights[:, tf.newaxis, tf.newaxis]) + # Compute the surface coordinate loss. + batch_size, out_height, out_width, _ = _get_shape( + surface_coord_pred, 4) + surface_coord_pred = tf.reshape( + surface_coord_pred, [batch_size, out_height, out_width, -1, 2]) + surface_coord_pred = cn_assigner.get_batch_predictions_from_indices( + surface_coord_pred, batch_indices) + surface_coord_loss += localization_loss_fn( + surface_coord_pred, + batch_surface_coords, + weights=batch_weights[:, tf.newaxis]) + part_prediction_loss = tf.reduce_sum(part_prediction_loss) / ( + num_predictions * num_valid_points) + surface_coord_loss = tf.reduce_sum(surface_coord_loss) / ( + num_predictions * num_valid_points) + return part_prediction_loss, surface_coord_loss + + def _compute_track_losses(self, input_height, input_width, prediction_dict): + """Computes all the losses associated with tracking. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + prediction_dict: The dictionary returned from the predict() method. + + Returns: + A dictionary with tracking losses. + """ + object_reid_predictions = prediction_dict[TRACK_REID] + embedding_loss = self._compute_track_embedding_loss( + input_height=input_height, + input_width=input_width, + object_reid_predictions=object_reid_predictions) + losses = { + TRACK_REID: embedding_loss + } + return losses + + def _compute_track_embedding_loss(self, input_height, input_width, + object_reid_predictions): + """Computes the object ReID loss. + + The embedding is trained as a classification task where the target is the + ID of each track among all tracks in the whole dataset. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + object_reid_predictions: A list of float tensors of shape [batch_size, + out_height, out_width, reid_embed_size] representing the object + embedding feature maps. + + Returns: + A float scalar tensor representing the object ReID loss per instance. + """ + gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids) + gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list)) + + # Convert the groundtruth to targets. + assigner = self._target_assigner_dict[TRACK_TASK] + batch_indices, batch_weights, track_targets = assigner.assign_track_targets( + height=input_height, + width=input_width, + gt_track_ids_list=gt_track_ids_list, + gt_boxes_list=gt_boxes_list, + gt_weights_list=gt_weights_list) + batch_weights = tf.expand_dims(batch_weights, -1) + + loss = 0.0 + object_reid_loss = self._track_params.classification_loss + # Loop through each feature output head. 
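+    # For each head, embeddings are gathered at the assigned groundtruth
+    # indices and passed through the ReID classification net, so the loss is
+    # computed over logits of size num_track_ids against the track targets.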
+ for pred in object_reid_predictions: + embedding_pred = cn_assigner.get_batch_predictions_from_indices( + pred, batch_indices) + + reid_classification = self.track_reid_classification_net(embedding_pred) + + loss += object_reid_loss( + reid_classification, track_targets, weights=batch_weights) + + loss_per_instance = tf.reduce_sum(loss) / ( + float(len(object_reid_predictions)) * num_boxes) + + return loss_per_instance + + def _compute_temporal_offset_loss(self, input_height, + input_width, prediction_dict): + """Computes the temporal offset loss for tracking. + + Args: + input_height: An integer scalar tensor representing input image height. + input_width: An integer scalar tensor representing input image width. + prediction_dict: The dictionary returned from the predict() method. + + Returns: + A dictionary with track/temporal_offset losses. + """ + gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes) + gt_offsets_list = self.groundtruth_lists( + fields.BoxListFields.temporal_offsets) + gt_match_list = self.groundtruth_lists( + fields.BoxListFields.track_match_flags) + gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights) + num_boxes = tf.cast( + get_num_instances_from_weights(gt_weights_list), tf.float32) + + offset_predictions = prediction_dict[TEMPORAL_OFFSET] + num_predictions = float(len(offset_predictions)) + + assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK] + (batch_indices, batch_offset_targets, + batch_weights) = assigner.assign_temporal_offset_targets( + height=input_height, + width=input_width, + gt_boxes_list=gt_boxes_list, + gt_offsets_list=gt_offsets_list, + gt_match_list=gt_match_list, + gt_weights_list=gt_weights_list) + batch_weights = tf.expand_dims(batch_weights, -1) + + offset_loss_fn = self._temporal_offset_params.localization_loss + loss_dict = {} + offset_loss = 0 + for offset_pred in offset_predictions: + offset_pred = cn_assigner.get_batch_predictions_from_indices( + offset_pred, batch_indices) + offset_loss += offset_loss_fn(offset_pred[:, None], + batch_offset_targets[:, None], + weights=batch_weights) + offset_loss = tf.reduce_sum(offset_loss) / (num_predictions * num_boxes) + loss_dict[TEMPORAL_OFFSET] = offset_loss + return loss_dict + + def preprocess(self, inputs): + outputs = shape_utils.resize_images_and_return_shapes( + inputs, self._image_resizer_fn) + resized_inputs, true_image_shapes = outputs + + return (self._feature_extractor.preprocess(resized_inputs), + true_image_shapes) + + def predict(self, preprocessed_inputs, _): + """Predicts CenterNet prediction tensors given an input batch. + + Feature extractors are free to produce predictions from multiple feature + maps and therefore we return a dictionary mapping strings to lists. + E.g. the hourglass backbone produces two feature maps. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float32 tensor + representing a batch of images. + + Returns: + prediction_dict: a dictionary holding predicted tensors with + 'preprocessed_inputs' - The input image after being resized and + preprocessed by the feature extractor. + 'object_center' - A list of size num_feature_outputs containing + float tensors of size [batch_size, output_height, output_width, + num_classes] representing the predicted object center heatmap logits. + 'box/scale' - [optional] A list of size num_feature_outputs holding + float tensors of size [batch_size, output_height, output_width, 2] + representing the predicted box height and width at each output + location. 
This field exists only when object detection task is + specified. + 'box/offset' - [optional] A list of size num_feature_outputs holding + float tensors of size [batch_size, output_height, output_width, 2] + representing the predicted y and x offsets at each output location. + '$TASK_NAME/keypoint_heatmap' - [optional] A list of size + num_feature_outputs holding float tensors of size [batch_size, + output_height, output_width, num_keypoints] representing the predicted + keypoint heatmap logits. + '$TASK_NAME/keypoint_offset' - [optional] A list of size + num_feature_outputs holding float tensors of size [batch_size, + output_height, output_width, 2] representing the predicted keypoint + offsets at each output location. + '$TASK_NAME/keypoint_regression' - [optional] A list of size + num_feature_outputs holding float tensors of size [batch_size, + output_height, output_width, 2 * num_keypoints] representing the + predicted keypoint regression at each output location. + 'segmentation/heatmap' - [optional] A list of size num_feature_outputs + holding float tensors of size [batch_size, output_height, + output_width, num_classes] representing the mask logits. + 'densepose/heatmap' - [optional] A list of size num_feature_outputs + holding float tensors of size [batch_size, output_height, + output_width, num_parts] representing the mask logits for each part. + 'densepose/regression' - [optional] A list of size num_feature_outputs + holding float tensors of size [batch_size, output_height, + output_width, 2 * num_parts] representing the DensePose surface + coordinate predictions. + Note the $TASK_NAME is provided by the KeypointEstimation namedtuple + used to differentiate between different keypoint tasks. + """ + features_list = self._feature_extractor(preprocessed_inputs) + + predictions = {} + for head_name, heads in self._prediction_head_dict.items(): + predictions[head_name] = [ + head(feature) for (feature, head) in zip(features_list, heads) + ] + predictions['preprocessed_inputs'] = preprocessed_inputs + + self._batched_prediction_tensor_names = predictions.keys() + return predictions + + def loss(self, prediction_dict, true_image_shapes, scope=None): + """Computes scalar loss tensors with respect to provided groundtruth. + + This function implements the various CenterNet losses. + + Args: + prediction_dict: a dictionary holding predicted tensors returned by + "predict" function. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is of + the form [height, width, channels] indicating the shapes of true images + in the resized images, as resized images can be padded with zeros. + scope: Optional scope name. + + Returns: + A dictionary mapping the keys [ + 'Loss/object_center', + 'Loss/box/scale', (optional) + 'Loss/box/offset', (optional) + 'Loss/$TASK_NAME/keypoint/heatmap', (optional) + 'Loss/$TASK_NAME/keypoint/offset', (optional) + 'Loss/$TASK_NAME/keypoint/regression', (optional) + 'Loss/segmentation/heatmap', (optional) + 'Loss/densepose/heatmap', (optional) + 'Loss/densepose/regression', (optional) + 'Loss/track/reid'] (optional) + 'Loss/track/offset'] (optional) + scalar tensors corresponding to the losses for different tasks. Note the + $TASK_NAME is provided by the KeypointEstimation namedtuple used to + differentiate between different keypoint tasks. 
+ """ + + _, input_height, input_width, _ = _get_shape( + prediction_dict['preprocessed_inputs'], 4) + + output_height, output_width = (input_height // self._stride, + input_width // self._stride) + + # TODO(vighneshb) Explore whether using floor here is safe. + output_true_image_shapes = tf.ceil( + tf.to_float(true_image_shapes) / self._stride) + valid_anchor_weights = get_valid_anchor_weights_in_flattened_image( + output_true_image_shapes, output_height, output_width) + valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2) + + object_center_loss = self._compute_object_center_loss( + object_center_predictions=prediction_dict[OBJECT_CENTER], + input_height=input_height, + input_width=input_width, + per_pixel_weights=valid_anchor_weights) + losses = { + OBJECT_CENTER: + self._center_params.object_center_loss_weight * object_center_loss + } + if self._od_params is not None: + od_losses = self._compute_object_detection_losses( + input_height=input_height, + input_width=input_width, + prediction_dict=prediction_dict, + per_pixel_weights=valid_anchor_weights) + for key in od_losses: + od_losses[key] = od_losses[key] * self._od_params.task_loss_weight + losses.update(od_losses) + + if self._kp_params_dict is not None: + for task_name, params in self._kp_params_dict.items(): + kp_losses = self._compute_keypoint_estimation_losses( + task_name=task_name, + input_height=input_height, + input_width=input_width, + prediction_dict=prediction_dict, + per_pixel_weights=valid_anchor_weights) + for key in kp_losses: + kp_losses[key] = kp_losses[key] * params.task_loss_weight + losses.update(kp_losses) + + if self._mask_params is not None: + seg_losses = self._compute_segmentation_losses( + prediction_dict=prediction_dict, + per_pixel_weights=valid_anchor_weights) + for key in seg_losses: + seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight + losses.update(seg_losses) + + if self._densepose_params is not None: + densepose_losses = self._compute_densepose_losses( + input_height=input_height, + input_width=input_width, + prediction_dict=prediction_dict) + for key in densepose_losses: + densepose_losses[key] = ( + densepose_losses[key] * self._densepose_params.task_loss_weight) + losses.update(densepose_losses) + + if self._track_params is not None: + track_losses = self._compute_track_losses( + input_height=input_height, + input_width=input_width, + prediction_dict=prediction_dict) + for key in track_losses: + track_losses[key] = ( + track_losses[key] * self._track_params.task_loss_weight) + losses.update(track_losses) + + if self._temporal_offset_params is not None: + offset_losses = self._compute_temporal_offset_loss( + input_height=input_height, + input_width=input_width, + prediction_dict=prediction_dict) + for key in offset_losses: + offset_losses[key] = ( + offset_losses[key] * self._temporal_offset_params.task_loss_weight) + losses.update(offset_losses) + + # Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the + # losses will be grouped together in Tensorboard. + return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val) + for key, val in losses.items()]) + + def postprocess(self, prediction_dict, true_image_shapes, **params): + """Produces boxes given a prediction dict returned by predict(). + + Although predict returns a list of tensors, only the last tensor in + each list is used for making box predictions. + + Args: + prediction_dict: a dictionary holding predicted tensors from "predict" + function. 
+ true_image_shapes: int32 tensor of shape [batch, 3] where each row is of + the form [height, width, channels] indicating the shapes of true images + in the resized images, as resized images can be padded with zeros. + **params: Currently ignored. + + Returns: + detections: a dictionary containing the following fields + detection_boxes - A tensor of shape [batch, max_detections, 4] + holding the predicted boxes. + detection_boxes_strided: A tensor of shape [batch_size, num_detections, + 4] holding the predicted boxes in absolute coordinates of the + feature extractor's final layer output. + detection_scores: A tensor of shape [batch, max_detections] holding + the predicted score for each box. + detection_multiclass_scores: A tensor of shape [batch, max_detection, + num_classes] holding multiclass score for each box. + detection_classes: An integer tensor of shape [batch, max_detections] + containing the detected class for each box. + num_detections: An integer tensor of shape [batch] containing the + number of detected boxes for each sample in the batch. + detection_keypoints: (Optional) A float tensor of shape [batch, + max_detections, num_keypoints, 2] with normalized keypoints. Any + invalid keypoints have their coordinates and scores set to 0.0. + detection_keypoint_scores: (Optional) A float tensor of shape [batch, + max_detection, num_keypoints] with scores for each keypoint. + detection_masks: (Optional) A uint8 tensor of shape [batch, + max_detections, mask_height, mask_width] with masks for each + detection. Background is specified with 0, and foreground is specified + with positive integers (1 for standard instance segmentation mask, and + 1-indexed parts for DensePose task). + detection_surface_coords: (Optional) A float32 tensor of shape [batch, + max_detection, mask_height, mask_width, 2] with DensePose surface + coordinates, in (v, u) format. + detection_embeddings: (Optional) A float tensor of shape [batch, + max_detections, reid_embed_size] containing object embeddings. + """ + object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1]) + # Get x, y and channel indices corresponding to the top indices in the class + # center predictions. + detection_scores, y_indices, x_indices, channel_indices = ( + top_k_feature_map_locations( + object_center_prob, max_pool_kernel_size=3, + k=self._center_params.max_box_predictions)) + multiclass_scores = tf.gather_nd( + object_center_prob, tf.stack([y_indices, x_indices], -1), batch_dims=1) + boxes_strided, classes, scores, num_detections = ( + prediction_tensors_to_boxes( + detection_scores, y_indices, x_indices, channel_indices, + prediction_dict[BOX_SCALE][-1], prediction_dict[BOX_OFFSET][-1])) + + boxes = convert_strided_predictions_to_normalized_boxes( + boxes_strided, self._stride, true_image_shapes) + + postprocess_dict = { + fields.DetectionResultFields.detection_boxes: boxes, + fields.DetectionResultFields.detection_scores: scores, + fields.DetectionResultFields.detection_multiclass_scores: + multiclass_scores, + fields.DetectionResultFields.detection_classes: classes, + fields.DetectionResultFields.num_detections: num_detections, + 'detection_boxes_strided': boxes_strided + } + + if self._kp_params_dict: + # If the model is trained to predict only one class of object and its + # keypoint, we fall back to a simpler postprocessing function which uses + # the ops that are supported by tf.lite on GPU. 
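+      # Illustrative usage (assumed call pattern, not defined in this module):
+      #   preds = model.predict(preprocessed_images, None)
+      #   out = model.postprocess(preds, true_image_shapes)
+      #   kpts = out[fields.DetectionResultFields.detection_keypoints]
+      #   # kpts: [batch, max_detections, num_keypoints, 2], normalized
+      #   # coordinates; the two branches below differ only in tf.lite
+      #   # compatibility and out-of-frame keypoint clipping.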
+      if len(self._kp_params_dict) == 1 and self._num_classes == 1:
+        keypoints, keypoint_scores = self._postprocess_keypoints_simple(
+            prediction_dict, classes, y_indices, x_indices,
+            boxes_strided, num_detections)
+        # The map_fn used to clip out of frame keypoints creates issues when
+        # converting to a tf.lite model, so we disable it and let the users
+        # handle those out of frame keypoints.
+        keypoints, keypoint_scores = (
+            convert_strided_predictions_to_normalized_keypoints(
+                keypoints, keypoint_scores, self._stride, true_image_shapes,
+                clip_out_of_frame_keypoints=False))
+      else:
+        keypoints, keypoint_scores = self._postprocess_keypoints(
+            prediction_dict, classes, y_indices, x_indices,
+            boxes_strided, num_detections)
+        keypoints, keypoint_scores = (
+            convert_strided_predictions_to_normalized_keypoints(
+                keypoints, keypoint_scores, self._stride, true_image_shapes,
+                clip_out_of_frame_keypoints=True))
+      postprocess_dict.update({
+          fields.DetectionResultFields.detection_keypoints: keypoints,
+          fields.DetectionResultFields.detection_keypoint_scores:
+              keypoint_scores
+      })
+
+    if self._mask_params:
+      masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1])
+      densepose_part_heatmap, densepose_surface_coords = None, None
+      densepose_class_index = 0
+      if self._densepose_params:
+        densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][-1]
+        densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][-1]
+        densepose_class_index = self._densepose_params.class_id
+      instance_masks, surface_coords = (
+          convert_strided_predictions_to_instance_masks(
+              boxes, classes, masks, true_image_shapes,
+              densepose_part_heatmap, densepose_surface_coords,
+              stride=self._stride, mask_height=self._mask_params.mask_height,
+              mask_width=self._mask_params.mask_width,
+              score_threshold=self._mask_params.score_threshold,
+              densepose_class_index=densepose_class_index))
+      postprocess_dict[
+          fields.DetectionResultFields.detection_masks] = instance_masks
+      if self._densepose_params:
+        postprocess_dict[
+            fields.DetectionResultFields.detection_surface_coords] = (
+                surface_coords)
+
+    if self._track_params:
+      embeddings = self._postprocess_embeddings(prediction_dict,
+                                                y_indices, x_indices)
+      postprocess_dict.update({
+          fields.DetectionResultFields.detection_embeddings: embeddings
+      })
+
+    if self._temporal_offset_params:
+      offsets = prediction_tensors_to_temporal_offsets(
+          y_indices, x_indices,
+          prediction_dict[TEMPORAL_OFFSET][-1])
+      postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets
+
+    return postprocess_dict
+
+  def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices):
+    """Performs postprocessing on embedding predictions.
+
+    Args:
+      prediction_dict: a dictionary holding predicted tensors, returned from the
+        predict() method. This dictionary should contain embedding prediction
+        feature maps for tracking task.
+      y_indices: A [batch_size, max_detections] int tensor with y indices for
+        all object centers.
+      x_indices: A [batch_size, max_detections] int tensor with x indices for
+        all object centers.
+
+    Returns:
+      embeddings: A [batch_size, max_detection, reid_embed_size] float32
+        tensor with L2 normalized embeddings extracted from detection box
+        centers.
+ """ + embedding_predictions = prediction_dict[TRACK_REID][-1] + embeddings = predicted_embeddings_at_object_centers( + embedding_predictions, y_indices, x_indices) + embeddings, _ = tf.linalg.normalize(embeddings, axis=-1) + + return embeddings + + def _postprocess_keypoints(self, prediction_dict, classes, y_indices, + x_indices, boxes, num_detections): + """Performs postprocessing on keypoint predictions. + + Args: + prediction_dict: a dictionary holding predicted tensors, returned from the + predict() method. This dictionary should contain keypoint prediction + feature maps for each keypoint task. + classes: A [batch_size, max_detections] int tensor with class indices for + all detected objects. + y_indices: A [batch_size, max_detections] int tensor with y indices for + all object centers. + x_indices: A [batch_size, max_detections] int tensor with x indices for + all object centers. + boxes: A [batch_size, max_detections, 4] float32 tensor with bounding + boxes in (un-normalized) output space. + num_detections: A [batch_size] int tensor with the number of valid + detections for each image. + + Returns: + A tuple of + keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32 + tensor with keypoints in the output (strided) coordinate frame. + keypoint_scores: a [batch_size, max_detections, num_total_keypoints] + float32 tensor with keypoint scores. + """ + total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict + in self._kp_params_dict.values()) + batch_size, max_detections, _ = _get_shape(boxes, 3) + kpt_coords_for_example_list = [] + kpt_scores_for_example_list = [] + for ex_ind in range(batch_size): + kpt_coords_for_class_list = [] + kpt_scores_for_class_list = [] + instance_inds_for_class_list = [] + for task_name, kp_params in self._kp_params_dict.items(): + keypoint_heatmap = prediction_dict[ + get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1] + keypoint_offsets = prediction_dict[ + get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1] + keypoint_regression = prediction_dict[ + get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1] + instance_inds = self._get_instance_indices( + classes, num_detections, ex_ind, kp_params.class_id) + num_ind = _get_shape(instance_inds, 1) + + def true_fn( + keypoint_heatmap, keypoint_offsets, keypoint_regression, + classes, y_indices, x_indices, boxes, instance_inds, + ex_ind, kp_params): + """Logics to execute when instance_inds is not an empty set.""" + # Postprocess keypoints and scores for class and single image. Shapes + # are [1, num_instances_i, num_keypoints_i, 2] and + # [1, num_instances_i, num_keypoints_i], respectively. Note that + # num_instances_i and num_keypoints_i refers to the number of + # instances and keypoints for class i, respectively. + kpt_coords_for_class, kpt_scores_for_class = ( + self._postprocess_keypoints_for_class_and_image( + keypoint_heatmap, keypoint_offsets, keypoint_regression, + classes, y_indices, x_indices, boxes, instance_inds, + ex_ind, kp_params)) + # Expand keypoint dimension (with padding) so that coordinates and + # scores have shape [1, num_instances_i, num_total_keypoints, 2] and + # [1, num_instances_i, num_total_keypoints], respectively. 
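+          # For example (illustrative indices): if this task predicts
+          # keypoint_indices [0, 2] while num_total_keypoints is 5, the two
+          # predicted keypoint columns are scattered into columns 0 and 2 and
+          # the remaining columns receive padding values.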
+ kpts_coords_for_class_padded, kpt_scores_for_class_padded = ( + _pad_to_full_keypoint_dim( + kpt_coords_for_class, kpt_scores_for_class, + kp_params.keypoint_indices, total_num_keypoints)) + return kpts_coords_for_class_padded, kpt_scores_for_class_padded + + def false_fn(): + """Logics to execute when the instance_inds is an empty set.""" + return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32), + tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32)) + + true_fn = functools.partial( + true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression, + classes, y_indices, x_indices, boxes, instance_inds, ex_ind, + kp_params) + # Use dimension values instead of tf.size for tf.lite compatibility. + results = tf.cond(num_ind[0] > 0, true_fn, false_fn) + + kpt_coords_for_class_list.append(results[0]) + kpt_scores_for_class_list.append(results[1]) + instance_inds_for_class_list.append(instance_inds) + + # Concatenate all keypoints across all classes (single example). + kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1) + kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1) + instance_inds_for_example = tf.concat(instance_inds_for_class_list, + axis=0) + + # Use dimension values instead of tf.size for tf.lite compatibility. + num_inds = _get_shape(instance_inds_for_example, 1) + if num_inds[0] > 0: + # Scatter into tensor where instances align with original detection + # instances. New shape of keypoint coordinates and scores are + # [1, max_detections, num_total_keypoints, 2] and + # [1, max_detections, num_total_keypoints], respectively. + kpt_coords_for_example_all_det, kpt_scores_for_example_all_det = ( + _pad_to_full_instance_dim( + kpt_coords_for_example, kpt_scores_for_example, + instance_inds_for_example, + self._center_params.max_box_predictions)) + else: + kpt_coords_for_example_all_det = tf.zeros( + [1, max_detections, total_num_keypoints, 2], dtype=tf.float32) + kpt_scores_for_example_all_det = tf.zeros( + [1, max_detections, total_num_keypoints], dtype=tf.float32) + + kpt_coords_for_example_list.append(kpt_coords_for_example_all_det) + kpt_scores_for_example_list.append(kpt_scores_for_example_all_det) + + # Concatenate all keypoints and scores from all examples in the batch. + # Shapes are [batch_size, max_detections, num_total_keypoints, 2] and + # [batch_size, max_detections, num_total_keypoints], respectively. + keypoints = tf.concat(kpt_coords_for_example_list, axis=0) + keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0) + + return keypoints, keypoint_scores + + def _postprocess_keypoints_simple(self, prediction_dict, classes, y_indices, + x_indices, boxes, num_detections): + """Performs postprocessing on keypoint predictions (one class only). + + This function handles the special case of keypoint task that the model + predicts only one class of the bounding box/keypoint (e.g. person). By the + assumption, the function uses only tf.lite supported ops and should run + faster. + + Args: + prediction_dict: a dictionary holding predicted tensors, returned from the + predict() method. This dictionary should contain keypoint prediction + feature maps for each keypoint task. + classes: A [batch_size, max_detections] int tensor with class indices for + all detected objects. + y_indices: A [batch_size, max_detections] int tensor with y indices for + all object centers. + x_indices: A [batch_size, max_detections] int tensor with x indices for + all object centers. 
+ boxes: A [batch_size, max_detections, 4] float32 tensor with bounding + boxes in (un-normalized) output space. + num_detections: A [batch_size] int tensor with the number of valid + detections for each image. + + Returns: + A tuple of + keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32 + tensor with keypoints in the output (strided) coordinate frame. + keypoint_scores: a [batch_size, max_detections, num_total_keypoints] + float32 tensor with keypoint scores. + """ + # This function only works when there is only one keypoint task and the + # number of classes equal to one. For more general use cases, please use + # _postprocess_keypoints instead. + assert len(self._kp_params_dict) == 1 and self._num_classes == 1 + task_name, kp_params = next(iter(self._kp_params_dict.items())) + keypoint_heatmap = prediction_dict[ + get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1] + keypoint_offsets = prediction_dict[ + get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1] + keypoint_regression = prediction_dict[ + get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1] + + batch_size, _, _ = _get_shape(boxes, 3) + kpt_coords_for_example_list = [] + kpt_scores_for_example_list = [] + for ex_ind in range(batch_size): + # Postprocess keypoints and scores for class and single image. Shapes + # are [1, max_detections, num_keypoints, 2] and + # [1, max_detections, num_keypoints], respectively. + kpt_coords_for_class, kpt_scores_for_class = ( + self._postprocess_keypoints_for_class_and_image_simple( + keypoint_heatmap, keypoint_offsets, keypoint_regression, + classes, y_indices, x_indices, boxes, ex_ind, kp_params)) + + kpt_coords_for_example_list.append(kpt_coords_for_class) + kpt_scores_for_example_list.append(kpt_scores_for_class) + + # Concatenate all keypoints and scores from all examples in the batch. + # Shapes are [batch_size, max_detections, num_keypoints, 2] and + # [batch_size, max_detections, num_keypoints], respectively. + keypoints = tf.concat(kpt_coords_for_example_list, axis=0) + keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0) + + return keypoints, keypoint_scores + + def _get_instance_indices(self, classes, num_detections, batch_index, + class_id): + """Gets the instance indices that match the target class ID. + + Args: + classes: A [batch_size, max_detections] int tensor with class indices for + all detected objects. + num_detections: A [batch_size] int tensor with the number of valid + detections for each image. + batch_index: An integer specifying the index for an example in the batch. + class_id: Class id + + Returns: + instance_inds: A [num_instances] int32 tensor where each element indicates + the instance location within the `classes` tensor. This is useful to + associate the refined keypoints with the original detections (i.e. + boxes) + """ + classes = classes[batch_index:batch_index+1, ...] + _, max_detections = shape_utils.combined_static_and_dynamic_shape( + classes) + # Get the detection indices corresponding to the target class. + # Call tf.math.equal with matched tensor shape to make it tf.lite + # compatible. + valid_detections_with_kpt_class = tf.math.logical_and( + tf.range(max_detections) < num_detections[batch_index], + tf.math.equal(classes[0], tf.fill(classes[0].shape, class_id))) + instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0] + # Cast the indices tensor to int32 for tf.lite compatibility. 
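+    # For example (illustrative values): if classes[batch_index] is
+    # [1, 0, 1, 2], num_detections[batch_index] is 3 and class_id is 1, the
+    # combined mask is [True, False, True, False] and the returned indices
+    # are [0, 2].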
+ return tf.cast(instance_inds, tf.int32) + + def _postprocess_keypoints_for_class_and_image( + self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, + y_indices, x_indices, boxes, indices_with_kpt_class, batch_index, + kp_params): + """Postprocess keypoints for a single image and class. + + This function performs the following postprocessing operations on a single + image and single keypoint class: + - Converts keypoints scores to range [0, 1] with sigmoid. + - Determines the detections that correspond to the specified keypoint class. + - Gathers the regressed keypoints at the detection (i.e. box) centers. + - Gathers keypoint candidates from the keypoint heatmaps. + - Snaps regressed keypoints to nearby keypoint candidates. + + Args: + keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32 + tensor with keypoint heatmaps. + keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with + local offsets to keypoint centers. + keypoint_regression: A [batch_size, height, width, 2 * num_keypoints] + float32 tensor with regressed offsets to all keypoints. + classes: A [batch_size, max_detections] int tensor with class indices for + all detected objects. + y_indices: A [batch_size, max_detections] int tensor with y indices for + all object centers. + x_indices: A [batch_size, max_detections] int tensor with x indices for + all object centers. + boxes: A [batch_size, max_detections, 4] float32 tensor with detected + boxes in the output (strided) frame. + indices_with_kpt_class: A [num_instances] int tensor where each element + indicates the instance location within the `classes` tensor. This is + useful to associate the refined keypoints with the original detections + (i.e. boxes) + batch_index: An integer specifying the index for an example in the batch. + kp_params: A `KeypointEstimationParams` object with parameters for a + single keypoint class. + + Returns: + A tuple of + refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor + with refined keypoints for a single class in a single image, expressed + in the output (strided) coordinate frame. Note that `num_instances` is a + dynamic dimension, and corresponds to the number of valid detections + for the specific class. + refined_scores: A [1, num_instances, num_keypoints] float32 tensor with + keypoint scores. + """ + keypoint_indices = kp_params.keypoint_indices + num_keypoints = len(keypoint_indices) + + keypoint_heatmap = tf.nn.sigmoid( + keypoint_heatmap[batch_index:batch_index+1, ...]) + keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...] + keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...] + y_indices = y_indices[batch_index:batch_index+1, ...] + x_indices = x_indices[batch_index:batch_index+1, ...] + boxes_slice = boxes[batch_index:batch_index+1, ...] + + # Gather the feature map locations corresponding to the object class. + y_indices_for_kpt_class = tf.gather(y_indices, indices_with_kpt_class, + axis=1) + x_indices_for_kpt_class = tf.gather(x_indices, indices_with_kpt_class, + axis=1) + boxes_for_kpt_class = tf.gather(boxes_slice, indices_with_kpt_class, axis=1) + + # Gather the regressed keypoints. Final tensor has shape + # [1, num_instances, num_keypoints, 2]. 
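+    # These regressed keypoints (predicted relative to each object center) are
+    # snapped to nearby heatmap candidates by refine_keypoints() further down;
+    # a regressed keypoint with no candidate inside its search region keeps
+    # its location but is scored with kp_params.unmatched_keypoint_score.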
+ regressed_keypoints_for_objects = regressed_keypoints_at_object_centers( + keypoint_regression, y_indices_for_kpt_class, x_indices_for_kpt_class) + regressed_keypoints_for_objects = tf.reshape( + regressed_keypoints_for_objects, [1, -1, num_keypoints, 2]) + + # Get the candidate keypoints and scores. + # The shape of keypoint_candidates and keypoint_scores is: + # [1, num_candidates_per_keypoint, num_keypoints, 2] and + # [1, num_candidates_per_keypoint, num_keypoints], respectively. + keypoint_candidates, keypoint_scores, num_keypoint_candidates = ( + prediction_tensors_to_keypoint_candidates( + keypoint_heatmap, keypoint_offsets, + keypoint_score_threshold=( + kp_params.keypoint_candidate_score_threshold), + max_pool_kernel_size=kp_params.peak_max_pool_kernel_size, + max_candidates=kp_params.num_candidates_per_keypoint)) + + # Get the refined keypoints and scores, of shape + # [1, num_instances, num_keypoints, 2] and + # [1, num_instances, num_keypoints], respectively. + refined_keypoints, refined_scores = refine_keypoints( + regressed_keypoints=regressed_keypoints_for_objects, + keypoint_candidates=keypoint_candidates, + keypoint_scores=keypoint_scores, + num_keypoint_candidates=num_keypoint_candidates, + bboxes=boxes_for_kpt_class, + unmatched_keypoint_score=kp_params.unmatched_keypoint_score, + box_scale=kp_params.box_scale, + candidate_search_scale=kp_params.candidate_search_scale, + candidate_ranking_mode=kp_params.candidate_ranking_mode) + + return refined_keypoints, refined_scores + + def _postprocess_keypoints_for_class_and_image_simple( + self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes, + y_indices, x_indices, boxes, batch_index, kp_params): + """Postprocess keypoints for a single image and class. + + This function is similar to "_postprocess_keypoints_for_class_and_image" + except that it assumes there is only one class of bounding box/keypoint to + be handled. The function is tf.lite compatible. + + Args: + keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32 + tensor with keypoint heatmaps. + keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with + local offsets to keypoint centers. + keypoint_regression: A [batch_size, height, width, 2 * num_keypoints] + float32 tensor with regressed offsets to all keypoints. + classes: A [batch_size, max_detections] int tensor with class indices for + all detected objects. + y_indices: A [batch_size, max_detections] int tensor with y indices for + all object centers. + x_indices: A [batch_size, max_detections] int tensor with x indices for + all object centers. + boxes: A [batch_size, max_detections, 4] float32 tensor with detected + boxes in the output (strided) frame. + batch_index: An integer specifying the index for an example in the batch. + kp_params: A `KeypointEstimationParams` object with parameters for a + single keypoint class. + + Returns: + A tuple of + refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor + with refined keypoints for a single class in a single image, expressed + in the output (strided) coordinate frame. Note that `num_instances` is a + dynamic dimension, and corresponds to the number of valid detections + for the specific class. + refined_scores: A [1, num_instances, num_keypoints] float32 tensor with + keypoint scores. + """ + num_keypoints = len(kp_params.keypoint_indices) + + keypoint_heatmap = tf.nn.sigmoid( + keypoint_heatmap[batch_index:batch_index+1, ...]) + keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...] 
+ keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...] + y_indices = y_indices[batch_index:batch_index+1, ...] + x_indices = x_indices[batch_index:batch_index+1, ...] + boxes_slice = boxes[batch_index:batch_index+1, ...] + + # Gather the regressed keypoints. Final tensor has shape + # [1, num_instances, num_keypoints, 2]. + regressed_keypoints_for_objects = regressed_keypoints_at_object_centers( + keypoint_regression, y_indices, x_indices) + regressed_keypoints_for_objects = tf.reshape( + regressed_keypoints_for_objects, [1, -1, num_keypoints, 2]) + + # Get the candidate keypoints and scores. + # The shape of keypoint_candidates and keypoint_scores is: + # [1, num_candidates_per_keypoint, num_keypoints, 2] and + # [1, num_candidates_per_keypoint, num_keypoints], respectively. + keypoint_candidates, keypoint_scores, num_keypoint_candidates = ( + prediction_tensors_to_keypoint_candidates( + keypoint_heatmap, keypoint_offsets, + keypoint_score_threshold=( + kp_params.keypoint_candidate_score_threshold), + max_pool_kernel_size=kp_params.peak_max_pool_kernel_size, + max_candidates=kp_params.num_candidates_per_keypoint)) + + # Get the refined keypoints and scores, of shape + # [1, num_instances, num_keypoints, 2] and + # [1, num_instances, num_keypoints], respectively. + refined_keypoints, refined_scores = refine_keypoints( + regressed_keypoints_for_objects, keypoint_candidates, keypoint_scores, + num_keypoint_candidates, bboxes=boxes_slice, + unmatched_keypoint_score=kp_params.unmatched_keypoint_score, + box_scale=kp_params.box_scale, + candidate_search_scale=kp_params.candidate_search_scale, + candidate_ranking_mode=kp_params.candidate_ranking_mode) + + return refined_keypoints, refined_scores + + def regularization_losses(self): + return [] + + def restore_map(self, + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=False): + raise RuntimeError('CenterNetMetaArch not supported under TF1.x.') + + def restore_from_objects(self, fine_tune_checkpoint_type='detection'): + """Returns a map of Trackable objects to load from a foreign checkpoint. + + Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module + or Checkpoint). This enables the model to initialize based on weights from + another task. For example, the feature extractor variables from a + classification model can be used to bootstrap training of an object + detector. When loading from an object detection model, the checkpoint model + should have the same parameters as this detection model with exception of + the num_classes parameter. + + Note that this function is intended to be used to restore Keras-based + models when running Tensorflow 2, whereas restore_map (not implemented + in CenterNet) is intended to be used to restore Slim-based models when + running Tensorflow 1.x. + + TODO(jonathanhuang): Make this function consistent with other + meta-architectures. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`, `fine_tune`. + Default 'detection'. + 'detection': used when loading models pre-trained on other detection + tasks. With this checkpoint type the weights of the feature extractor + are expected under the attribute 'feature_extractor'. + 'classification': used when loading models pre-trained on an image + classification task. 
Note that only the encoder section of the network + is loaded and not the upsampling layers. With this checkpoint type, + the weights of only the encoder section are expected under the + attribute 'feature_extractor'. + 'fine_tune': used when loading the entire CenterNet feature extractor + pre-trained on other tasks. The checkpoints saved during CenterNet + model training can be directly loaded using this type. With this + checkpoint type, the weights of the feature extractor are expected + under the attribute 'model._feature_extractor'. + For more details, see the tensorflow section on Loading mechanics. + https://www.tensorflow.org/guide/checkpoint#loading_mechanics + + Returns: + A dict mapping keys to Trackable objects (tf.Module or Checkpoint). + """ + + supported_types = self._feature_extractor.supported_sub_model_types + supported_types += ['fine_tune'] + + if fine_tune_checkpoint_type not in supported_types: + message = ('Checkpoint type "{}" not supported for {}. ' + 'Supported types are {}') + raise ValueError( + message.format(fine_tune_checkpoint_type, + self._feature_extractor.__class__.__name__, + supported_types)) + + elif fine_tune_checkpoint_type == 'fine_tune': + feature_extractor_model = tf.train.Checkpoint( + _feature_extractor=self._feature_extractor) + return {'model': feature_extractor_model} + + else: + return {'feature_extractor': self._feature_extractor.get_sub_model( + fine_tune_checkpoint_type)} + + def updates(self): + raise RuntimeError('This model is intended to be used with model_lib_v2 ' + 'which does not support updates()') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/center_net_meta_arch.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/center_net_meta_arch.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4c490228169f8c1a25f3f8684208b3567dabc7b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/center_net_meta_arch.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c0dacbc6797fac9ca0aa5de5e046072e019666f2 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py @@ -0,0 +1,2274 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for the CenterNet Meta architecture code.""" + +from __future__ import division + +import functools +import re +import unittest + +from absl.testing import parameterized +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.core import losses +from object_detection.core import preprocessor +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner as cn_assigner +from object_detection.meta_architectures import center_net_meta_arch as cnma +from object_detection.models import center_net_resnet_feature_extractor +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaArchPredictionHeadTest( + test_case.TestCase, parameterized.TestCase): + """Test CenterNet meta architecture prediction head.""" + + @parameterized.parameters([True, False]) + def test_prediction_head(self, use_depthwise): + head = cnma.make_prediction_net(num_out_channels=7, + use_depthwise=use_depthwise) + output = head(np.zeros((4, 128, 128, 8))) + + self.assertEqual((4, 128, 128, 7), output.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaArchHelpersTest(test_case.TestCase, parameterized.TestCase): + """Test for CenterNet meta architecture related functions.""" + + def test_row_col_indices_from_flattened_indices(self): + """Tests that the computation of row, col, channel indices is correct.""" + + r_grid, c_grid, ch_grid = (np.zeros((5, 4, 3), dtype=np.int), + np.zeros((5, 4, 3), dtype=np.int), + np.zeros((5, 4, 3), dtype=np.int)) + + r_grid[..., 0] = r_grid[..., 1] = r_grid[..., 2] = np.array( + [[0, 0, 0, 0], + [1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3], + [4, 4, 4, 4]] + ) + + c_grid[..., 0] = c_grid[..., 1] = c_grid[..., 2] = np.array( + [[0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3]] + ) + + for i in range(3): + ch_grid[..., i] = i + + indices = np.arange(60) + ri, ci, chi = cnma.row_col_channel_indices_from_flattened_indices( + indices, 4, 3) + + np.testing.assert_array_equal(ri, r_grid.flatten()) + np.testing.assert_array_equal(ci, c_grid.flatten()) + np.testing.assert_array_equal(chi, ch_grid.flatten()) + + def test_flattened_indices_from_row_col_indices(self): + + r = np.array( + [[0, 0, 0, 0], + [1, 1, 1, 1], + [2, 2, 2, 2]] + ) + + c = np.array( + [[0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3]] + ) + + idx = cnma.flattened_indices_from_row_col_indices(r, c, 4) + np.testing.assert_array_equal(np.arange(12), idx.flatten()) + + def test_get_valid_anchor_weights_in_flattened_image(self): + """Tests that the anchor weights are valid upon flattening out.""" + + valid_weights = np.zeros((2, 5, 5), dtype=np.float) + + valid_weights[0, :3, :4] = 1.0 + valid_weights[1, :2, :2] = 1.0 + + def graph_fn(): + true_image_shapes = tf.constant([[3, 4], [2, 2]]) + w = cnma.get_valid_anchor_weights_in_flattened_image( + true_image_shapes, 5, 5) + return w + + w = self.execute(graph_fn, []) + np.testing.assert_allclose(w, valid_weights.reshape(2, -1)) + self.assertEqual((2, 25), w.shape) + + def test_convert_strided_predictions_to_normalized_boxes(self): + """Tests that boxes have correct coordinates in normalized input space.""" + + def graph_fn(): + boxes = np.zeros((2, 3, 4), dtype=np.float32) + + boxes[0] = [[10, 20, 30, 40], [20, 30, 50, 100], [50, 60, 
100, 180]] + boxes[1] = [[-5, -5, 5, 5], [45, 60, 110, 120], [150, 150, 200, 250]] + + true_image_shapes = tf.constant([[100, 90, 3], [150, 150, 3]]) + + clipped_boxes = ( + cnma.convert_strided_predictions_to_normalized_boxes( + boxes, 2, true_image_shapes)) + return clipped_boxes + + clipped_boxes = self.execute(graph_fn, []) + + expected_boxes = np.zeros((2, 3, 4), dtype=np.float32) + expected_boxes[0] = [[0.2, 4./9, 0.6, 8./9], [0.4, 2./3, 1, 1], + [1, 1, 1, 1]] + expected_boxes[1] = [[0., 0, 1./15, 1./15], [3./5, 4./5, 1, 1], + [1, 1, 1, 1]] + + np.testing.assert_allclose(expected_boxes, clipped_boxes) + + @parameterized.parameters( + {'clip_to_window': True}, + {'clip_to_window': False} + ) + def test_convert_strided_predictions_to_normalized_keypoints( + self, clip_to_window): + """Tests that keypoints have correct coordinates in normalized coords.""" + + keypoint_coords_np = np.array( + [ + # Example 0. + [ + [[-10., 8.], [60., 22.], [60., 120.]], + [[20., 20.], [0., 0.], [0., 0.]], + ], + # Example 1. + [ + [[40., 50.], [20., 160.], [200., 150.]], + [[10., 0.], [40., 10.], [0., 0.]], + ], + ], dtype=np.float32) + keypoint_scores_np = np.array( + [ + # Example 0. + [ + [1.0, 0.9, 0.2], + [0.7, 0.0, 0.0], + ], + # Example 1. + [ + [1.0, 1.0, 0.2], + [0.7, 0.6, 0.0], + ], + ], dtype=np.float32) + + def graph_fn(): + keypoint_coords = tf.constant(keypoint_coords_np, dtype=tf.float32) + keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32) + true_image_shapes = tf.constant([[320, 400, 3], [640, 640, 3]]) + stride = 4 + + keypoint_coords_out, keypoint_scores_out = ( + cnma.convert_strided_predictions_to_normalized_keypoints( + keypoint_coords, keypoint_scores, stride, true_image_shapes, + clip_to_window)) + return keypoint_coords_out, keypoint_scores_out + + keypoint_coords_out, keypoint_scores_out = self.execute(graph_fn, []) + + if clip_to_window: + expected_keypoint_coords_np = np.array( + [ + # Example 0. + [ + [[0.0, 0.08], [0.75, 0.22], [0.75, 1.0]], + [[0.25, 0.2], [0., 0.], [0.0, 0.0]], + ], + # Example 1. + [ + [[0.25, 0.3125], [0.125, 1.0], [1.0, 0.9375]], + [[0.0625, 0.], [0.25, 0.0625], [0., 0.]], + ], + ], dtype=np.float32) + expected_keypoint_scores_np = np.array( + [ + # Example 0. + [ + [0.0, 0.9, 0.0], + [0.7, 0.0, 0.0], + ], + # Example 1. + [ + [1.0, 1.0, 0.0], + [0.7, 0.6, 0.0], + ], + ], dtype=np.float32) + else: + expected_keypoint_coords_np = np.array( + [ + # Example 0. + [ + [[-0.125, 0.08], [0.75, 0.22], [0.75, 1.2]], + [[0.25, 0.2], [0., 0.], [0., 0.]], + ], + # Example 1. + [ + [[0.25, 0.3125], [0.125, 1.0], [1.25, 0.9375]], + [[0.0625, 0.], [0.25, 0.0625], [0., 0.]], + ], + ], dtype=np.float32) + expected_keypoint_scores_np = np.array( + [ + # Example 0. + [ + [1.0, 0.9, 0.2], + [0.7, 0.0, 0.0], + ], + # Example 1. + [ + [1.0, 1.0, 0.2], + [0.7, 0.6, 0.0], + ], + ], dtype=np.float32) + np.testing.assert_allclose(expected_keypoint_coords_np, keypoint_coords_out) + np.testing.assert_allclose(expected_keypoint_scores_np, keypoint_scores_out) + + def test_convert_strided_predictions_to_instance_masks(self): + + def graph_fn(): + boxes = tf.constant( + [ + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, 0.5, 1.0], + [0.0, 0.0, 0.0, 0.0]], + ], tf.float32) + classes = tf.constant( + [ + [0, 1, 0], + ], tf.int32) + masks_np = np.zeros((1, 4, 4, 2), dtype=np.float32) + masks_np[0, :, 2:, 0] = 1 # Class 0. + masks_np[0, :, :3, 1] = 1 # Class 1. 
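+      # Reading of this setup (illustrative): with stride=2 the 4x4 mask map
+      # covers an 8x8 padded input while the true image is 6x8; each
+      # detection's mask is cropped to its box and resized to the 2x2
+      # (mask_height x mask_width) output before thresholding.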
+ masks = tf.constant(masks_np) + true_image_shapes = tf.constant([[6, 8, 3]]) + instance_masks, _ = cnma.convert_strided_predictions_to_instance_masks( + boxes, classes, masks, stride=2, mask_height=2, mask_width=2, + true_image_shapes=true_image_shapes) + return instance_masks + + instance_masks = self.execute_cpu(graph_fn, []) + + expected_instance_masks = np.array( + [ + [ + # Mask 0 (class 0). + [[1, 1], + [1, 1]], + # Mask 1 (class 1). + [[1, 0], + [1, 0]], + # Mask 2 (class 0). + [[0, 0], + [0, 0]], + ] + ]) + np.testing.assert_array_equal(expected_instance_masks, instance_masks) + + def test_convert_strided_predictions_raises_error_with_one_tensor(self): + def graph_fn(): + boxes = tf.constant( + [ + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.5, 0.5, 1.0], + [0.0, 0.0, 0.0, 0.0]], + ], tf.float32) + classes = tf.constant( + [ + [0, 1, 0], + ], tf.int32) + masks_np = np.zeros((1, 4, 4, 2), dtype=np.float32) + masks_np[0, :, 2:, 0] = 1 # Class 0. + masks_np[0, :, :3, 1] = 1 # Class 1. + masks = tf.constant(masks_np) + true_image_shapes = tf.constant([[6, 8, 3]]) + densepose_part_heatmap = tf.random.uniform( + [1, 4, 4, 24]) + instance_masks, _ = cnma.convert_strided_predictions_to_instance_masks( + boxes, classes, masks, true_image_shapes, + densepose_part_heatmap=densepose_part_heatmap, + densepose_surface_coords=None) + return instance_masks + + with self.assertRaises(ValueError): + self.execute_cpu(graph_fn, []) + + def test_crop_and_threshold_masks(self): + boxes_np = np.array( + [[0., 0., 0.5, 0.5], + [0.25, 0.25, 1.0, 1.0]], dtype=np.float32) + classes_np = np.array([0, 2], dtype=np.int32) + masks_np = np.zeros((4, 4, _NUM_CLASSES), dtype=np.float32) + masks_np[0, 0, 0] = 0.8 + masks_np[1, 1, 0] = 0.6 + masks_np[3, 3, 2] = 0.7 + part_heatmap_np = np.zeros((4, 4, _DENSEPOSE_NUM_PARTS), dtype=np.float32) + part_heatmap_np[0, 0, 4] = 1 + part_heatmap_np[0, 0, 2] = 0.6 # Lower scoring. + part_heatmap_np[1, 1, 8] = 0.2 + part_heatmap_np[3, 3, 4] = 0.5 + surf_coords_np = np.zeros((4, 4, 2 * _DENSEPOSE_NUM_PARTS), + dtype=np.float32) + surf_coords_np[:, :, 8:10] = 0.2, 0.9 + surf_coords_np[:, :, 16:18] = 0.3, 0.5 + true_height, true_width = 10, 10 + input_height, input_width = 10, 10 + mask_height = 4 + mask_width = 4 + def graph_fn(): + elems = [ + tf.constant(boxes_np), + tf.constant(classes_np), + tf.constant(masks_np), + tf.constant(part_heatmap_np), + tf.constant(surf_coords_np), + tf.constant(true_height, dtype=tf.int32), + tf.constant(true_width, dtype=tf.int32) + ] + part_masks, surface_coords = cnma.crop_and_threshold_masks( + elems, input_height, input_width, mask_height=mask_height, + mask_width=mask_width, densepose_class_index=0) + return part_masks, surface_coords + + part_masks, surface_coords = self.execute_cpu(graph_fn, []) + + expected_part_masks = np.zeros((2, 4, 4), dtype=np.uint8) + expected_part_masks[0, 0, 0] = 5 # Recall classes are 1-indexed in output. + expected_part_masks[0, 2, 2] = 9 # Recall classes are 1-indexed in output. + expected_part_masks[1, 3, 3] = 1 # Standard instance segmentation mask. 
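+    # The DensePose entries above follow the highest-scoring part in
+    # part_heatmap_np (part 4 -> 5 and part 8 -> 9 after 1-indexing), while
+    # the second instance (class 2, not the DensePose class) keeps a plain
+    # binary instance mask.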
+ expected_surface_coords = np.zeros((2, 4, 4, 2), dtype=np.float32) + expected_surface_coords[0, 0, 0, :] = 0.2, 0.9 + expected_surface_coords[0, 2, 2, :] = 0.3, 0.5 + np.testing.assert_allclose(expected_part_masks, part_masks) + np.testing.assert_allclose(expected_surface_coords, surface_coords) + + def test_gather_surface_coords_for_parts(self): + surface_coords_cropped_np = np.zeros((2, 5, 5, _DENSEPOSE_NUM_PARTS, 2), + dtype=np.float32) + surface_coords_cropped_np[0, 0, 0, 5] = 0.3, 0.4 + surface_coords_cropped_np[0, 1, 0, 9] = 0.5, 0.6 + highest_scoring_part_np = np.zeros((2, 5, 5), dtype=np.int32) + highest_scoring_part_np[0, 0, 0] = 5 + highest_scoring_part_np[0, 1, 0] = 9 + def graph_fn(): + surface_coords_cropped = tf.constant(surface_coords_cropped_np, + tf.float32) + highest_scoring_part = tf.constant(highest_scoring_part_np, tf.int32) + surface_coords_gathered = cnma.gather_surface_coords_for_parts( + surface_coords_cropped, highest_scoring_part) + return surface_coords_gathered + + surface_coords_gathered = self.execute_cpu(graph_fn, []) + + np.testing.assert_allclose([0.3, 0.4], surface_coords_gathered[0, 0, 0]) + np.testing.assert_allclose([0.5, 0.6], surface_coords_gathered[0, 1, 0]) + + def test_top_k_feature_map_locations(self): + feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + feature_map_np[0, 2, 0, 1] = 1.0 + feature_map_np[0, 2, 1, 1] = 0.9 # Get's filtered due to max pool. + feature_map_np[0, 0, 1, 0] = 0.7 + feature_map_np[0, 2, 2, 0] = 0.5 + feature_map_np[0, 2, 2, 1] = -0.3 + feature_map_np[1, 2, 1, 1] = 0.7 + feature_map_np[1, 1, 0, 0] = 0.4 + feature_map_np[1, 1, 2, 0] = 0.1 + + def graph_fn(): + feature_map = tf.constant(feature_map_np) + scores, y_inds, x_inds, channel_inds = ( + cnma.top_k_feature_map_locations( + feature_map, max_pool_kernel_size=3, k=3)) + return scores, y_inds, x_inds, channel_inds + + scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) + + np.testing.assert_allclose([1.0, 0.7, 0.5], scores[0]) + np.testing.assert_array_equal([2, 0, 2], y_inds[0]) + np.testing.assert_array_equal([0, 1, 2], x_inds[0]) + np.testing.assert_array_equal([1, 0, 0], channel_inds[0]) + + np.testing.assert_allclose([0.7, 0.4, 0.1], scores[1]) + np.testing.assert_array_equal([2, 1, 1], y_inds[1]) + np.testing.assert_array_equal([1, 0, 2], x_inds[1]) + np.testing.assert_array_equal([1, 0, 0], channel_inds[1]) + + def test_top_k_feature_map_locations_no_pooling(self): + feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + feature_map_np[0, 2, 0, 1] = 1.0 + feature_map_np[0, 2, 1, 1] = 0.9 + feature_map_np[0, 0, 1, 0] = 0.7 + feature_map_np[0, 2, 2, 0] = 0.5 + feature_map_np[0, 2, 2, 1] = -0.3 + feature_map_np[1, 2, 1, 1] = 0.7 + feature_map_np[1, 1, 0, 0] = 0.4 + feature_map_np[1, 1, 2, 0] = 0.1 + + def graph_fn(): + feature_map = tf.constant(feature_map_np) + scores, y_inds, x_inds, channel_inds = ( + cnma.top_k_feature_map_locations( + feature_map, max_pool_kernel_size=1, k=3)) + return scores, y_inds, x_inds, channel_inds + + scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) + + np.testing.assert_allclose([1.0, 0.9, 0.7], scores[0]) + np.testing.assert_array_equal([2, 2, 0], y_inds[0]) + np.testing.assert_array_equal([0, 1, 1], x_inds[0]) + np.testing.assert_array_equal([1, 1, 0], channel_inds[0]) + + np.testing.assert_allclose([0.7, 0.4, 0.1], scores[1]) + np.testing.assert_array_equal([2, 1, 1], y_inds[1]) + np.testing.assert_array_equal([1, 0, 2], x_inds[1]) + np.testing.assert_array_equal([1, 0, 0], 
channel_inds[1]) + + def test_top_k_feature_map_locations_per_channel(self): + feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + feature_map_np[0, 2, 0, 0] = 1.0 # Selected. + feature_map_np[0, 2, 1, 0] = 0.9 # Get's filtered due to max pool. + feature_map_np[0, 0, 1, 0] = 0.7 # Selected. + feature_map_np[0, 2, 2, 1] = 0.5 # Selected. + feature_map_np[0, 0, 0, 1] = 0.3 # Selected. + feature_map_np[1, 2, 1, 0] = 0.7 # Selected. + feature_map_np[1, 1, 0, 0] = 0.4 # Get's filtered due to max pool. + feature_map_np[1, 1, 2, 0] = 0.3 # Get's filtered due to max pool. + feature_map_np[1, 1, 0, 1] = 0.8 # Selected. + feature_map_np[1, 1, 2, 1] = 0.3 # Selected. + + def graph_fn(): + feature_map = tf.constant(feature_map_np) + scores, y_inds, x_inds, channel_inds = ( + cnma.top_k_feature_map_locations( + feature_map, max_pool_kernel_size=3, k=2, per_channel=True)) + return scores, y_inds, x_inds, channel_inds + + scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) + + np.testing.assert_allclose([1.0, 0.7, 0.5, 0.3], scores[0]) + np.testing.assert_array_equal([2, 0, 2, 0], y_inds[0]) + np.testing.assert_array_equal([0, 1, 2, 0], x_inds[0]) + np.testing.assert_array_equal([0, 0, 1, 1], channel_inds[0]) + + np.testing.assert_allclose([0.7, 0.0, 0.8, 0.3], scores[1]) + np.testing.assert_array_equal([2, 0, 1, 1], y_inds[1]) + np.testing.assert_array_equal([1, 0, 0, 2], x_inds[1]) + np.testing.assert_array_equal([0, 0, 1, 1], channel_inds[1]) + + def test_top_k_feature_map_locations_k1(self): + feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + feature_map_np[0, 2, 0, 0] = 1.0 # Selected. + feature_map_np[0, 2, 1, 0] = 0.9 + feature_map_np[0, 0, 1, 0] = 0.7 + feature_map_np[0, 2, 2, 1] = 0.5 + feature_map_np[0, 0, 0, 1] = 0.3 + feature_map_np[1, 2, 1, 0] = 0.7 + feature_map_np[1, 1, 0, 0] = 0.4 + feature_map_np[1, 1, 2, 0] = 0.3 + feature_map_np[1, 1, 0, 1] = 0.8 # Selected. + feature_map_np[1, 1, 2, 1] = 0.3 + + def graph_fn(): + feature_map = tf.constant(feature_map_np) + scores, y_inds, x_inds, channel_inds = ( + cnma.top_k_feature_map_locations( + feature_map, max_pool_kernel_size=3, k=1, per_channel=False)) + return scores, y_inds, x_inds, channel_inds + + scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) + + np.testing.assert_allclose([1.0], scores[0]) + np.testing.assert_array_equal([2], y_inds[0]) + np.testing.assert_array_equal([0], x_inds[0]) + np.testing.assert_array_equal([0], channel_inds[0]) + + np.testing.assert_allclose([0.8], scores[1]) + np.testing.assert_array_equal([1], y_inds[1]) + np.testing.assert_array_equal([0], x_inds[1]) + np.testing.assert_array_equal([1], channel_inds[1]) + + def test_top_k_feature_map_locations_k1_per_channel(self): + feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + feature_map_np[0, 2, 0, 0] = 1.0 # Selected. + feature_map_np[0, 2, 1, 0] = 0.9 + feature_map_np[0, 0, 1, 0] = 0.7 + feature_map_np[0, 2, 2, 1] = 0.5 # Selected. + feature_map_np[0, 0, 0, 1] = 0.3 + feature_map_np[1, 2, 1, 0] = 0.7 # Selected. + feature_map_np[1, 1, 0, 0] = 0.4 + feature_map_np[1, 1, 2, 0] = 0.3 + feature_map_np[1, 1, 0, 1] = 0.8 # Selected. 
+ feature_map_np[1, 1, 2, 1] = 0.3 + + def graph_fn(): + feature_map = tf.constant(feature_map_np) + scores, y_inds, x_inds, channel_inds = ( + cnma.top_k_feature_map_locations( + feature_map, max_pool_kernel_size=3, k=1, per_channel=True)) + return scores, y_inds, x_inds, channel_inds + + scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, []) + + np.testing.assert_allclose([1.0, 0.5], scores[0]) + np.testing.assert_array_equal([2, 2], y_inds[0]) + np.testing.assert_array_equal([0, 2], x_inds[0]) + np.testing.assert_array_equal([0, 1], channel_inds[0]) + + np.testing.assert_allclose([0.7, 0.8], scores[1]) + np.testing.assert_array_equal([2, 1], y_inds[1]) + np.testing.assert_array_equal([1, 0], x_inds[1]) + np.testing.assert_array_equal([0, 1], channel_inds[1]) + + def test_box_prediction(self): + + class_pred = np.zeros((3, 128, 128, 5), dtype=np.float32) + hw_pred = np.zeros((3, 128, 128, 2), dtype=np.float32) + offset_pred = np.zeros((3, 128, 128, 2), dtype=np.float32) + + # Sample 1, 2 boxes + class_pred[0, 10, 20] = [0.3, .7, 0.0, 0.0, 0.0] + hw_pred[0, 10, 20] = [40, 60] + offset_pred[0, 10, 20] = [1, 2] + + class_pred[0, 50, 60] = [0.55, 0.0, 0.0, 0.0, 0.45] + hw_pred[0, 50, 60] = [50, 50] + offset_pred[0, 50, 60] = [0, 0] + + # Sample 2, 2 boxes (at same location) + class_pred[1, 100, 100] = [0.0, 0.1, 0.9, 0.0, 0.0] + hw_pred[1, 100, 100] = [10, 10] + offset_pred[1, 100, 100] = [1, 3] + + # Sample 3, 3 boxes + class_pred[2, 60, 90] = [0.0, 0.0, 0.0, 0.2, 0.8] + hw_pred[2, 60, 90] = [40, 30] + offset_pred[2, 60, 90] = [0, 0] + + class_pred[2, 65, 95] = [0.0, 0.7, 0.3, 0.0, 0.0] + hw_pred[2, 65, 95] = [20, 20] + offset_pred[2, 65, 95] = [1, 2] + + class_pred[2, 75, 85] = [1.0, 0.0, 0.0, 0.0, 0.0] + hw_pred[2, 75, 85] = [21, 25] + offset_pred[2, 75, 85] = [5, 2] + + def graph_fn(): + class_pred_tensor = tf.constant(class_pred) + hw_pred_tensor = tf.constant(hw_pred) + offset_pred_tensor = tf.constant(offset_pred) + + detection_scores, y_indices, x_indices, channel_indices = ( + cnma.top_k_feature_map_locations( + class_pred_tensor, max_pool_kernel_size=3, k=2)) + + boxes, classes, scores, num_dets = cnma.prediction_tensors_to_boxes( + detection_scores, y_indices, x_indices, channel_indices, + hw_pred_tensor, offset_pred_tensor) + return boxes, classes, scores, num_dets + + boxes, classes, scores, num_dets = self.execute(graph_fn, []) + + np.testing.assert_array_equal(num_dets, [2, 2, 2]) + + np.testing.assert_allclose( + [[-9, -8, 31, 52], [25, 35, 75, 85]], boxes[0]) + np.testing.assert_allclose( + [[96, 98, 106, 108], [96, 98, 106, 108]], boxes[1]) + np.testing.assert_allclose( + [[69.5, 74.5, 90.5, 99.5], [40, 75, 80, 105]], boxes[2]) + + np.testing.assert_array_equal(classes[0], [1, 0]) + np.testing.assert_array_equal(classes[1], [2, 1]) + np.testing.assert_array_equal(classes[2], [0, 4]) + + np.testing.assert_allclose(scores[0], [.7, .55]) + np.testing.assert_allclose(scores[1][:1], [.9]) + np.testing.assert_allclose(scores[2], [1., .8]) + + def test_offset_prediction(self): + + class_pred = np.zeros((3, 128, 128, 5), dtype=np.float32) + offset_pred = np.zeros((3, 128, 128, 2), dtype=np.float32) + + # Sample 1, 2 boxes + class_pred[0, 10, 20] = [0.3, .7, 0.0, 0.0, 0.0] + offset_pred[0, 10, 20] = [1, 2] + + class_pred[0, 50, 60] = [0.55, 0.0, 0.0, 0.0, 0.45] + offset_pred[0, 50, 60] = [0, 0] + + # Sample 2, 2 boxes (at same location) + class_pred[1, 100, 100] = [0.0, 0.1, 0.9, 0.0, 0.0] + offset_pred[1, 100, 100] = [1, 3] + + # Sample 3, 3 boxes + class_pred[2, 60, 90] 
= [0.0, 0.0, 0.0, 0.2, 0.8] + offset_pred[2, 60, 90] = [0, 0] + + class_pred[2, 65, 95] = [0.0, 0.7, 0.3, 0.0, 0.0] + offset_pred[2, 65, 95] = [1, 2] + + class_pred[2, 75, 85] = [1.0, 0.0, 0.0, 0.0, 0.0] + offset_pred[2, 75, 85] = [5, 2] + + def graph_fn(): + class_pred_tensor = tf.constant(class_pred) + offset_pred_tensor = tf.constant(offset_pred) + + _, y_indices, x_indices, _ = ( + cnma.top_k_feature_map_locations( + class_pred_tensor, max_pool_kernel_size=3, k=2)) + + offsets = cnma.prediction_tensors_to_temporal_offsets( + y_indices, x_indices, offset_pred_tensor) + return offsets + + offsets = self.execute(graph_fn, []) + + np.testing.assert_allclose( + [[1, 2], [0, 0]], offsets[0]) + np.testing.assert_allclose( + [[1, 3], [1, 3]], offsets[1]) + np.testing.assert_allclose( + [[5, 2], [0, 0]], offsets[2]) + + def test_keypoint_candidate_prediction(self): + keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + keypoint_heatmap_np[0, 0, 0, 0] = 1.0 + keypoint_heatmap_np[0, 2, 1, 0] = 0.7 + keypoint_heatmap_np[0, 1, 1, 0] = 0.6 + keypoint_heatmap_np[0, 0, 2, 1] = 0.7 + keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score. + keypoint_heatmap_np[0, 2, 2, 1] = 0.2 + keypoint_heatmap_np[1, 1, 0, 0] = 0.6 + keypoint_heatmap_np[1, 2, 1, 0] = 0.5 + keypoint_heatmap_np[1, 0, 0, 0] = 0.4 + keypoint_heatmap_np[1, 0, 0, 1] = 1.0 + keypoint_heatmap_np[1, 0, 1, 1] = 0.9 + keypoint_heatmap_np[1, 2, 0, 1] = 0.8 + + keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25] + keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5] + keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0] + keypoint_heatmap_offsets_np[0, 0, 2] = [1.0, 0.0] + keypoint_heatmap_offsets_np[0, 2, 2] = [1.0, 1.0] + keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5] + keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0] + keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, -0.5] + keypoint_heatmap_offsets_np[1, 0, 1] = [0.5, -0.5] + keypoint_heatmap_offsets_np[1, 2, 0] = [-1.0, -0.5] + + def graph_fn(): + keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32) + keypoint_heatmap_offsets = tf.constant( + keypoint_heatmap_offsets_np, dtype=tf.float32) + + keypoint_cands, keypoint_scores, num_keypoint_candidates = ( + cnma.prediction_tensors_to_keypoint_candidates( + keypoint_heatmap, + keypoint_heatmap_offsets, + keypoint_score_threshold=0.5, + max_pool_kernel_size=1, + max_candidates=2)) + return keypoint_cands, keypoint_scores, num_keypoint_candidates + + (keypoint_cands, keypoint_scores, + num_keypoint_candidates) = self.execute(graph_fn, []) + + expected_keypoint_candidates = [ + [ # Example 0. + [[0.5, 0.25], [1.0, 2.0]], # Keypoint 1. + [[1.75, 1.5], [1.0, 1.0]], # Keypoint 2. + ], + [ # Example 1. + [[1.25, 0.5], [0.0, -0.5]], # Keypoint 1. + [[2.5, 1.0], [0.5, 0.5]], # Keypoint 2. + ], + ] + expected_keypoint_scores = [ + [ # Example 0. + [1.0, 0.7], # Keypoint 1. + [0.7, 0.3], # Keypoint 2. + ], + [ # Example 1. + [0.6, 1.0], # Keypoint 1. + [0.5, 0.9], # Keypoint 2. 
+ ], + ] + expected_num_keypoint_candidates = [ + [2, 1], + [2, 2] + ] + np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands) + np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores) + np.testing.assert_array_equal(expected_num_keypoint_candidates, + num_keypoint_candidates) + + def test_keypoint_candidate_prediction_per_keypoints(self): + keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32) + keypoint_heatmap_np[0, 0, 0, 0] = 1.0 + keypoint_heatmap_np[0, 2, 1, 0] = 0.7 + keypoint_heatmap_np[0, 1, 1, 0] = 0.6 + keypoint_heatmap_np[0, 0, 2, 1] = 0.7 + keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score. + keypoint_heatmap_np[0, 2, 2, 1] = 0.2 + keypoint_heatmap_np[1, 1, 0, 0] = 0.6 + keypoint_heatmap_np[1, 2, 1, 0] = 0.5 + keypoint_heatmap_np[1, 0, 0, 0] = 0.4 + keypoint_heatmap_np[1, 0, 0, 1] = 1.0 + keypoint_heatmap_np[1, 0, 1, 1] = 0.9 + keypoint_heatmap_np[1, 2, 0, 1] = 0.8 + + # Note that the keypoint offsets are now per keypoint (as opposed to + # keypoint agnostic, in the test test_keypoint_candidate_prediction). + keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 4), dtype=np.float32) + keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25, 0.0, 0.0] + keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5, 0.0, 0.0] + keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0, 0.0, 0.0] + keypoint_heatmap_offsets_np[0, 0, 2] = [0.0, 0.0, 1.0, 0.0] + keypoint_heatmap_offsets_np[0, 2, 2] = [0.0, 0.0, 1.0, 1.0] + keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5, 0.0, 0.0] + keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0, 0.0, 0.0] + keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, 0.0, 0.0, -0.5] + keypoint_heatmap_offsets_np[1, 0, 1] = [0.0, 0.0, 0.5, -0.5] + keypoint_heatmap_offsets_np[1, 2, 0] = [0.0, 0.0, -1.0, -0.5] + + def graph_fn(): + keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32) + keypoint_heatmap_offsets = tf.constant( + keypoint_heatmap_offsets_np, dtype=tf.float32) + + keypoint_cands, keypoint_scores, num_keypoint_candidates = ( + cnma.prediction_tensors_to_keypoint_candidates( + keypoint_heatmap, + keypoint_heatmap_offsets, + keypoint_score_threshold=0.5, + max_pool_kernel_size=1, + max_candidates=2)) + return keypoint_cands, keypoint_scores, num_keypoint_candidates + + (keypoint_cands, keypoint_scores, + num_keypoint_candidates) = self.execute(graph_fn, []) + + expected_keypoint_candidates = [ + [ # Example 0. + [[0.5, 0.25], [1.0, 2.0]], # Candidate 1 of keypoint 1, 2. + [[1.75, 1.5], [1.0, 1.0]], # Candidate 2 of keypoint 1, 2. + ], + [ # Example 1. + [[1.25, 0.5], [0.0, -0.5]], # Candidate 1 of keypoint 1, 2. + [[2.5, 1.0], [0.5, 0.5]], # Candidate 2 of keypoint 1, 2. + ], + ] + expected_keypoint_scores = [ + [ # Example 0. + [1.0, 0.7], # Candidate 1 scores of keypoint 1, 2. + [0.7, 0.3], # Candidate 2 scores of keypoint 1, 2. + ], + [ # Example 1. + [0.6, 1.0], # Candidate 1 scores of keypoint 1, 2. + [0.5, 0.9], # Candidate 2 scores of keypoint 1, 2. 
+ ], + ] + expected_num_keypoint_candidates = [ + [2, 1], + [2, 2] + ] + np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands) + np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores) + np.testing.assert_array_equal(expected_num_keypoint_candidates, + num_keypoint_candidates) + + def test_regressed_keypoints_at_object_centers(self): + batch_size = 2 + num_keypoints = 5 + num_instances = 6 + regressed_keypoint_feature_map_np = np.random.randn( + batch_size, 10, 10, 2 * num_keypoints).astype(np.float32) + y_indices = np.random.choice(10, (batch_size, num_instances)) + x_indices = np.random.choice(10, (batch_size, num_instances)) + offsets = np.stack([y_indices, x_indices], axis=2).astype(np.float32) + + def graph_fn(): + regressed_keypoint_feature_map = tf.constant( + regressed_keypoint_feature_map_np, dtype=tf.float32) + + gathered_regressed_keypoints = ( + cnma.regressed_keypoints_at_object_centers( + regressed_keypoint_feature_map, + tf.constant(y_indices, dtype=tf.int32), + tf.constant(x_indices, dtype=tf.int32))) + return gathered_regressed_keypoints + + gathered_regressed_keypoints = self.execute(graph_fn, []) + + expected_gathered_keypoints_0 = regressed_keypoint_feature_map_np[ + 0, y_indices[0], x_indices[0], :] + expected_gathered_keypoints_1 = regressed_keypoint_feature_map_np[ + 1, y_indices[1], x_indices[1], :] + expected_gathered_keypoints = np.stack([ + expected_gathered_keypoints_0, + expected_gathered_keypoints_1], axis=0) + expected_gathered_keypoints = np.reshape( + expected_gathered_keypoints, + [batch_size, num_instances, num_keypoints, 2]) + expected_gathered_keypoints += np.expand_dims(offsets, axis=2) + expected_gathered_keypoints = np.reshape( + expected_gathered_keypoints, + [batch_size, num_instances, -1]) + np.testing.assert_allclose(expected_gathered_keypoints, + gathered_regressed_keypoints) + + @parameterized.parameters( + {'candidate_ranking_mode': 'min_distance'}, + {'candidate_ranking_mode': 'score_distance_ratio'}, + ) + def test_refine_keypoints(self, candidate_ranking_mode): + regressed_keypoints_np = np.array( + [ + # Example 0. + [ + [[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]], # Instance 0. + [[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]], # Instance 1. + ], + # Example 1. + [ + [[6.0, 2.0], [0.0, 0.0], [0.1, 0.1]], # Instance 0. + [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. + ], + ], dtype=np.float32) + keypoint_candidates_np = np.array( + [ + # Example 0. + [ + [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Candidate 0. + [[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]], # Candidate 1. + [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], # Candidate 2. + ], + # Example 1. + [ + [[6.0, 1.5], [0.1, 0.4], [0.0, 0.0]], # Candidate 0. + [[1.0, 4.0], [0.0, 0.3], [0.0, 0.0]], # Candidate 1. + [[0.0, 0.0], [0.1, 0.3], [0.0, 0.0]], # Candidate 2. + ] + ], dtype=np.float32) + keypoint_scores_np = np.array( + [ + # Example 0. + [ + [0.8, 0.9, 1.0], # Candidate 0. + [0.6, 0.1, 0.9], # Candidate 1. + [0.0, 0.0, 0.0], # Candidate 1. + ], + # Example 1. + [ + [0.7, 0.3, 0.0], # Candidate 0. + [0.6, 0.1, 0.0], # Candidate 1. + [0.0, 0.28, 0.0], # Candidate 1. + ] + ], dtype=np.float32) + num_keypoints_candidates_np = np.array( + [ + # Example 0. + [2, 2, 2], + # Example 1. 
+ [2, 3, 0], + ], dtype=np.int32) + unmatched_keypoint_score = 0.1 + + def graph_fn(): + regressed_keypoints = tf.constant( + regressed_keypoints_np, dtype=tf.float32) + keypoint_candidates = tf.constant( + keypoint_candidates_np, dtype=tf.float32) + keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32) + num_keypoint_candidates = tf.constant(num_keypoints_candidates_np, + dtype=tf.int32) + refined_keypoints, refined_scores = cnma.refine_keypoints( + regressed_keypoints, keypoint_candidates, keypoint_scores, + num_keypoint_candidates, bboxes=None, + unmatched_keypoint_score=unmatched_keypoint_score, + box_scale=1.2, candidate_search_scale=0.3, + candidate_ranking_mode=candidate_ranking_mode) + return refined_keypoints, refined_scores + + refined_keypoints, refined_scores = self.execute(graph_fn, []) + + if candidate_ranking_mode == 'min_distance': + expected_refined_keypoints = np.array( + [ + # Example 0. + [ + [[2.0, 2.5], [6.0, 10.5], [14.0, 7.0]], # Instance 0. + [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1. + ], + # Example 1. + [ + [[6.0, 1.5], [0.0, 0.3], [0.1, 0.1]], # Instance 0. + [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. + ], + ], dtype=np.float32) + expected_refined_scores = np.array( + [ + # Example 0. + [ + [0.8, 0.9, unmatched_keypoint_score], # Instance 0. + [unmatched_keypoint_score, # Instance 1. + unmatched_keypoint_score, 1.0], + ], + # Example 1. + [ + [0.7, 0.1, unmatched_keypoint_score], # Instance 0. + [unmatched_keypoint_score, # Instance 1. + 0.1, unmatched_keypoint_score], + ], + ], dtype=np.float32) + else: + expected_refined_keypoints = np.array( + [ + # Example 0. + [ + [[2.0, 2.5], [6.0, 10.5], [14.0, 7.0]], # Instance 0. + [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1. + ], + # Example 1. + [ + [[6.0, 1.5], [0.1, 0.3], [0.1, 0.1]], # Instance 0. + [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. + ], + ], dtype=np.float32) + expected_refined_scores = np.array( + [ + # Example 0. + [ + [0.8, 0.9, unmatched_keypoint_score], # Instance 0. + [unmatched_keypoint_score, # Instance 1. + unmatched_keypoint_score, 1.0], + ], + # Example 1. + [ + [0.7, 0.28, unmatched_keypoint_score], # Instance 0. + [unmatched_keypoint_score, # Instance 1. + 0.1, unmatched_keypoint_score], + ], + ], dtype=np.float32) + + np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints) + np.testing.assert_allclose(expected_refined_scores, refined_scores) + + def test_refine_keypoints_with_bboxes(self): + regressed_keypoints_np = np.array( + [ + # Example 0. + [ + [[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]], # Instance 0. + [[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]], # Instance 1. + ], + # Example 1. + [ + [[6.0, 2.0], [0.0, 0.0], [0.1, 0.1]], # Instance 0. + [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. + ], + ], dtype=np.float32) + keypoint_candidates_np = np.array( + [ + # Example 0. + [ + [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]], # Candidate 0. + [[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]], # Candidate 1. + ], + # Example 1. + [ + [[6.0, 1.5], [5.0, 5.0], [0.0, 0.0]], # Candidate 0. + [[1.0, 4.0], [0.0, 0.3], [0.0, 0.0]], # Candidate 1. + ] + ], dtype=np.float32) + keypoint_scores_np = np.array( + [ + # Example 0. + [ + [0.8, 0.9, 1.0], # Candidate 0. + [0.6, 0.1, 0.9], # Candidate 1. + ], + # Example 1. + [ + [0.7, 0.4, 0.0], # Candidate 0. + [0.6, 0.1, 0.0], # Candidate 1. + ] + ], dtype=np.float32) + num_keypoints_candidates_np = np.array( + [ + # Example 0. + [2, 2, 2], + # Example 1. 
+ [2, 2, 0], + ], dtype=np.int32) + bboxes_np = np.array( + [ + # Example 0. + [ + [2.0, 2.0, 14.0, 10.0], # Instance 0. + [0.0, 3.0, 5.0, 7.0], # Instance 1. + ], + # Example 1. + [ + [0.0, 0.0, 6.0, 2.0], # Instance 0. + [5.0, 1.4, 9.0, 5.0], # Instance 1. + ], + ], dtype=np.float32) + unmatched_keypoint_score = 0.1 + + def graph_fn(): + regressed_keypoints = tf.constant( + regressed_keypoints_np, dtype=tf.float32) + keypoint_candidates = tf.constant( + keypoint_candidates_np, dtype=tf.float32) + keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32) + num_keypoint_candidates = tf.constant(num_keypoints_candidates_np, + dtype=tf.int32) + bboxes = tf.constant(bboxes_np, dtype=tf.float32) + refined_keypoints, refined_scores = cnma.refine_keypoints( + regressed_keypoints, keypoint_candidates, keypoint_scores, + num_keypoint_candidates, bboxes=bboxes, + unmatched_keypoint_score=unmatched_keypoint_score, + box_scale=1.0, candidate_search_scale=0.3) + return refined_keypoints, refined_scores + + refined_keypoints, refined_scores = self.execute(graph_fn, []) + + expected_refined_keypoints = np.array( + [ + # Example 0. + [ + [[2.0, 2.5], [6.0, 10.0], [14.0, 7.0]], # Instance 0. + [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]], # Instance 1. + ], + # Example 1. + [ + [[6.0, 1.5], [0.0, 0.3], [0.1, 0.1]], # Instance 0. + [[6.0, 1.5], [5.0, 5.0], [9.0, 3.0]], # Instance 1. + ], + ], dtype=np.float32) + expected_refined_scores = np.array( + [ + # Example 0. + [ + [0.8, unmatched_keypoint_score, # Instance 0. + unmatched_keypoint_score], + [unmatched_keypoint_score, # Instance 1. + unmatched_keypoint_score, 1.0], + ], + # Example 1. + [ + [0.7, 0.1, unmatched_keypoint_score], # Instance 0. + [0.7, 0.4, unmatched_keypoint_score], # Instance 1. + ], + ], dtype=np.float32) + + np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints) + np.testing.assert_allclose(expected_refined_scores, refined_scores) + + def test_pad_to_full_keypoint_dim(self): + batch_size = 4 + num_instances = 8 + num_keypoints = 2 + keypoint_inds = [1, 3] + num_total_keypoints = 5 + + kpt_coords_np = np.random.randn(batch_size, num_instances, num_keypoints, 2) + kpt_scores_np = np.random.randn(batch_size, num_instances, num_keypoints) + + def graph_fn(): + kpt_coords = tf.constant(kpt_coords_np) + kpt_scores = tf.constant(kpt_scores_np) + kpt_coords_padded, kpt_scores_padded = ( + cnma._pad_to_full_keypoint_dim( + kpt_coords, kpt_scores, keypoint_inds, num_total_keypoints)) + return kpt_coords_padded, kpt_scores_padded + + kpt_coords_padded, kpt_scores_padded = self.execute(graph_fn, []) + + self.assertAllEqual([batch_size, num_instances, num_total_keypoints, 2], + kpt_coords_padded.shape) + self.assertAllEqual([batch_size, num_instances, num_total_keypoints], + kpt_scores_padded.shape) + + for i, kpt_ind in enumerate(keypoint_inds): + np.testing.assert_allclose(kpt_coords_np[:, :, i, :], + kpt_coords_padded[:, :, kpt_ind, :]) + np.testing.assert_allclose(kpt_scores_np[:, :, i], + kpt_scores_padded[:, :, kpt_ind]) + + def test_pad_to_full_instance_dim(self): + batch_size = 4 + max_instances = 8 + num_keypoints = 6 + num_instances = 2 + instance_inds = [1, 3] + + kpt_coords_np = np.random.randn(batch_size, num_instances, num_keypoints, 2) + kpt_scores_np = np.random.randn(batch_size, num_instances, num_keypoints) + + def graph_fn(): + kpt_coords = tf.constant(kpt_coords_np) + kpt_scores = tf.constant(kpt_scores_np) + kpt_coords_padded, kpt_scores_padded = ( + cnma._pad_to_full_instance_dim( + kpt_coords, 
kpt_scores, instance_inds, max_instances)) + return kpt_coords_padded, kpt_scores_padded + + kpt_coords_padded, kpt_scores_padded = self.execute(graph_fn, []) + + self.assertAllEqual([batch_size, max_instances, num_keypoints, 2], + kpt_coords_padded.shape) + self.assertAllEqual([batch_size, max_instances, num_keypoints], + kpt_scores_padded.shape) + + for i, inst_ind in enumerate(instance_inds): + np.testing.assert_allclose(kpt_coords_np[:, i, :, :], + kpt_coords_padded[:, inst_ind, :, :]) + np.testing.assert_allclose(kpt_scores_np[:, i, :], + kpt_scores_padded[:, inst_ind, :]) + + def test_predicted_embeddings_at_object_centers(self): + batch_size = 2 + embedding_size = 5 + num_instances = 6 + predicted_embedding_feature_map_np = np.random.randn( + batch_size, 10, 10, embedding_size).astype(np.float32) + y_indices = np.random.choice(10, (batch_size, num_instances)) + x_indices = np.random.choice(10, (batch_size, num_instances)) + + def graph_fn(): + predicted_embedding_feature_map = tf.constant( + predicted_embedding_feature_map_np, dtype=tf.float32) + + gathered_predicted_embeddings = ( + cnma.predicted_embeddings_at_object_centers( + predicted_embedding_feature_map, + tf.constant(y_indices, dtype=tf.int32), + tf.constant(x_indices, dtype=tf.int32))) + return gathered_predicted_embeddings + + gathered_predicted_embeddings = self.execute(graph_fn, []) + + expected_gathered_embeddings_0 = predicted_embedding_feature_map_np[ + 0, y_indices[0], x_indices[0], :] + expected_gathered_embeddings_1 = predicted_embedding_feature_map_np[ + 1, y_indices[1], x_indices[1], :] + expected_gathered_embeddings = np.stack([ + expected_gathered_embeddings_0, + expected_gathered_embeddings_1], axis=0) + expected_gathered_embeddings = np.reshape( + expected_gathered_embeddings, + [batch_size, num_instances, embedding_size]) + np.testing.assert_allclose(expected_gathered_embeddings, + gathered_predicted_embeddings) + + +# Common parameters for setting up testing examples across tests. 
+_NUM_CLASSES = 10 +_KEYPOINT_INDICES = [0, 1, 2, 3] +_NUM_KEYPOINTS = len(_KEYPOINT_INDICES) +_DENSEPOSE_NUM_PARTS = 24 +_TASK_NAME = 'human_pose' +_NUM_TRACK_IDS = 3 +_REID_EMBED_SIZE = 2 +_NUM_FC_LAYERS = 1 + + +def get_fake_center_params(): + """Returns the fake object center parameter namedtuple.""" + return cnma.ObjectCenterParams( + classification_loss=losses.WeightedSigmoidClassificationLoss(), + object_center_loss_weight=1.0, + min_box_overlap_iou=1.0, + max_box_predictions=5, + use_labeled_classes=False) + + +def get_fake_od_params(): + """Returns the fake object detection parameter namedtuple.""" + return cnma.ObjectDetectionParams( + localization_loss=losses.L1LocalizationLoss(), + offset_loss_weight=1.0, + scale_loss_weight=0.1) + + +def get_fake_kp_params(): + """Returns the fake keypoint estimation parameter namedtuple.""" + return cnma.KeypointEstimationParams( + task_name=_TASK_NAME, + class_id=1, + keypoint_indices=_KEYPOINT_INDICES, + keypoint_std_dev=[0.00001] * len(_KEYPOINT_INDICES), + classification_loss=losses.WeightedSigmoidClassificationLoss(), + localization_loss=losses.L1LocalizationLoss(), + keypoint_candidate_score_threshold=0.1) + + +def get_fake_mask_params(): + """Returns the fake mask estimation parameter namedtuple.""" + return cnma.MaskParams( + classification_loss=losses.WeightedSoftmaxClassificationLoss(), + task_loss_weight=1.0, + mask_height=4, + mask_width=4) + + +def get_fake_densepose_params(): + """Returns the fake DensePose estimation parameter namedtuple.""" + return cnma.DensePoseParams( + class_id=1, + classification_loss=losses.WeightedSoftmaxClassificationLoss(), + localization_loss=losses.L1LocalizationLoss(), + part_loss_weight=1.0, + coordinate_loss_weight=1.0, + num_parts=_DENSEPOSE_NUM_PARTS, + task_loss_weight=1.0, + upsample_to_input_res=True, + upsample_method='nearest') + + +def get_fake_track_params(): + """Returns the fake object tracking parameter namedtuple.""" + return cnma.TrackParams( + num_track_ids=_NUM_TRACK_IDS, + reid_embed_size=_REID_EMBED_SIZE, + num_fc_layers=_NUM_FC_LAYERS, + classification_loss=losses.WeightedSoftmaxClassificationLoss(), + task_loss_weight=1.0) + + +def get_fake_temporal_offset_params(): + """Returns the fake temporal offset parameter namedtuple.""" + return cnma.TemporalOffsetParams( + localization_loss=losses.WeightedSmoothL1LocalizationLoss(), + task_loss_weight=1.0) + + +def build_center_net_meta_arch(build_resnet=False, num_classes=_NUM_CLASSES): + """Builds the CenterNet meta architecture.""" + if build_resnet: + feature_extractor = ( + center_net_resnet_feature_extractor.CenterNetResnetFeatureExtractor( + 'resnet_v2_101')) + else: + feature_extractor = DummyFeatureExtractor( + channel_means=(1.0, 2.0, 3.0), + channel_stds=(10., 20., 30.), + bgr_ordering=False, + num_feature_outputs=2, + stride=4) + image_resizer_fn = functools.partial( + preprocessor.resize_to_range, + min_dimension=128, + max_dimension=128, + pad_to_max_dimesnion=True) + + if num_classes == 1: + return cnma.CenterNetMetaArch( + is_training=True, + add_summaries=False, + num_classes=num_classes, + feature_extractor=feature_extractor, + image_resizer_fn=image_resizer_fn, + object_center_params=get_fake_center_params(), + object_detection_params=get_fake_od_params(), + keypoint_params_dict={_TASK_NAME: get_fake_kp_params()}) + else: + return cnma.CenterNetMetaArch( + is_training=True, + add_summaries=False, + num_classes=num_classes, + feature_extractor=feature_extractor, + image_resizer_fn=image_resizer_fn, + 
object_center_params=get_fake_center_params(), + object_detection_params=get_fake_od_params(), + keypoint_params_dict={_TASK_NAME: get_fake_kp_params()}, + mask_params=get_fake_mask_params(), + densepose_params=get_fake_densepose_params(), + track_params=get_fake_track_params(), + temporal_offset_params=get_fake_temporal_offset_params()) + + +def _logit(p): + return np.log( + (p + np.finfo(np.float32).eps) / (1 - p + np.finfo(np.float32).eps)) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaArchLibTest(test_case.TestCase): + """Test for CenterNet meta architecture related functions.""" + + def test_get_keypoint_name(self): + self.assertEqual('human_pose/keypoint_offset', + cnma.get_keypoint_name('human_pose', 'keypoint_offset')) + + def test_get_num_instances_from_weights(self): + weight1 = tf.constant([0.0, 0.0, 0.0], dtype=tf.float32) + weight2 = tf.constant([0.5, 0.9, 0.0], dtype=tf.float32) + weight3 = tf.constant([0.0, 0.0, 1.0], dtype=tf.float32) + + def graph_fn_1(): + # Total of three elements with non-zero values. + num_instances = cnma.get_num_instances_from_weights( + [weight1, weight2, weight3]) + return num_instances + num_instances = self.execute(graph_fn_1, []) + self.assertAlmostEqual(3, num_instances) + + # No non-zero value in the weights. Return minimum value: 1. + def graph_fn_2(): + # Total of three elements with non-zero values. + num_instances = cnma.get_num_instances_from_weights([weight1, weight1]) + return num_instances + num_instances = self.execute(graph_fn_2, []) + self.assertAlmostEqual(1, num_instances) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaArchTest(test_case.TestCase, parameterized.TestCase): + """Tests for the CenterNet meta architecture.""" + + def test_construct_prediction_heads(self): + model = build_center_net_meta_arch() + fake_feature_map = np.zeros((4, 128, 128, 8)) + + # Check the dictionary contains expected keys and corresponding heads with + # correct dimensions. 
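+ # (Each head below is expected to preserve the 128x128 spatial size of the
+ # fake feature map and to differ only in its number of output channels.)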
+ # "object center" head: + output = model._prediction_head_dict[cnma.OBJECT_CENTER][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, _NUM_CLASSES), output.shape) + + # "object scale" (height/width) head: + output = model._prediction_head_dict[cnma.BOX_SCALE][-1](fake_feature_map) + self.assertEqual((4, 128, 128, 2), output.shape) + + # "object offset" head: + output = model._prediction_head_dict[cnma.BOX_OFFSET][-1](fake_feature_map) + self.assertEqual((4, 128, 128, 2), output.shape) + + # "keypoint offset" head: + output = model._prediction_head_dict[ + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET)][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, 2), output.shape) + + # "keypoint heatmap" head: + output = model._prediction_head_dict[cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_HEATMAP)][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, _NUM_KEYPOINTS), output.shape) + + # "keypoint regression" head: + output = model._prediction_head_dict[cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_REGRESSION)][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, 2 * _NUM_KEYPOINTS), output.shape) + + # "mask" head: + output = model._prediction_head_dict[cnma.SEGMENTATION_HEATMAP][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, _NUM_CLASSES), output.shape) + + # "densepose parts" head: + output = model._prediction_head_dict[cnma.DENSEPOSE_HEATMAP][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, _DENSEPOSE_NUM_PARTS), output.shape) + + # "densepose surface coordinates" head: + output = model._prediction_head_dict[cnma.DENSEPOSE_REGRESSION][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, 2 * _DENSEPOSE_NUM_PARTS), output.shape) + + # "track embedding" head: + output = model._prediction_head_dict[cnma.TRACK_REID][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, _REID_EMBED_SIZE), output.shape) + + # "temporal offset" head: + output = model._prediction_head_dict[cnma.TEMPORAL_OFFSET][-1]( + fake_feature_map) + self.assertEqual((4, 128, 128, 2), output.shape) + + def test_initialize_target_assigners(self): + model = build_center_net_meta_arch() + assigner_dict = model._initialize_target_assigners( + stride=2, + min_box_overlap_iou=0.7) + + # Check whether the correponding target assigner class is initialized. 
+ # object center target assigner: + self.assertIsInstance(assigner_dict[cnma.OBJECT_CENTER], + cn_assigner.CenterNetCenterHeatmapTargetAssigner) + + # object detection target assigner: + self.assertIsInstance(assigner_dict[cnma.DETECTION_TASK], + cn_assigner.CenterNetBoxTargetAssigner) + + # keypoint estimation target assigner: + self.assertIsInstance(assigner_dict[_TASK_NAME], + cn_assigner.CenterNetKeypointTargetAssigner) + + # mask estimation target assigner: + self.assertIsInstance(assigner_dict[cnma.SEGMENTATION_TASK], + cn_assigner.CenterNetMaskTargetAssigner) + + # DensePose estimation target assigner: + self.assertIsInstance(assigner_dict[cnma.DENSEPOSE_TASK], + cn_assigner.CenterNetDensePoseTargetAssigner) + + # Track estimation target assigner: + self.assertIsInstance(assigner_dict[cnma.TRACK_TASK], + cn_assigner.CenterNetTrackTargetAssigner) + + # Temporal Offset target assigner: + self.assertIsInstance(assigner_dict[cnma.TEMPORALOFFSET_TASK], + cn_assigner.CenterNetTemporalOffsetTargetAssigner) + + def test_predict(self): + """Test the predict function.""" + + model = build_center_net_meta_arch() + def graph_fn(): + prediction_dict = model.predict(tf.zeros([2, 128, 128, 3]), None) + return prediction_dict + + prediction_dict = self.execute(graph_fn, []) + + self.assertEqual(prediction_dict['preprocessed_inputs'].shape, + (2, 128, 128, 3)) + self.assertEqual(prediction_dict[cnma.OBJECT_CENTER][0].shape, + (2, 32, 32, _NUM_CLASSES)) + self.assertEqual(prediction_dict[cnma.BOX_SCALE][0].shape, + (2, 32, 32, 2)) + self.assertEqual(prediction_dict[cnma.BOX_OFFSET][0].shape, + (2, 32, 32, 2)) + self.assertEqual(prediction_dict[cnma.SEGMENTATION_HEATMAP][0].shape, + (2, 32, 32, _NUM_CLASSES)) + self.assertEqual(prediction_dict[cnma.DENSEPOSE_HEATMAP][0].shape, + (2, 32, 32, _DENSEPOSE_NUM_PARTS)) + self.assertEqual(prediction_dict[cnma.DENSEPOSE_REGRESSION][0].shape, + (2, 32, 32, 2 * _DENSEPOSE_NUM_PARTS)) + self.assertEqual(prediction_dict[cnma.TRACK_REID][0].shape, + (2, 32, 32, _REID_EMBED_SIZE)) + self.assertEqual(prediction_dict[cnma.TEMPORAL_OFFSET][0].shape, + (2, 32, 32, 2)) + + def test_loss(self): + """Test the loss function.""" + groundtruth_dict = get_fake_groundtruth_dict(16, 32, 4) + model = build_center_net_meta_arch() + model.provide_groundtruth( + groundtruth_boxes_list=groundtruth_dict[fields.BoxListFields.boxes], + groundtruth_weights_list=groundtruth_dict[fields.BoxListFields.weights], + groundtruth_classes_list=groundtruth_dict[fields.BoxListFields.classes], + groundtruth_keypoints_list=groundtruth_dict[ + fields.BoxListFields.keypoints], + groundtruth_masks_list=groundtruth_dict[ + fields.BoxListFields.masks], + groundtruth_dp_num_points_list=groundtruth_dict[ + fields.BoxListFields.densepose_num_points], + groundtruth_dp_part_ids_list=groundtruth_dict[ + fields.BoxListFields.densepose_part_ids], + groundtruth_dp_surface_coords_list=groundtruth_dict[ + fields.BoxListFields.densepose_surface_coords], + groundtruth_track_ids_list=groundtruth_dict[ + fields.BoxListFields.track_ids], + groundtruth_track_match_flags_list=groundtruth_dict[ + fields.BoxListFields.track_match_flags], + groundtruth_temporal_offsets_list=groundtruth_dict[ + fields.BoxListFields.temporal_offsets]) + + kernel_initializer = tf.constant_initializer( + [[1, 1, 0], [-1000000, -1000000, 1000000]]) + model.track_reid_classification_net = tf.keras.layers.Dense( + _NUM_TRACK_IDS, + kernel_initializer=kernel_initializer, + input_shape=(_REID_EMBED_SIZE,)) + + prediction_dict = 
get_fake_prediction_dict( + input_height=16, input_width=32, stride=4) + + def graph_fn(): + loss_dict = model.loss(prediction_dict, + tf.constant([[16, 24, 3], [16, 24, 3]])) + return loss_dict + + loss_dict = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER)]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE)]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET)]) + self.assertGreater( + 0.01, + loss_dict['%s/%s' % + (cnma.LOSS_KEY_PREFIX, + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP))]) + self.assertGreater( + 0.01, + loss_dict['%s/%s' % + (cnma.LOSS_KEY_PREFIX, + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET))]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, + cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_REGRESSION))]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, + cnma.SEGMENTATION_HEATMAP)]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, + cnma.DENSEPOSE_HEATMAP)]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, + cnma.DENSEPOSE_REGRESSION)]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, + cnma.TRACK_REID)]) + self.assertGreater( + 0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, + cnma.TEMPORAL_OFFSET)]) + + @parameterized.parameters( + {'target_class_id': 1}, + {'target_class_id': 2}, + ) + def test_postprocess(self, target_class_id): + """Test the postprocess function.""" + model = build_center_net_meta_arch() + max_detection = model._center_params.max_box_predictions + num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices) + + class_center = np.zeros((1, 32, 32, 10), dtype=np.float32) + height_width = np.zeros((1, 32, 32, 2), dtype=np.float32) + offset = np.zeros((1, 32, 32, 2), dtype=np.float32) + keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32) + keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32) + keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2) + + class_probs = np.ones(10) * _logit(0.25) + class_probs[target_class_id] = _logit(0.75) + class_center[0, 16, 16] = class_probs + height_width[0, 16, 16] = [5, 10] + offset[0, 16, 16] = [.25, .5] + keypoint_regression[0, 16, 16] = [ + -1., -1., + -1., 1., + 1., -1., + 1., 1.] + keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9) + keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9) + keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9) + keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score. 
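+ # With the peak at output cell (16, 16), offset (0.25, 0.5) and size (5, 10),
+ # the decoded box at stride 4 is [65 - 10, 66 - 20, 65 + 10, 66 + 20]
+ # = [55, 46, 75, 86] on the 128x128 input, matching the assertion below.
+ # The fourth keypoint's candidate (score 0.05) falls below the candidate
+ # score threshold of 0.1, so that keypoint is expected to fall back to its
+ # regressed location (17, 17) with UNMATCHED_KEYPOINT_SCORE.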
+ + segmentation_heatmap = np.zeros((1, 32, 32, 10), dtype=np.float32) + segmentation_heatmap[:, 14:18, 14:18, target_class_id] = 1.0 + segmentation_heatmap = _logit(segmentation_heatmap) + + dp_part_ind = 4 + dp_part_heatmap = np.zeros((1, 32, 32, _DENSEPOSE_NUM_PARTS), + dtype=np.float32) + dp_part_heatmap[0, 14:18, 14:18, dp_part_ind] = 1.0 + dp_part_heatmap = _logit(dp_part_heatmap) + + dp_surf_coords = np.random.randn(1, 32, 32, 2 * _DENSEPOSE_NUM_PARTS) + + embedding_size = 100 + track_reid_embedding = np.zeros((1, 32, 32, embedding_size), + dtype=np.float32) + track_reid_embedding[0, 16, 16, :] = np.ones(embedding_size) + + temporal_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32) + temporal_offsets[..., 1] = 1 + + class_center = tf.constant(class_center) + height_width = tf.constant(height_width) + offset = tf.constant(offset) + keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32) + keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32) + keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32) + segmentation_heatmap = tf.constant(segmentation_heatmap, dtype=tf.float32) + dp_part_heatmap = tf.constant(dp_part_heatmap, dtype=tf.float32) + dp_surf_coords = tf.constant(dp_surf_coords, dtype=tf.float32) + track_reid_embedding = tf.constant(track_reid_embedding, dtype=tf.float32) + temporal_offsets = tf.constant(temporal_offsets, dtype=tf.float32) + + prediction_dict = { + cnma.OBJECT_CENTER: [class_center], + cnma.BOX_SCALE: [height_width], + cnma.BOX_OFFSET: [offset], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): + [keypoint_heatmaps], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): + [keypoint_offsets], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): + [keypoint_regression], + cnma.SEGMENTATION_HEATMAP: [segmentation_heatmap], + cnma.DENSEPOSE_HEATMAP: [dp_part_heatmap], + cnma.DENSEPOSE_REGRESSION: [dp_surf_coords], + cnma.TRACK_REID: [track_reid_embedding], + cnma.TEMPORAL_OFFSET: [temporal_offsets], + } + + def graph_fn(): + detections = model.postprocess(prediction_dict, + tf.constant([[128, 128, 3]])) + return detections + + detections = self.execute_cpu(graph_fn, []) + + self.assertAllClose(detections['detection_boxes'][0, 0], + np.array([55, 46, 75, 86]) / 128.0) + self.assertAllClose(detections['detection_scores'][0], + [.75, .5, .5, .5, .5]) + expected_multiclass_scores = [.25] * 10 + expected_multiclass_scores[target_class_id] = .75 + self.assertAllClose(expected_multiclass_scores, + detections['detection_multiclass_scores'][0][0]) + + # The output embedding extracted at the object center will be a 3-D array of + # shape [batch, num_boxes, embedding_size]. The valid predicted embedding + # will be the first embedding in the first batch. It is a 1-D array of + # shape [embedding_size] with values all ones. All the values of the + # embedding will then be divided by the square root of 'embedding_size' + # after the L2 normalization. 
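+ # For example, with embedding_size = 100 the all-ones embedding has L2 norm
+ # sqrt(100) = 10, so every entry becomes 1 / 10 = 0.1 after normalization.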
+ self.assertAllClose(detections['detection_embeddings'][0, 0], + np.ones(embedding_size) / embedding_size**0.5) + self.assertEqual(detections['detection_classes'][0, 0], target_class_id) + self.assertEqual(detections['num_detections'], [5]) + self.assertAllEqual([1, max_detection, num_keypoints, 2], + detections['detection_keypoints'].shape) + self.assertAllEqual([1, max_detection, num_keypoints], + detections['detection_keypoint_scores'].shape) + self.assertAllEqual([1, max_detection, 4, 4], + detections['detection_masks'].shape) + self.assertAllEqual([1, max_detection, embedding_size], + detections['detection_embeddings'].shape) + self.assertAllEqual([1, max_detection, 2], + detections['detection_temporal_offsets'].shape) + + # Masks should be empty for everything but the first detection. + self.assertAllEqual( + detections['detection_masks'][0, 1:, :, :], + np.zeros_like(detections['detection_masks'][0, 1:, :, :])) + self.assertAllEqual( + detections['detection_surface_coords'][0, 1:, :, :], + np.zeros_like(detections['detection_surface_coords'][0, 1:, :, :])) + + if target_class_id == 1: + expected_kpts_for_obj_0 = np.array( + [[14., 14.], [14., 18.], [18., 14.], [17., 17.]]) / 32. + expected_kpt_scores_for_obj_0 = np.array( + [0.9, 0.9, 0.9, cnma.UNMATCHED_KEYPOINT_SCORE]) + np.testing.assert_allclose(detections['detection_keypoints'][0][0], + expected_kpts_for_obj_0, rtol=1e-6) + np.testing.assert_allclose(detections['detection_keypoint_scores'][0][0], + expected_kpt_scores_for_obj_0, rtol=1e-6) + # First detection has DensePose parts. + self.assertSameElements( + np.unique(detections['detection_masks'][0, 0, :, :]), + set([0, dp_part_ind + 1])) + self.assertGreater(np.sum(np.abs(detections['detection_surface_coords'])), + 0.0) + else: + # All keypoint outputs should be zeros. + np.testing.assert_allclose( + detections['detection_keypoints'][0][0], + np.zeros([num_keypoints, 2], np.float), + rtol=1e-6) + np.testing.assert_allclose( + detections['detection_keypoint_scores'][0][0], + np.zeros([num_keypoints], np.float), + rtol=1e-6) + # Binary segmentation mask. + self.assertSameElements( + np.unique(detections['detection_masks'][0, 0, :, :]), + set([0, 1])) + # No DensePose surface coordinates. + np.testing.assert_allclose( + detections['detection_surface_coords'][0, 0, :, :], + np.zeros_like(detections['detection_surface_coords'][0, 0, :, :])) + + def test_postprocess_simple(self): + """Test the postprocess function.""" + model = build_center_net_meta_arch(num_classes=1) + max_detection = model._center_params.max_box_predictions + num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices) + + class_center = np.zeros((1, 32, 32, 1), dtype=np.float32) + height_width = np.zeros((1, 32, 32, 2), dtype=np.float32) + offset = np.zeros((1, 32, 32, 2), dtype=np.float32) + keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32) + keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32) + keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2) + + class_probs = np.zeros(1) + class_probs[0] = _logit(0.75) + class_center[0, 16, 16] = class_probs + height_width[0, 16, 16] = [5, 10] + offset[0, 16, 16] = [.25, .5] + keypoint_regression[0, 16, 16] = [ + -1., -1., + -1., 1., + 1., -1., + 1., 1.] + keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9) + keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9) + keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9) + keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05) # Note the low score. 
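+ # Note: the single-class model built here has only detection and keypoint
+ # heads (no mask, DensePose, track or temporal offset heads), so only those
+ # outputs are produced and checked below.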
+ + class_center = tf.constant(class_center) + height_width = tf.constant(height_width) + offset = tf.constant(offset) + keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32) + keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32) + keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32) + + prediction_dict = { + cnma.OBJECT_CENTER: [class_center], + cnma.BOX_SCALE: [height_width], + cnma.BOX_OFFSET: [offset], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): + [keypoint_heatmaps], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): + [keypoint_offsets], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): + [keypoint_regression], + } + + def graph_fn(): + detections = model.postprocess(prediction_dict, + tf.constant([[128, 128, 3]])) + return detections + + detections = self.execute_cpu(graph_fn, []) + + self.assertAllClose(detections['detection_boxes'][0, 0], + np.array([55, 46, 75, 86]) / 128.0) + self.assertAllClose(detections['detection_scores'][0], + [.75, .5, .5, .5, .5]) + + self.assertEqual(detections['detection_classes'][0, 0], 0) + self.assertEqual(detections['num_detections'], [5]) + self.assertAllEqual([1, max_detection, num_keypoints, 2], + detections['detection_keypoints'].shape) + self.assertAllEqual([1, max_detection, num_keypoints], + detections['detection_keypoint_scores'].shape) + + def test_get_instance_indices(self): + classes = tf.constant([[0, 1, 2, 0], [2, 1, 2, 2]], dtype=tf.int32) + num_detections = tf.constant([1, 3], dtype=tf.int32) + batch_index = 1 + class_id = 2 + model = build_center_net_meta_arch() + valid_indices = model._get_instance_indices( + classes, num_detections, batch_index, class_id) + self.assertAllEqual(valid_indices.numpy(), [0, 2]) + + +def get_fake_prediction_dict(input_height, input_width, stride): + """Prepares the fake prediction dictionary.""" + output_height = input_height // stride + output_width = input_width // stride + object_center = np.zeros((2, output_height, output_width, _NUM_CLASSES), + dtype=np.float32) + # Box center: + # y: floor((0.54 + 0.56) / 2 * 4) = 2, + # x: floor((0.54 + 0.56) / 2 * 8) = 4 + object_center[0, 2, 4, 1] = 1.0 + object_center = _logit(object_center) + + # Box size: + # height: (0.56 - 0.54) * 4 = 0.08 + # width: (0.56 - 0.54) * 8 = 0.16 + object_scale = np.zeros((2, output_height, output_width, 2), dtype=np.float32) + object_scale[0, 2, 4] = 0.08, 0.16 + + # Box center offset coordinate (0.55, 0.55): + # y-offset: 0.55 * 4 - 2 = 0.2 + # x-offset: 0.55 * 8 - 4 = 0.4 + object_offset = np.zeros((2, output_height, output_width, 2), + dtype=np.float32) + object_offset[0, 2, 4] = 0.2, 0.4 + + keypoint_heatmap = np.zeros((2, output_height, output_width, _NUM_KEYPOINTS), + dtype=np.float32) + keypoint_heatmap[0, 2, 4, 1] = 1.0 + keypoint_heatmap[0, 2, 4, 3] = 1.0 + keypoint_heatmap = _logit(keypoint_heatmap) + + keypoint_offset = np.zeros((2, output_height, output_width, 2), + dtype=np.float32) + keypoint_offset[0, 2, 4] = 0.2, 0.4 + + keypoint_regression = np.zeros( + (2, output_height, output_width, 2 * _NUM_KEYPOINTS), dtype=np.float32) + keypoint_regression[0, 2, 4] = 0.0, 0.0, 0.2, 0.4, 0.0, 0.0, 0.2, 0.4 + + mask_heatmap = np.zeros((2, output_height, output_width, _NUM_CLASSES), + dtype=np.float32) + mask_heatmap[0, 2, 4, 1] = 1.0 + mask_heatmap = _logit(mask_heatmap) + + densepose_heatmap = np.zeros((2, output_height, output_width, + _DENSEPOSE_NUM_PARTS), dtype=np.float32) + densepose_heatmap[0, 2, 4, 5] = 1.0 + densepose_heatmap 
= _logit(densepose_heatmap) + + densepose_regression = np.zeros((2, output_height, output_width, + 2 * _DENSEPOSE_NUM_PARTS), dtype=np.float32) + # The surface coordinate indices for part index 5 are: + # (5 * 2, 5 * 2 + 1), or (10, 11). + densepose_regression[0, 2, 4, 10:12] = 0.4, 0.7 + + track_reid_embedding = np.zeros((2, output_height, output_width, + _REID_EMBED_SIZE), dtype=np.float32) + track_reid_embedding[0, 2, 4, :] = np.arange(_REID_EMBED_SIZE) + + temporal_offsets = np.zeros((2, output_height, output_width, 2), + dtype=np.float32) + temporal_offsets[0, 2, 4, :] = 5 + + prediction_dict = { + 'preprocessed_inputs': + tf.zeros((2, input_height, input_width, 3)), + cnma.OBJECT_CENTER: [ + tf.constant(object_center), + tf.constant(object_center) + ], + cnma.BOX_SCALE: [ + tf.constant(object_scale), + tf.constant(object_scale) + ], + cnma.BOX_OFFSET: [ + tf.constant(object_offset), + tf.constant(object_offset) + ], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): [ + tf.constant(keypoint_heatmap), + tf.constant(keypoint_heatmap) + ], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): [ + tf.constant(keypoint_offset), + tf.constant(keypoint_offset) + ], + cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): [ + tf.constant(keypoint_regression), + tf.constant(keypoint_regression) + ], + cnma.SEGMENTATION_HEATMAP: [ + tf.constant(mask_heatmap), + tf.constant(mask_heatmap) + ], + cnma.DENSEPOSE_HEATMAP: [ + tf.constant(densepose_heatmap), + tf.constant(densepose_heatmap), + ], + cnma.DENSEPOSE_REGRESSION: [ + tf.constant(densepose_regression), + tf.constant(densepose_regression), + ], + cnma.TRACK_REID: [ + tf.constant(track_reid_embedding), + tf.constant(track_reid_embedding), + ], + cnma.TEMPORAL_OFFSET: [ + tf.constant(temporal_offsets), + tf.constant(temporal_offsets), + ], + } + return prediction_dict + + +def get_fake_groundtruth_dict(input_height, input_width, stride): + """Prepares the fake groundtruth dictionary.""" + # A small box with center at (0.55, 0.55). 
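+ # In normalized coordinates this is [0.54, 0.54, 0.56, 0.56]; for a 16x32
+ # input at stride 4 its center falls in output cell (y=2, x=4), matching the
+ # peaks placed in get_fake_prediction_dict above.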
+ boxes = [ + tf.constant([[0.54, 0.54, 0.56, 0.56]]), + tf.constant([[0.0, 0.0, 0.5, 0.5]]), + ] + classes = [ + tf.one_hot([1], depth=_NUM_CLASSES), + tf.one_hot([0], depth=_NUM_CLASSES), + ] + weights = [ + tf.constant([1.]), + tf.constant([0.]), + ] + keypoints = [ + tf.tile( + tf.expand_dims( + tf.constant([[float('nan'), 0.55, + float('nan'), 0.55, 0.55, 0.0]]), + axis=2), + multiples=[1, 1, 2]), + tf.tile( + tf.expand_dims( + tf.constant([[float('nan'), 0.55, + float('nan'), 0.55, 0.55, 0.0]]), + axis=2), + multiples=[1, 1, 2]), + ] + labeled_classes = [ + tf.one_hot([1], depth=_NUM_CLASSES) + tf.one_hot([2], depth=_NUM_CLASSES), + tf.one_hot([0], depth=_NUM_CLASSES) + tf.one_hot([1], depth=_NUM_CLASSES), + ] + mask = np.zeros((1, input_height, input_width), dtype=np.float32) + mask[0, 8:8+stride, 16:16+stride] = 1 + masks = [ + tf.constant(mask), + tf.zeros_like(mask), + ] + densepose_num_points = [ + tf.constant([1], dtype=tf.int32), + tf.constant([0], dtype=tf.int32), + ] + densepose_part_ids = [ + tf.constant([[5, 0, 0]], dtype=tf.int32), + tf.constant([[0, 0, 0]], dtype=tf.int32), + ] + densepose_surface_coords_np = np.zeros((1, 3, 4), dtype=np.float32) + densepose_surface_coords_np[0, 0, :] = 0.55, 0.55, 0.4, 0.7 + densepose_surface_coords = [ + tf.constant(densepose_surface_coords_np), + tf.zeros_like(densepose_surface_coords_np) + ] + track_ids = [ + tf.constant([2], dtype=tf.int32), + tf.constant([1], dtype=tf.int32), + ] + temporal_offsets = [ + tf.constant([[5.0, 5.0]], dtype=tf.float32), + tf.constant([[2.0, 3.0]], dtype=tf.float32), + ] + track_match_flags = [ + tf.constant([1.0], dtype=tf.float32), + tf.constant([1.0], dtype=tf.float32), + ] + groundtruth_dict = { + fields.BoxListFields.boxes: boxes, + fields.BoxListFields.weights: weights, + fields.BoxListFields.classes: classes, + fields.BoxListFields.keypoints: keypoints, + fields.BoxListFields.masks: masks, + fields.BoxListFields.densepose_num_points: densepose_num_points, + fields.BoxListFields.densepose_part_ids: densepose_part_ids, + fields.BoxListFields.densepose_surface_coords: + densepose_surface_coords, + fields.BoxListFields.track_ids: track_ids, + fields.BoxListFields.temporal_offsets: temporal_offsets, + fields.BoxListFields.track_match_flags: track_match_flags, + fields.InputDataFields.groundtruth_labeled_classes: labeled_classes, + } + return groundtruth_dict + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaComputeLossTest(test_case.TestCase): + """Test for CenterNet loss compuation related functions.""" + + def setUp(self): + self.model = build_center_net_meta_arch() + self.classification_loss_fn = self.model._center_params.classification_loss + self.localization_loss_fn = self.model._od_params.localization_loss + self.true_image_shapes = tf.constant([[16, 24, 3], [16, 24, 3]]) + self.input_height = 16 + self.input_width = 32 + self.stride = 4 + self.per_pixel_weights = self.get_per_pixel_weights(self.true_image_shapes, + self.input_height, + self.input_width, + self.stride) + self.prediction_dict = get_fake_prediction_dict(self.input_height, + self.input_width, + self.stride) + self.model._groundtruth_lists = get_fake_groundtruth_dict( + self.input_height, self.input_width, self.stride) + super(CenterNetMetaComputeLossTest, self).setUp() + + def get_per_pixel_weights(self, true_image_shapes, input_height, input_width, + stride): + output_height, output_width = (input_height // stride, + input_width // stride) + + # TODO(vighneshb) Explore whether using floor 
here is safe. + output_true_image_shapes = tf.ceil(tf.to_float(true_image_shapes) / stride) + per_pixel_weights = cnma.get_valid_anchor_weights_in_flattened_image( + output_true_image_shapes, output_height, output_width) + per_pixel_weights = tf.expand_dims(per_pixel_weights, 2) + return per_pixel_weights + + def test_compute_object_center_loss(self): + def graph_fn(): + loss = self.model._compute_object_center_loss( + object_center_predictions=self.prediction_dict[cnma.OBJECT_CENTER], + input_height=self.input_height, + input_width=self.input_width, + per_pixel_weights=self.per_pixel_weights) + return loss + + loss = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater(0.01, loss) + + default_value = self.model._center_params.use_only_known_classes + self.model._center_params = ( + self.model._center_params._replace(use_only_known_classes=True)) + loss = self.model._compute_object_center_loss( + object_center_predictions=self.prediction_dict[cnma.OBJECT_CENTER], + input_height=self.input_height, + input_width=self.input_width, + per_pixel_weights=self.per_pixel_weights) + self.model._center_params = ( + self.model._center_params._replace( + use_only_known_classes=default_value)) + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater(0.01, loss) + + def test_compute_box_scale_and_offset_loss(self): + def graph_fn(): + scale_loss, offset_loss = self.model._compute_box_scale_and_offset_loss( + scale_predictions=self.prediction_dict[cnma.BOX_SCALE], + offset_predictions=self.prediction_dict[cnma.BOX_OFFSET], + input_height=self.input_height, + input_width=self.input_width) + return scale_loss, offset_loss + + scale_loss, offset_loss = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater(0.01, scale_loss) + self.assertGreater(0.01, offset_loss) + + def test_compute_kp_heatmap_loss(self): + def graph_fn(): + loss = self.model._compute_kp_heatmap_loss( + input_height=self.input_height, + input_width=self.input_width, + task_name=_TASK_NAME, + heatmap_predictions=self.prediction_dict[cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_HEATMAP)], + classification_loss_fn=self.classification_loss_fn, + per_pixel_weights=self.per_pixel_weights) + return loss + + loss = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater(0.01, loss) + + def test_compute_kp_offset_loss(self): + def graph_fn(): + loss = self.model._compute_kp_offset_loss( + input_height=self.input_height, + input_width=self.input_width, + task_name=_TASK_NAME, + offset_predictions=self.prediction_dict[cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_OFFSET)], + localization_loss_fn=self.localization_loss_fn) + return loss + + loss = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater(0.01, loss) + + def test_compute_kp_regression_loss(self): + def graph_fn(): + loss = self.model._compute_kp_regression_loss( + input_height=self.input_height, + input_width=self.input_width, + task_name=_TASK_NAME, + regression_predictions=self.prediction_dict[cnma.get_keypoint_name( + _TASK_NAME, cnma.KEYPOINT_REGRESSION,)], + localization_loss_fn=self.localization_loss_fn) + return loss + + loss = self.execute(graph_fn, []) + + # The prediction and groundtruth are curated to produce very low loss. 
+ self.assertGreater(0.01, loss) + + def test_compute_track_embedding_loss(self): + default_fc = self.model.track_reid_classification_net + # Initialize the kernel to extreme values so that the classification score + # is close to (0, 0, 1) after the softmax layer. + kernel_initializer = tf.constant_initializer( + [[1, 1, 0], [-1000000, -1000000, 1000000]]) + self.model.track_reid_classification_net = tf.keras.layers.Dense( + _NUM_TRACK_IDS, + kernel_initializer=kernel_initializer, + input_shape=(_REID_EMBED_SIZE,)) + + loss = self.model._compute_track_embedding_loss( + input_height=self.input_height, + input_width=self.input_width, + object_reid_predictions=self.prediction_dict[cnma.TRACK_REID]) + + self.model.track_reid_classification_net = default_fc + + # The prediction and groundtruth are curated to produce very low loss. + self.assertGreater(0.01, loss) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMetaArchRestoreTest(test_case.TestCase): + + def test_restore_map_resnet(self): + """Test restore map for a resnet backbone.""" + + model = build_center_net_meta_arch(build_resnet=True) + restore_from_objects_map = model.restore_from_objects('classification') + self.assertIsInstance(restore_from_objects_map['feature_extractor'], + tf.keras.Model) + + def test_retore_map_error(self): + """Test that restoring unsupported checkpoint type raises an error.""" + + model = build_center_net_meta_arch(build_resnet=True) + msg = ("Checkpoint type \"detection\" not supported for " + "CenterNetResnetFeatureExtractor. Supported types are " + "['classification', 'fine_tune']") + with self.assertRaisesRegex(ValueError, re.escape(msg)): + model.restore_from_objects('detection') + + +class DummyFeatureExtractor(cnma.CenterNetFeatureExtractor): + + def __init__(self, + channel_means, + channel_stds, + bgr_ordering, + num_feature_outputs, + stride): + self._num_feature_outputs = num_feature_outputs + self._stride = stride + super(DummyFeatureExtractor, self).__init__( + channel_means=channel_means, channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + + def predict(self): + pass + + def loss(self): + pass + + def postprocess(self): + pass + + def call(self, inputs): + batch_size, input_height, input_width, _ = inputs.shape + fake_output = tf.ones([ + batch_size, input_height // self._stride, input_width // self._stride, + 64 + ], dtype=tf.float32) + return [fake_output] * self._num_feature_outputs + + @property + def out_stride(self): + return self._stride + + @property + def num_feature_outputs(self): + return self._num_feature_outputs + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetFeatureExtractorTest(test_case.TestCase): + """Test the base feature extractor class.""" + + def test_preprocess(self): + feature_extractor = DummyFeatureExtractor( + channel_means=(1.0, 2.0, 3.0), + channel_stds=(10., 20., 30.), bgr_ordering=False, + num_feature_outputs=2, stride=4) + + img = np.zeros((2, 32, 32, 3)) + img[:, :, :] = 11, 22, 33 + + def graph_fn(): + output = feature_extractor.preprocess(img) + return output + + output = self.execute(graph_fn, []) + self.assertAlmostEqual(output.sum(), 2 * 32 * 32 * 3) + + def test_bgr_ordering(self): + feature_extractor = DummyFeatureExtractor( + channel_means=(0.0, 0.0, 0.0), + channel_stds=(1., 1., 1.), bgr_ordering=True, + num_feature_outputs=2, stride=4) + + img = np.zeros((2, 32, 32, 3), dtype=np.float32) + img[:, :, :] = 1, 2, 3 + + def graph_fn(): + output = 
feature_extractor.preprocess(img) + return output + + output = self.execute(graph_fn, []) + self.assertAllClose(output[..., 2], 1 * np.ones((2, 32, 32))) + self.assertAllClose(output[..., 1], 2 * np.ones((2, 32, 32))) + self.assertAllClose(output[..., 0], 3 * np.ones((2, 32, 32))) + + def test_default_ordering(self): + feature_extractor = DummyFeatureExtractor( + channel_means=(0.0, 0.0, 0.0), + channel_stds=(1., 1., 1.), bgr_ordering=False, + num_feature_outputs=2, stride=4) + + img = np.zeros((2, 32, 32, 3), dtype=np.float32) + img[:, :, :] = 1, 2, 3 + + def graph_fn(): + output = feature_extractor.preprocess(img) + return output + + output = self.execute(graph_fn, []) + self.assertAllClose(output[..., 0], 1 * np.ones((2, 32, 32))) + self.assertAllClose(output[..., 1], 2 * np.ones((2, 32, 32))) + self.assertAllClose(output[..., 2], 3 * np.ones((2, 32, 32))) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..902a88c77669cd27eb36490d645740041600fcac --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib.py @@ -0,0 +1,224 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Library functions for ContextRCNN.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf +import tf_slim as slim + + +# The negative value used in padding the invalid weights. +_NEGATIVE_PADDING_VALUE = -100000 + + +def filter_weight_value(weights, values, valid_mask): + """Filters weights and values based on valid_mask. + + _NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights to + avoid their contribution in softmax. 0 will be set for the invalid elements in + the values. + + Args: + weights: A float Tensor of shape [batch_size, input_size, context_size]. + values: A float Tensor of shape [batch_size, context_size, + projected_dimension]. + valid_mask: A boolean Tensor of shape [batch_size, context_size]. True means + valid and False means invalid. + + Returns: + weights: A float Tensor of shape [batch_size, input_size, context_size]. + values: A float Tensor of shape [batch_size, context_size, + projected_dimension]. + + Raises: + ValueError: If shape of doesn't match. 
+ """ + w_batch_size, _, w_context_size = weights.shape + v_batch_size, v_context_size, _ = values.shape + m_batch_size, m_context_size = valid_mask.shape + if w_batch_size != v_batch_size or v_batch_size != m_batch_size: + raise ValueError("Please make sure the first dimension of the input" + " tensors are the same.") + + if w_context_size != v_context_size: + raise ValueError("Please make sure the third dimension of weights matches" + " the second dimension of values.") + + if w_context_size != m_context_size: + raise ValueError("Please make sure the third dimension of the weights" + " matches the second dimension of the valid_mask.") + + valid_mask = valid_mask[..., tf.newaxis] + + # Force the invalid weights to be very negative so it won't contribute to + # the softmax. + weights += tf.transpose( + tf.cast(tf.math.logical_not(valid_mask), weights.dtype) * + _NEGATIVE_PADDING_VALUE, + perm=[0, 2, 1]) + + # Force the invalid values to be 0. + values *= tf.cast(valid_mask, values.dtype) + + return weights, values + + +def compute_valid_mask(num_valid_elements, num_elements): + """Computes mask of valid entries within padded context feature. + + Args: + num_valid_elements: A int32 Tensor of shape [batch_size]. + num_elements: An int32 Tensor. + + Returns: + A boolean Tensor of the shape [batch_size, num_elements]. True means + valid and False means invalid. + """ + batch_size = num_valid_elements.shape[0] + element_idxs = tf.range(num_elements, dtype=tf.int32) + batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1]) + num_valid_elements = num_valid_elements[..., tf.newaxis] + valid_mask = tf.less(batch_element_idxs, num_valid_elements) + return valid_mask + + +def project_features(features, projection_dimension, is_training, normalize): + """Projects features to another feature space. + + Args: + features: A float Tensor of shape [batch_size, features_size, + num_features]. + projection_dimension: A int32 Tensor. + is_training: A boolean Tensor (affecting batch normalization). + normalize: A boolean Tensor. If true, the output features will be l2 + normalized on the last dimension. + + Returns: + A float Tensor of shape [batch, features_size, projection_dimension]. + """ + # TODO(guanhangwu) Figure out a better way of specifying the batch norm + # params. + batch_norm_params = { + "is_training": is_training, + "decay": 0.97, + "epsilon": 0.001, + "center": True, + "scale": True + } + + batch_size, _, num_features = features.shape + features = tf.reshape(features, [-1, num_features]) + projected_features = slim.fully_connected( + features, + num_outputs=projection_dimension, + activation_fn=tf.nn.relu6, + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_params) + + projected_features = tf.reshape(projected_features, + [batch_size, -1, projection_dimension]) + + if normalize: + projected_features = tf.math.l2_normalize(projected_features, axis=-1) + + return projected_features + + +def attention_block(input_features, context_features, bottleneck_dimension, + output_dimension, attention_temperature, valid_mask, + is_training): + """Generic attention block. + + Args: + input_features: A float Tensor of shape [batch_size, input_size, + num_input_features]. + context_features: A float Tensor of shape [batch_size, context_size, + num_context_features]. + bottleneck_dimension: A int32 Tensor representing the bottleneck dimension + for intermediate projections. + output_dimension: A int32 Tensor representing the last dimension of the + output feature. 
+ attention_temperature: A float Tensor. It controls the temperature of the + softmax for weights calculation. The formula for calculation as follows: + weights = exp(weights / temperature) / sum(exp(weights / temperature)) + valid_mask: A boolean Tensor of shape [batch_size, context_size]. + is_training: A boolean Tensor (affecting batch normalization). + + Returns: + A float Tensor of shape [batch_size, input_size, output_dimension]. + """ + + with tf.variable_scope("AttentionBlock"): + queries = project_features( + input_features, bottleneck_dimension, is_training, normalize=True) + keys = project_features( + context_features, bottleneck_dimension, is_training, normalize=True) + values = project_features( + context_features, bottleneck_dimension, is_training, normalize=True) + + weights = tf.matmul(queries, keys, transpose_b=True) + + weights, values = filter_weight_value(weights, values, valid_mask) + + weights = tf.nn.softmax(weights / attention_temperature) + + features = tf.matmul(weights, values) + output_features = project_features( + features, output_dimension, is_training, normalize=False) + return output_features + + +def compute_box_context_attention(box_features, context_features, + valid_context_size, bottleneck_dimension, + attention_temperature, is_training): + """Computes the attention feature from the context given a batch of box. + + Args: + box_features: A float Tensor of shape [batch_size, max_num_proposals, + height, width, channels]. It is pooled features from first stage + proposals. + context_features: A float Tensor of shape [batch_size, context_size, + num_context_features]. + valid_context_size: A int32 Tensor of shape [batch_size]. + bottleneck_dimension: A int32 Tensor representing the bottleneck dimension + for intermediate projections. + attention_temperature: A float Tensor. It controls the temperature of the + softmax for weights calculation. The formula for calculation as follows: + weights = exp(weights / temperature) / sum(exp(weights / temperature)) + is_training: A boolean Tensor (affecting batch normalization). + + Returns: + A float Tensor of shape [batch_size, max_num_proposals, 1, 1, channels]. + """ + _, context_size, _ = context_features.shape + valid_mask = compute_valid_mask(valid_context_size, context_size) + + channels = box_features.shape[-1] + # Average pools over height and width dimension so that the shape of + # box_features becomes [batch_size, max_num_proposals, channels]. + box_features = tf.reduce_mean(box_features, [2, 3]) + + output_features = attention_block(box_features, context_features, + bottleneck_dimension, channels.value, + attention_temperature, valid_mask, + is_training) + + # Expands the dimension back to match with the original feature map. 
+ output_features = output_features[:, :, tf.newaxis, tf.newaxis, :] + + return output_features diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f2749cb1e1522084308cca7842836d11e936c16 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a0b3b848d835dcad37f6c75f05b869fbaec4facb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf1_test.py @@ -0,0 +1,126 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for context_rcnn_lib.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +from absl.testing import parameterized +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import context_rcnn_lib +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_NEGATIVE_PADDING_VALUE = -100000 + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ContextRcnnLibTest(parameterized.TestCase, test_case.TestCase, + tf.test.TestCase): + """Tests for the functions in context_rcnn_lib.""" + + def test_compute_valid_mask(self): + num_elements = tf.constant(3, tf.int32) + num_valid_elementss = tf.constant((1, 2), tf.int32) + valid_mask = context_rcnn_lib.compute_valid_mask(num_valid_elementss, + num_elements) + expected_valid_mask = tf.constant([[1, 0, 0], [1, 1, 0]], tf.float32) + self.assertAllEqual(valid_mask, expected_valid_mask) + + def test_filter_weight_value(self): + weights = tf.ones((2, 3, 2), tf.float32) * 4 + values = tf.ones((2, 2, 4), tf.float32) + valid_mask = tf.constant([[True, True], [True, False]], tf.bool) + + filtered_weights, filtered_values = context_rcnn_lib.filter_weight_value( + weights, values, valid_mask) + expected_weights = tf.constant([[[4, 4], [4, 4], [4, 4]], + [[4, _NEGATIVE_PADDING_VALUE + 4], + [4, _NEGATIVE_PADDING_VALUE + 4], + [4, _NEGATIVE_PADDING_VALUE + 4]]]) + + expected_values = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1]], + [[1, 1, 1, 1], [0, 0, 0, 0]]]) + self.assertAllEqual(filtered_weights, expected_weights) + self.assertAllEqual(filtered_values, expected_values) + + # Changes the valid_mask so the results will be 
different. + valid_mask = tf.constant([[True, True], [False, False]], tf.bool) + + filtered_weights, filtered_values = context_rcnn_lib.filter_weight_value( + weights, values, valid_mask) + expected_weights = tf.constant( + [[[4, 4], [4, 4], [4, 4]], + [[_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4], + [_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4], + [_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4]]]) + + expected_values = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1]], + [[0, 0, 0, 0], [0, 0, 0, 0]]]) + self.assertAllEqual(filtered_weights, expected_weights) + self.assertAllEqual(filtered_values, expected_values) + + @parameterized.parameters((2, True, True), (2, False, True), + (10, True, False), (10, False, False)) + def test_project_features(self, projection_dimension, is_training, normalize): + features = tf.ones([2, 3, 4], tf.float32) + projected_features = context_rcnn_lib.project_features( + features, + projection_dimension, + is_training=is_training, + normalize=normalize) + + # Makes sure the shape is correct. + self.assertAllEqual(projected_features.shape, [2, 3, projection_dimension]) + + @parameterized.parameters( + (2, 10, 1), + (3, 10, 2), + (4, 20, 3), + (5, 20, 4), + (7, 20, 5), + ) + def test_attention_block(self, bottleneck_dimension, output_dimension, + attention_temperature): + input_features = tf.ones([2, 3, 4], tf.float32) + context_features = tf.ones([2, 2, 3], tf.float32) + valid_mask = tf.constant([[True, True], [False, False]], tf.bool) + is_training = False + output_features = context_rcnn_lib.attention_block( + input_features, context_features, bottleneck_dimension, + output_dimension, attention_temperature, valid_mask, is_training) + + # Makes sure the shape is correct. + self.assertAllEqual(output_features.shape, [2, 3, output_dimension]) + + @parameterized.parameters(True, False) + def test_compute_box_context_attention(self, is_training): + box_features = tf.ones([2, 3, 4, 4, 4], tf.float32) + context_features = tf.ones([2, 5, 6], tf.float32) + valid_context_size = tf.constant((2, 3), tf.int32) + bottleneck_dimension = 10 + attention_temperature = 1 + attention_features = context_rcnn_lib.compute_box_context_attention( + box_features, context_features, valid_context_size, + bottleneck_dimension, attention_temperature, is_training) + # Makes sure the shape is correct. + self.assertAllEqual(attention_features.shape, [2, 3, 1, 1, 4]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf2.py new file mode 100644 index 0000000000000000000000000000000000000000..ca7d80ab6c6008e14ec1a74f7445bced03988749 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf2.py @@ -0,0 +1,239 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Library functions for Context R-CNN.""" +import tensorflow as tf + +from object_detection.core import freezable_batch_norm + +# The negative value used in padding the invalid weights. +_NEGATIVE_PADDING_VALUE = -100000 + + +class ContextProjection(tf.keras.layers.Layer): + """Custom layer to do batch normalization and projection.""" + + def __init__(self, projection_dimension, **kwargs): + self.batch_norm = freezable_batch_norm.FreezableBatchNorm( + epsilon=0.001, + center=True, + scale=True, + momentum=0.97, + trainable=True) + self.projection = tf.keras.layers.Dense(units=projection_dimension, + use_bias=True) + self.projection_dimension = projection_dimension + super(ContextProjection, self).__init__(**kwargs) + + def build(self, input_shape): + self.projection.build(input_shape) + self.batch_norm.build(input_shape[:1] + [self.projection_dimension]) + + def call(self, input_features, is_training=False): + return tf.nn.relu6(self.batch_norm(self.projection(input_features), + is_training)) + + +class AttentionBlock(tf.keras.layers.Layer): + """Custom layer to perform all attention.""" + + def __init__(self, bottleneck_dimension, attention_temperature, + output_dimension=None, is_training=False, + name='AttentionBlock', **kwargs): + """Constructs an attention block. + + Args: + bottleneck_dimension: A int32 Tensor representing the bottleneck dimension + for intermediate projections. + attention_temperature: A float Tensor. It controls the temperature of the + softmax for weights calculation. The formula for calculation as follows: + weights = exp(weights / temperature) / sum(exp(weights / temperature)) + output_dimension: A int32 Tensor representing the last dimension of the + output feature. + is_training: A boolean Tensor (affecting batch normalization). + name: A string describing what to name the variables in this block. + **kwargs: Additional keyword arguments. + """ + + self._key_proj = ContextProjection(bottleneck_dimension) + self._val_proj = ContextProjection(bottleneck_dimension) + self._query_proj = ContextProjection(bottleneck_dimension) + self._feature_proj = None + self._attention_temperature = attention_temperature + self._bottleneck_dimension = bottleneck_dimension + self._is_training = is_training + self._output_dimension = output_dimension + if self._output_dimension: + self._feature_proj = ContextProjection(self._output_dimension) + super(AttentionBlock, self).__init__(name=name, **kwargs) + + def build(self, input_shapes): + """Finishes building the attention block. + + Args: + input_shapes: the shape of the primary input box features. + """ + if not self._feature_proj: + self._output_dimension = input_shapes[-1] + self._feature_proj = ContextProjection(self._output_dimension) + + def call(self, box_features, context_features, valid_context_size): + """Handles a call by performing attention. + + Args: + box_features: A float Tensor of shape [batch_size, input_size, height, + width, num_input_features]. + context_features: A float Tensor of shape [batch_size, context_size, + num_context_features]. + valid_context_size: A int32 Tensor of shape [batch_size]. + + Returns: + A float Tensor with shape [batch_size, input_size, num_input_features] + containing output features after attention with context features. 
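For orientation, a minimal usage sketch of this layer (hypothetical hyperparameter values; the shapes mirror the TF2 unit test added later in this diff):

    import tensorflow as tf

    block = AttentionBlock(bottleneck_dimension=16, attention_temperature=0.2,
                           output_dimension=None, is_training=False)
    box_features = tf.ones([2, 8, 3, 3, 3])     # [batch, proposals, height, width, channels]
    context_features = tf.ones([2, 20, 10])     # [batch, context_size, num_context_features]
    valid_context_size = tf.constant([5, 12], tf.int32)

    out = block(box_features, context_features, valid_context_size)
    # out has shape [2, 8, 1, 1, 3]; with output_dimension=None the block falls
    # back to the input channel count (3) for the output feature dimension.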
+ """ + + _, context_size, _ = context_features.shape + valid_mask = compute_valid_mask(valid_context_size, context_size) + + # Average pools over height and width dimension so that the shape of + # box_features becomes [batch_size, max_num_proposals, channels]. + box_features = tf.reduce_mean(box_features, [2, 3]) + + queries = project_features( + box_features, self._bottleneck_dimension, self._is_training, + self._query_proj, normalize=True) + keys = project_features( + context_features, self._bottleneck_dimension, self._is_training, + self._key_proj, normalize=True) + values = project_features( + context_features, self._bottleneck_dimension, self._is_training, + self._val_proj, normalize=True) + + weights = tf.matmul(queries, keys, transpose_b=True) + weights, values = filter_weight_value(weights, values, valid_mask) + weights = tf.nn.softmax(weights / self._attention_temperature) + + features = tf.matmul(weights, values) + output_features = project_features( + features, self._output_dimension, self._is_training, + self._feature_proj, normalize=False) + + output_features = output_features[:, :, tf.newaxis, tf.newaxis, :] + + return output_features + + +def filter_weight_value(weights, values, valid_mask): + """Filters weights and values based on valid_mask. + + _NEGATIVE_PADDING_VALUE will be added to invalid elements in the weights to + avoid their contribution in softmax. 0 will be set for the invalid elements in + the values. + + Args: + weights: A float Tensor of shape [batch_size, input_size, context_size]. + values: A float Tensor of shape [batch_size, context_size, + projected_dimension]. + valid_mask: A boolean Tensor of shape [batch_size, context_size]. True means + valid and False means invalid. + + Returns: + weights: A float Tensor of shape [batch_size, input_size, context_size]. + values: A float Tensor of shape [batch_size, context_size, + projected_dimension]. + + Raises: + ValueError: If shape of doesn't match. + """ + w_batch_size, _, w_context_size = weights.shape + v_batch_size, v_context_size, _ = values.shape + m_batch_size, m_context_size = valid_mask.shape + if w_batch_size != v_batch_size or v_batch_size != m_batch_size: + raise ValueError('Please make sure the first dimension of the input' + ' tensors are the same.') + + if w_context_size != v_context_size: + raise ValueError('Please make sure the third dimension of weights matches' + ' the second dimension of values.') + + if w_context_size != m_context_size: + raise ValueError('Please make sure the third dimension of the weights' + ' matches the second dimension of the valid_mask.') + + valid_mask = valid_mask[..., tf.newaxis] + + # Force the invalid weights to be very negative so it won't contribute to + # the softmax. + weights += tf.transpose( + tf.cast(tf.math.logical_not(valid_mask), weights.dtype) * + _NEGATIVE_PADDING_VALUE, + perm=[0, 2, 1]) + + # Force the invalid values to be 0. + values *= tf.cast(valid_mask, values.dtype) + + return weights, values + + +def project_features(features, bottleneck_dimension, is_training, + layer, normalize=True): + """Projects features to another feature space. + + Args: + features: A float Tensor of shape [batch_size, features_size, + num_features]. + bottleneck_dimension: A int32 Tensor. + is_training: A boolean Tensor (affecting batch normalization). + layer: Contains a custom layer specific to the particular operation + being performed (key, value, query, features) + normalize: A boolean Tensor. 
If true, the output features will be l2 + normalized on the last dimension. + + Returns: + A float Tensor of shape [batch, features_size, projection_dimension]. + """ + shape_arr = features.shape + batch_size, _, num_features = shape_arr + features = tf.reshape(features, [-1, num_features]) + + projected_features = layer(features, is_training) + + projected_features = tf.reshape(projected_features, + [batch_size, -1, bottleneck_dimension]) + + if normalize: + projected_features = tf.keras.backend.l2_normalize(projected_features, + axis=-1) + + return projected_features + + +def compute_valid_mask(num_valid_elements, num_elements): + """Computes mask of valid entries within padded context feature. + + Args: + num_valid_elements: A int32 Tensor of shape [batch_size]. + num_elements: An int32 Tensor. + + Returns: + A boolean Tensor of the shape [batch_size, num_elements]. True means + valid and False means invalid. + """ + batch_size = num_valid_elements.shape[0] + element_idxs = tf.range(num_elements, dtype=tf.int32) + batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1]) + num_valid_elements = num_valid_elements[..., tf.newaxis] + valid_mask = tf.less(batch_element_idxs, num_valid_elements) + return valid_mask diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf2.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce7f01a8fc70aecec2ec73ee89c53cc798d420fb Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf2.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5fb116c18b1c720d23b4c8454c24ec7820265221 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_lib_tf2_test.py @@ -0,0 +1,120 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for context_rcnn_lib.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +from absl.testing import parameterized +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import context_rcnn_lib_tf2 as context_rcnn_lib +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_NEGATIVE_PADDING_VALUE = -100000 + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ContextRcnnLibTest(parameterized.TestCase, test_case.TestCase): + """Tests for the functions in context_rcnn_lib.""" + + def test_compute_valid_mask(self): + num_elements = tf.constant(3, tf.int32) + num_valid_elementss = tf.constant((1, 2), tf.int32) + valid_mask = context_rcnn_lib.compute_valid_mask(num_valid_elementss, + num_elements) + expected_valid_mask = tf.constant([[1, 0, 0], [1, 1, 0]], tf.float32) + self.assertAllEqual(valid_mask, expected_valid_mask) + + def test_filter_weight_value(self): + weights = tf.ones((2, 3, 2), tf.float32) * 4 + values = tf.ones((2, 2, 4), tf.float32) + valid_mask = tf.constant([[True, True], [True, False]], tf.bool) + + filtered_weights, filtered_values = context_rcnn_lib.filter_weight_value( + weights, values, valid_mask) + expected_weights = tf.constant([[[4, 4], [4, 4], [4, 4]], + [[4, _NEGATIVE_PADDING_VALUE + 4], + [4, _NEGATIVE_PADDING_VALUE + 4], + [4, _NEGATIVE_PADDING_VALUE + 4]]]) + + expected_values = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1]], + [[1, 1, 1, 1], [0, 0, 0, 0]]]) + self.assertAllEqual(filtered_weights, expected_weights) + self.assertAllEqual(filtered_values, expected_values) + + # Changes the valid_mask so the results will be different. + valid_mask = tf.constant([[True, True], [False, False]], tf.bool) + + filtered_weights, filtered_values = context_rcnn_lib.filter_weight_value( + weights, values, valid_mask) + expected_weights = tf.constant( + [[[4, 4], [4, 4], [4, 4]], + [[_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4], + [_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4], + [_NEGATIVE_PADDING_VALUE + 4, _NEGATIVE_PADDING_VALUE + 4]]]) + + expected_values = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1]], + [[0, 0, 0, 0], [0, 0, 0, 0]]]) + self.assertAllEqual(filtered_weights, expected_weights) + self.assertAllEqual(filtered_values, expected_values) + + @parameterized.parameters((2, True, True), (2, False, True), + (10, True, False), (10, False, False)) + def test_project_features(self, projection_dimension, is_training, normalize): + features = tf.ones([2, 3, 4], tf.float32) + projected_features = context_rcnn_lib.project_features( + features, + projection_dimension, + is_training, + context_rcnn_lib.ContextProjection(projection_dimension), + normalize=normalize) + + # Makes sure the shape is correct. 
+ self.assertAllEqual(projected_features.shape, [2, 3, projection_dimension]) + + @parameterized.parameters( + (2, 10, 1), + (3, 10, 2), + (4, None, 3), + (5, 20, 4), + (7, None, 5), + ) + def test_attention_block(self, bottleneck_dimension, output_dimension, + attention_temperature): + input_features = tf.ones([2, 8, 3, 3, 3], tf.float32) + context_features = tf.ones([2, 20, 10], tf.float32) + attention_block = context_rcnn_lib.AttentionBlock( + bottleneck_dimension, + attention_temperature, + output_dimension=output_dimension, + is_training=False) + valid_context_size = tf.random_uniform((2,), + minval=0, + maxval=10, + dtype=tf.int32) + output_features = attention_block(input_features, context_features, + valid_context_size) + + # Makes sure the shape is correct. + self.assertAllEqual(output_features.shape, + [2, 8, 1, 1, (output_dimension or 3)]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_meta_arch.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..eca97d9209923ea23f3e23b480c51b28a06ebd3b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_meta_arch.py @@ -0,0 +1,351 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Context R-CNN meta-architecture definition. + +This adds the ability to use attention into contextual features within the +Faster R-CNN object detection framework to improve object detection performance. +See https://arxiv.org/abs/1912.03538 for more information. 
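In practice, the contextual features enter the model as side inputs at prediction time. A hedged sketch of the call pattern (shapes are illustrative and `model` is assumed to be an already-built ContextRCNNMetaArch; it follows the unit tests added later in this diff):

    features = {
        fields.InputDataFields.context_features: tf.ones([2, 20, 10]),    # [batch, context_size, num_context_features]
        fields.InputDataFields.valid_context_size: tf.constant([5, 12]),  # [batch]
    }
    side_inputs = model.get_side_inputs(features)
    preprocessed_inputs, true_image_shapes = model.preprocess(tf.zeros([2, 20, 20, 3]))
    prediction_dict = model.predict(preprocessed_inputs, true_image_shapes, **side_inputs)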
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +from object_detection.core import standard_fields as fields +from object_detection.meta_architectures import context_rcnn_lib +from object_detection.meta_architectures import context_rcnn_lib_tf2 +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import tf_version + + +class ContextRCNNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch): + """Context R-CNN Meta-architecture definition.""" + + def __init__(self, + is_training, + num_classes, + image_resizer_fn, + feature_extractor, + number_of_stages, + first_stage_anchor_generator, + first_stage_target_assigner, + first_stage_atrous_rate, + first_stage_box_predictor_arg_scope_fn, + first_stage_box_predictor_kernel_size, + first_stage_box_predictor_depth, + first_stage_minibatch_size, + first_stage_sampler, + first_stage_non_max_suppression_fn, + first_stage_max_proposals, + first_stage_localization_loss_weight, + first_stage_objectness_loss_weight, + crop_and_resize_fn, + initial_crop_size, + maxpool_kernel_size, + maxpool_stride, + second_stage_target_assigner, + second_stage_mask_rcnn_box_predictor, + second_stage_batch_size, + second_stage_sampler, + second_stage_non_max_suppression_fn, + second_stage_score_conversion_fn, + second_stage_localization_loss_weight, + second_stage_classification_loss_weight, + second_stage_classification_loss, + second_stage_mask_prediction_loss_weight=1.0, + hard_example_miner=None, + parallel_iterations=16, + add_summaries=True, + clip_anchors_to_image=False, + use_static_shapes=False, + resize_masks=True, + freeze_batchnorm=False, + return_raw_detections_during_predict=False, + output_final_box_features=False, + attention_bottleneck_dimension=None, + attention_temperature=None): + """ContextRCNNMetaArch Constructor. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + num_classes: Number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + image_resizer_fn: A callable for image resizing. This callable + takes a rank-3 image tensor of shape [height, width, channels] + (corresponding to a single image), an optional rank-3 instance mask + tensor of shape [num_masks, height, width] and returns a resized rank-3 + image tensor, a resized mask tensor if one was provided in the input. In + addition this callable must also return a 1-D tensor of the form + [height, width, channels] containing the size of the true image, as the + image resizer can perform zero padding. See protos/image_resizer.proto. + feature_extractor: A FasterRCNNFeatureExtractor object. + number_of_stages: An integer values taking values in {1, 2, 3}. If + 1, the function will construct only the Region Proposal Network (RPN) + part of the model. If 2, the function will perform box refinement and + other auxiliary predictions all in the second stage. If 3, it will + extract features from refined boxes and perform the auxiliary + predictions on the non-maximum suppressed refined boxes. + If is_training is true and the value of number_of_stages is 3, it is + reduced to 2 since all the model heads are trained in parallel in second + stage during training. 
+ first_stage_anchor_generator: An anchor_generator.AnchorGenerator object + (note that currently we only support + grid_anchor_generator.GridAnchorGenerator objects) + first_stage_target_assigner: Target assigner to use for first stage of + Faster R-CNN (RPN). + first_stage_atrous_rate: A single integer indicating the atrous rate for + the single convolution op which is applied to the `rpn_features_to_crop` + tensor to obtain a tensor to be used for box prediction. Some feature + extractors optionally allow for producing feature maps computed at + denser resolutions. The atrous rate is used to compensate for the + denser feature maps by using an effectively larger receptive field. + (This should typically be set to 1). + first_stage_box_predictor_arg_scope_fn: Either a + Keras layer hyperparams object or a function to construct tf-slim + arg_scope for conv2d, separable_conv2d and fully_connected ops. Used + for the RPN box predictor. If it is a keras hyperparams object the + RPN box predictor will be a Keras model. If it is a function to + construct an arg scope it will be a tf-slim box predictor. + first_stage_box_predictor_kernel_size: Kernel size to use for the + convolution op just prior to RPN box predictions. + first_stage_box_predictor_depth: Output depth for the convolution op + just prior to RPN box predictions. + first_stage_minibatch_size: The "batch size" to use for computing the + objectness and location loss of the region proposal network. This + "batch size" refers to the number of anchors selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + first_stage_sampler: Sampler to use for first stage loss (RPN loss). + first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores` and optional `clip_window`(with + all other inputs already set) and returns a dictionary containing + tensors with keys: `detection_boxes`, `detection_scores`, + `detection_classes`, `num_detections`. This is used to perform non max + suppression on the boxes predicted by the Region Proposal Network + (RPN). + See `post_processing.batch_multiclass_non_max_suppression` for the type + and shape of these tensors. + first_stage_max_proposals: Maximum number of boxes to retain after + performing Non-Max Suppression (NMS) on the boxes predicted by the + Region Proposal Network (RPN). + first_stage_localization_loss_weight: A float + first_stage_objectness_loss_weight: A float + crop_and_resize_fn: A differentiable resampler to use for cropping RPN + proposal features. + initial_crop_size: A single integer indicating the output size + (width and height are set to be the same) of the initial bilinear + interpolation based cropping during ROI pooling. + maxpool_kernel_size: A single integer indicating the kernel size of the + max pool op on the cropped feature map during ROI pooling. + maxpool_stride: A single integer indicating the stride of the max pool + op on the cropped feature map during ROI pooling. + second_stage_target_assigner: Target assigner to use for second stage of + Faster R-CNN. If the model is configured with multiple prediction heads, + this target assigner is used to generate targets for all heads (with the + correct `unmatched_class_label`). + second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for + the second stage. 
+ second_stage_batch_size: The batch size used for computing the + classification and refined location loss of the box classifier. This + "batch size" refers to the number of proposals selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + second_stage_sampler: Sampler to use for second stage loss (box + classifier loss). + second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores`, optional `clip_window` and + optional (kwarg) `mask` inputs (with all other inputs already set) + and returns a dictionary containing tensors with keys: + `detection_boxes`, `detection_scores`, `detection_classes`, + `num_detections`, and (optionally) `detection_masks`. See + `post_processing.batch_multiclass_non_max_suppression` for the type and + shape of these tensors. + second_stage_score_conversion_fn: Callable elementwise nonlinearity + (that takes tensors as inputs and returns tensors). This is usually + used to convert logits to probabilities. + second_stage_localization_loss_weight: A float indicating the scale factor + for second stage localization loss. + second_stage_classification_loss_weight: A float indicating the scale + factor for second stage classification loss. + second_stage_classification_loss: Classification loss used by the second + stage classifier. Either losses.WeightedSigmoidClassificationLoss or + losses.WeightedSoftmaxClassificationLoss. + second_stage_mask_prediction_loss_weight: A float indicating the scale + factor for second stage mask prediction loss. This is applicable only if + second stage box predictor is configured to predict masks. + hard_example_miner: A losses.HardExampleMiner object (can be None). + parallel_iterations: (Optional) The number of iterations allowed to run + in parallel for calls to tf.map_fn. + add_summaries: boolean (default: True) controlling whether summary ops + should be added to tensorflow graph. + clip_anchors_to_image: Normally, anchors generated for a given image size + are pruned during training if they lie outside the image window. This + option clips the anchors to be within the image instead of pruning. + use_static_shapes: If True, uses implementation of ops with static shape + guarantees. + resize_masks: Indicates whether the masks presend in the groundtruth + should be resized in the model with `image_resizer_fn` + freeze_batchnorm: Whether to freeze batch norm parameters in the first + stage box predictor during training or not. When training with a small + batch size (e.g. 1), it is desirable to freeze batch norm update and + use pretrained batch norm params. + return_raw_detections_during_predict: Whether to return raw detection + boxes in the predict() method. These are decoded boxes that have not + been through postprocessing (i.e. NMS). Default False. + output_final_box_features: Whether to output final box features. If true, + it crops the feauture map based on the final box prediction and returns + in the dict as detection_features. + attention_bottleneck_dimension: A single integer. The bottleneck feature + dimension of the attention block. + attention_temperature: A single float. The attention temperature. + + Raises: + ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at + training time. + ValueError: If first_stage_anchor_generator is not of type + grid_anchor_generator.GridAnchorGenerator. 
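A quick worked example of the tempered softmax used for the attention weights (illustrative numbers, not from the source):

    import numpy as np

    def tempered_softmax(w, t):
        e = np.exp(np.asarray(w) / t)
        return e / e.sum()

    tempered_softmax([2.0, 1.0], 0.2)   # -> ~[0.993, 0.007]  (low temperature sharpens)
    tempered_softmax([2.0, 1.0], 10.0)  # -> ~[0.525, 0.475]  (high temperature flattens)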
+ """ + super(ContextRCNNMetaArch, self).__init__( + is_training, + num_classes, + image_resizer_fn, + feature_extractor, + number_of_stages, + first_stage_anchor_generator, + first_stage_target_assigner, + first_stage_atrous_rate, + first_stage_box_predictor_arg_scope_fn, + first_stage_box_predictor_kernel_size, + first_stage_box_predictor_depth, + first_stage_minibatch_size, + first_stage_sampler, + first_stage_non_max_suppression_fn, + first_stage_max_proposals, + first_stage_localization_loss_weight, + first_stage_objectness_loss_weight, + crop_and_resize_fn, + initial_crop_size, + maxpool_kernel_size, + maxpool_stride, + second_stage_target_assigner, + second_stage_mask_rcnn_box_predictor, + second_stage_batch_size, + second_stage_sampler, + second_stage_non_max_suppression_fn, + second_stage_score_conversion_fn, + second_stage_localization_loss_weight, + second_stage_classification_loss_weight, + second_stage_classification_loss, + second_stage_mask_prediction_loss_weight=( + second_stage_mask_prediction_loss_weight), + hard_example_miner=hard_example_miner, + parallel_iterations=parallel_iterations, + add_summaries=add_summaries, + clip_anchors_to_image=clip_anchors_to_image, + use_static_shapes=use_static_shapes, + resize_masks=resize_masks, + freeze_batchnorm=freeze_batchnorm, + return_raw_detections_during_predict=( + return_raw_detections_during_predict), + output_final_box_features=output_final_box_features) + + if tf_version.is_tf1(): + self._context_feature_extract_fn = functools.partial( + context_rcnn_lib.compute_box_context_attention, + bottleneck_dimension=attention_bottleneck_dimension, + attention_temperature=attention_temperature, + is_training=is_training) + else: + self._context_feature_extract_fn = context_rcnn_lib_tf2.AttentionBlock( + bottleneck_dimension=attention_bottleneck_dimension, + attention_temperature=attention_temperature, + is_training=is_training) + + @staticmethod + def get_side_inputs(features): + """Overrides the get_side_inputs function in the base class. + + This function returns context_features and valid_context_size, which will be + used in the _compute_second_stage_input_feature_maps function. + + Args: + features: A dictionary of tensors. + + Returns: + A dictionary of tensors contains context_features and valid_context_size. + + Raises: + ValueError: If context_features or valid_context_size is not in the + features. + """ + if (fields.InputDataFields.context_features not in features or + fields.InputDataFields.valid_context_size not in features): + raise ValueError( + "Please make sure context_features and valid_context_size are in the " + "features") + + return { + fields.InputDataFields.context_features: + features[fields.InputDataFields.context_features], + fields.InputDataFields.valid_context_size: + features[fields.InputDataFields.valid_context_size] + } + + def _compute_second_stage_input_feature_maps(self, features_to_crop, + proposal_boxes_normalized, + image_shape, + context_features, + valid_context_size): + """Crops to a set of proposals from the feature map for a batch of images. + + This function overrides the one in the FasterRCNNMetaArch. Aside from + cropping and resizing the feature maps, which is done in the parent class, + it adds context attention features to the box features. 
+ + Args: + features_to_crop: A float32 Tensor with shape [batch_size, height, width, + depth] + proposal_boxes_normalized: A float32 Tensor with shape [batch_size, + num_proposals, box_code_size] containing proposal boxes in normalized + coordinates. + image_shape: A 1D int32 tensors of size [4] containing the image shape. + context_features: A float Tensor of shape [batch_size, context_size, + num_context_features]. + valid_context_size: A int32 Tensor of shape [batch_size]. + + Returns: + A float32 Tensor with shape [K, new_height, new_width, depth]. + """ + del image_shape + box_features = self._crop_and_resize_fn( + features_to_crop, proposal_boxes_normalized, None, + [self._initial_crop_size, self._initial_crop_size]) + + attention_features = self._context_feature_extract_fn( + box_features=box_features, + context_features=context_features, + valid_context_size=valid_context_size) + + # Adds box features with attention features. + box_features += attention_features + + flattened_feature_maps = self._flatten_first_two_dimensions(box_features) + + return self._maxpool_layer(flattened_feature_maps) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_meta_arch.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_meta_arch.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cfb291758be45274ce001092a72e41063038416 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_meta_arch.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_meta_arch_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_meta_arch_test.py new file mode 100644 index 0000000000000000000000000000000000000000..709cf0e3ebe89ee2789dbe6884298509ad33ce61 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/context_rcnn_meta_arch_test.py @@ -0,0 +1,541 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object_detection.meta_architectures.context_meta_arch.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import unittest +from unittest import mock # pylint: disable=g-importing-member +from absl.testing import parameterized +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from google.protobuf import text_format + +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.builders import post_processing_builder +from object_detection.core import balanced_positive_negative_sampler as sampler +from object_detection.core import losses +from object_detection.core import post_processing +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner +from object_detection.meta_architectures import context_rcnn_meta_arch +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.protos import box_predictor_pb2 +from object_detection.protos import hyperparams_pb2 +from object_detection.protos import post_processing_pb2 +from object_detection.utils import spatial_transform_ops as spatial_ops +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + + +class FakeFasterRCNNFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Fake feature extractor to use in tests.""" + + def __init__(self): + super(FakeFasterRCNNFeatureExtractor, self).__init__( + is_training=False, + first_stage_features_stride=32, + reuse_weights=None, + weight_decay=0.0) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def _extract_proposal_features(self, preprocessed_inputs, scope): + with tf.variable_scope('mock_model'): + proposal_features = 0 * slim.conv2d( + preprocessed_inputs, num_outputs=3, kernel_size=1, scope='layer1') + return proposal_features, {} + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + with tf.variable_scope('mock_model'): + return 0 * slim.conv2d( + proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer2') + + +class FakeFasterRCNNKerasFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): + """Fake feature extractor to use in tests.""" + + def __init__(self): + super(FakeFasterRCNNKerasFeatureExtractor, self).__init__( + is_training=False, first_stage_features_stride=32, weight_decay=0.0) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def get_proposal_feature_extractor_model(self, name): + + class ProposalFeatureExtractor(tf.keras.Model): + """Dummy proposal feature extraction.""" + + def __init__(self, name): + super(ProposalFeatureExtractor, self).__init__(name=name) + self.conv = None + + def build(self, input_shape): + self.conv = tf.keras.layers.Conv2D( + 3, kernel_size=1, padding='SAME', name='layer1') + + def call(self, inputs): + return self.conv(inputs) + + return ProposalFeatureExtractor(name=name) + + def get_box_classifier_feature_extractor_model(self, name): + return tf.keras.Sequential([ + tf.keras.layers.Conv2D( + 3, kernel_size=1, padding='SAME', name=name + '_layer2') + ]) + + +class ContextRCNNMetaArchTest(test_case.TestCase, 
parameterized.TestCase): + + def _get_model(self, box_predictor, **common_kwargs): + return context_rcnn_meta_arch.ContextRCNNMetaArch( + initial_crop_size=3, + maxpool_kernel_size=1, + maxpool_stride=1, + second_stage_mask_rcnn_box_predictor=box_predictor, + attention_bottleneck_dimension=10, + attention_temperature=0.2, + **common_kwargs) + + def _build_arg_scope_with_hyperparams(self, hyperparams_text_proto, + is_training): + hyperparams = hyperparams_pb2.Hyperparams() + text_format.Merge(hyperparams_text_proto, hyperparams) + return hyperparams_builder.build(hyperparams, is_training=is_training) + + def _build_keras_layer_hyperparams(self, hyperparams_text_proto): + hyperparams = hyperparams_pb2.Hyperparams() + text_format.Merge(hyperparams_text_proto, hyperparams) + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def _get_second_stage_box_predictor_text_proto(self, + share_box_across_classes=False + ): + share_box_field = 'true' if share_box_across_classes else 'false' + box_predictor_text_proto = """ + mask_rcnn_box_predictor {{ + fc_hyperparams {{ + op: FC + activation: NONE + regularizer {{ + l2_regularizer {{ + weight: 0.0005 + }} + }} + initializer {{ + variance_scaling_initializer {{ + factor: 1.0 + uniform: true + mode: FAN_AVG + }} + }} + }} + share_box_across_classes: {share_box_across_classes} + }} + """.format(share_box_across_classes=share_box_field) + return box_predictor_text_proto + + def _get_box_classifier_features_shape(self, + image_size, + batch_size, + max_num_proposals, + initial_crop_size, + maxpool_stride, + num_features): + return (batch_size * max_num_proposals, + initial_crop_size/maxpool_stride, + initial_crop_size/maxpool_stride, + num_features) + + def _get_second_stage_box_predictor(self, + num_classes, + is_training, + predict_masks, + masks_are_class_agnostic, + share_box_across_classes=False, + use_keras=False): + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge( + self._get_second_stage_box_predictor_text_proto( + share_box_across_classes), box_predictor_proto) + if predict_masks: + text_format.Merge( + self._add_mask_to_second_stage_box_predictor_text_proto( + masks_are_class_agnostic), box_predictor_proto) + + if use_keras: + return box_predictor_builder.build_keras( + hyperparams_builder.KerasLayerHyperparams, + inplace_batchnorm_update=False, + freeze_batchnorm=False, + box_predictor_config=box_predictor_proto, + num_classes=num_classes, + num_predictions_per_location_list=None, + is_training=is_training) + else: + return box_predictor_builder.build( + hyperparams_builder.build, + box_predictor_proto, + num_classes=num_classes, + is_training=is_training) + + def _build_model(self, + is_training, + number_of_stages, + second_stage_batch_size, + first_stage_max_proposals=8, + num_classes=2, + hard_mining=False, + softmax_second_stage_classification_loss=True, + predict_masks=False, + pad_to_max_dimension=None, + masks_are_class_agnostic=False, + use_matmul_crop_and_resize=False, + clip_anchors_to_image=False, + use_matmul_gather_in_matcher=False, + use_static_shapes=False, + calibration_mapping_value=None, + share_box_across_classes=False, + return_raw_detections_during_predict=False): + use_keras = tf_version.is_tf2() + def image_resizer_fn(image, masks=None): + """Fake image resizer function.""" + resized_inputs = [] + resized_image = tf.identity(image) + if pad_to_max_dimension is not None: + resized_image = tf.image.pad_to_bounding_box(image, 0, 0, + pad_to_max_dimension, + pad_to_max_dimension) + 
resized_inputs.append(resized_image) + if masks is not None: + resized_masks = tf.identity(masks) + if pad_to_max_dimension is not None: + resized_masks = tf.image.pad_to_bounding_box( + tf.transpose(masks, [1, 2, 0]), 0, 0, pad_to_max_dimension, + pad_to_max_dimension) + resized_masks = tf.transpose(resized_masks, [2, 0, 1]) + resized_inputs.append(resized_masks) + resized_inputs.append(tf.shape(image)) + return resized_inputs + + # anchors in this test are designed so that a subset of anchors are inside + # the image and a subset of anchors are outside. + first_stage_anchor_scales = (0.001, 0.005, 0.1) + first_stage_anchor_aspect_ratios = (0.5, 1.0, 2.0) + first_stage_anchor_strides = (1, 1) + first_stage_anchor_generator = grid_anchor_generator.GridAnchorGenerator( + first_stage_anchor_scales, + first_stage_anchor_aspect_ratios, + anchor_stride=first_stage_anchor_strides) + first_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', + 'proposal', + use_matmul_gather=use_matmul_gather_in_matcher) + + if use_keras: + fake_feature_extractor = FakeFasterRCNNKerasFeatureExtractor() + else: + fake_feature_extractor = FakeFasterRCNNFeatureExtractor() + + first_stage_box_predictor_hyperparams_text_proto = """ + op: CONV + activation: RELU + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + } + } + """ + if use_keras: + first_stage_box_predictor_arg_scope_fn = ( + self._build_keras_layer_hyperparams( + first_stage_box_predictor_hyperparams_text_proto)) + else: + first_stage_box_predictor_arg_scope_fn = ( + self._build_arg_scope_with_hyperparams( + first_stage_box_predictor_hyperparams_text_proto, is_training)) + + first_stage_box_predictor_kernel_size = 3 + first_stage_atrous_rate = 1 + first_stage_box_predictor_depth = 512 + first_stage_minibatch_size = 3 + first_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=0.5, is_static=use_static_shapes) + + first_stage_nms_score_threshold = -1.0 + first_stage_nms_iou_threshold = 1.0 + first_stage_max_proposals = first_stage_max_proposals + first_stage_non_max_suppression_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=first_stage_nms_score_threshold, + iou_thresh=first_stage_nms_iou_threshold, + max_size_per_class=first_stage_max_proposals, + max_total_size=first_stage_max_proposals, + use_static_shapes=use_static_shapes) + + first_stage_localization_loss_weight = 1.0 + first_stage_objectness_loss_weight = 1.0 + + post_processing_config = post_processing_pb2.PostProcessing() + post_processing_text_proto = """ + score_converter: IDENTITY + batch_non_max_suppression { + score_threshold: -20.0 + iou_threshold: 1.0 + max_detections_per_class: 5 + max_total_detections: 5 + use_static_shapes: """ + '{}'.format(use_static_shapes) + """ + } + """ + if calibration_mapping_value: + calibration_text_proto = """ + calibration_config { + function_approximation { + x_y_pairs { + x_y_pair { + x: 0.0 + y: %f + } + x_y_pair { + x: 1.0 + y: %f + }}}}""" % (calibration_mapping_value, calibration_mapping_value) + post_processing_text_proto = ( + post_processing_text_proto + ' ' + calibration_text_proto) + text_format.Merge(post_processing_text_proto, post_processing_config) + second_stage_non_max_suppression_fn, second_stage_score_conversion_fn = ( + post_processing_builder.build(post_processing_config)) + + second_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', + 
'detection', + use_matmul_gather=use_matmul_gather_in_matcher) + second_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=1.0, is_static=use_static_shapes) + + second_stage_localization_loss_weight = 1.0 + second_stage_classification_loss_weight = 1.0 + if softmax_second_stage_classification_loss: + second_stage_classification_loss = ( + losses.WeightedSoftmaxClassificationLoss()) + else: + second_stage_classification_loss = ( + losses.WeightedSigmoidClassificationLoss()) + + hard_example_miner = None + if hard_mining: + hard_example_miner = losses.HardExampleMiner( + num_hard_examples=1, + iou_threshold=0.99, + loss_type='both', + cls_loss_weight=second_stage_classification_loss_weight, + loc_loss_weight=second_stage_localization_loss_weight, + max_negatives_per_positive=None) + + crop_and_resize_fn = ( + spatial_ops.multilevel_matmul_crop_and_resize + if use_matmul_crop_and_resize + else spatial_ops.multilevel_native_crop_and_resize) + common_kwargs = { + 'is_training': + is_training, + 'num_classes': + num_classes, + 'image_resizer_fn': + image_resizer_fn, + 'feature_extractor': + fake_feature_extractor, + 'number_of_stages': + number_of_stages, + 'first_stage_anchor_generator': + first_stage_anchor_generator, + 'first_stage_target_assigner': + first_stage_target_assigner, + 'first_stage_atrous_rate': + first_stage_atrous_rate, + 'first_stage_box_predictor_arg_scope_fn': + first_stage_box_predictor_arg_scope_fn, + 'first_stage_box_predictor_kernel_size': + first_stage_box_predictor_kernel_size, + 'first_stage_box_predictor_depth': + first_stage_box_predictor_depth, + 'first_stage_minibatch_size': + first_stage_minibatch_size, + 'first_stage_sampler': + first_stage_sampler, + 'first_stage_non_max_suppression_fn': + first_stage_non_max_suppression_fn, + 'first_stage_max_proposals': + first_stage_max_proposals, + 'first_stage_localization_loss_weight': + first_stage_localization_loss_weight, + 'first_stage_objectness_loss_weight': + first_stage_objectness_loss_weight, + 'second_stage_target_assigner': + second_stage_target_assigner, + 'second_stage_batch_size': + second_stage_batch_size, + 'second_stage_sampler': + second_stage_sampler, + 'second_stage_non_max_suppression_fn': + second_stage_non_max_suppression_fn, + 'second_stage_score_conversion_fn': + second_stage_score_conversion_fn, + 'second_stage_localization_loss_weight': + second_stage_localization_loss_weight, + 'second_stage_classification_loss_weight': + second_stage_classification_loss_weight, + 'second_stage_classification_loss': + second_stage_classification_loss, + 'hard_example_miner': + hard_example_miner, + 'crop_and_resize_fn': + crop_and_resize_fn, + 'clip_anchors_to_image': + clip_anchors_to_image, + 'use_static_shapes': + use_static_shapes, + 'resize_masks': + True, + 'return_raw_detections_during_predict': + return_raw_detections_during_predict + } + + return self._get_model( + self._get_second_stage_box_predictor( + num_classes=num_classes, + is_training=is_training, + use_keras=use_keras, + predict_masks=predict_masks, + masks_are_class_agnostic=masks_are_class_agnostic, + share_box_across_classes=share_box_across_classes), **common_kwargs) + + @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') + @mock.patch.object(context_rcnn_meta_arch, 'context_rcnn_lib') + def test_prediction_mock_tf1(self, mock_context_rcnn_lib_v1): + """Mocks the context_rcnn_lib_v1 module to test the prediction. 
+ + Using mock object so that we can ensure compute_box_context_attention is + called in side the prediction function. + + Args: + mock_context_rcnn_lib_v1: mock module for the context_rcnn_lib_v1. + """ + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=6, + num_classes=42) + mock_tensor = tf.ones([2, 8, 3, 3, 3], tf.float32) + + mock_context_rcnn_lib_v1.compute_box_context_attention.return_value = mock_tensor + inputs_shape = (2, 20, 20, 3) + inputs = tf.cast( + tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + context_features = tf.random_uniform((2, 20, 10), + minval=0, + maxval=255, + dtype=tf.float32) + valid_context_size = tf.random_uniform((2,), + minval=0, + maxval=10, + dtype=tf.int32) + features = { + fields.InputDataFields.context_features: context_features, + fields.InputDataFields.valid_context_size: valid_context_size + } + + side_inputs = model.get_side_inputs(features) + + _ = model.predict(preprocessed_inputs, true_image_shapes, **side_inputs) + mock_context_rcnn_lib_v1.compute_box_context_attention.assert_called_once() + + @parameterized.named_parameters( + {'testcase_name': 'static_shapes', 'static_shapes': True}, + {'testcase_name': 'nostatic_shapes', 'static_shapes': False}, + ) + def test_prediction_end_to_end(self, static_shapes): + """Runs prediction end to end and test the shape of the results.""" + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=6, + use_matmul_crop_and_resize=static_shapes, + clip_anchors_to_image=static_shapes, + use_matmul_gather_in_matcher=static_shapes, + use_static_shapes=static_shapes, + num_classes=42) + + def graph_fn(): + inputs_shape = (2, 20, 20, 3) + inputs = tf.cast( + tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + context_features = tf.random_uniform((2, 20, 10), + minval=0, + maxval=255, + dtype=tf.float32) + valid_context_size = tf.random_uniform((2,), + minval=0, + maxval=10, + dtype=tf.int32) + features = { + fields.InputDataFields.context_features: context_features, + fields.InputDataFields.valid_context_size: valid_context_size + } + + side_inputs = model.get_side_inputs(features) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes, + **side_inputs) + return (prediction_dict['rpn_box_predictor_features'], + prediction_dict['rpn_box_encodings'], + prediction_dict['refined_box_encodings'], + prediction_dict['proposal_boxes_normalized'], + prediction_dict['proposal_boxes']) + execute_fn = self.execute if static_shapes else self.execute_cpu + (rpn_box_predictor_features, rpn_box_encodings, refined_box_encodings, + proposal_boxes_normalized, proposal_boxes) = execute_fn(graph_fn, [], + graph=g) + self.assertAllEqual(len(rpn_box_predictor_features), 1) + self.assertAllEqual(rpn_box_predictor_features[0].shape, [2, 20, 20, 512]) + self.assertAllEqual(rpn_box_encodings.shape, [2, 3600, 4]) + self.assertAllEqual(refined_box_encodings.shape, [16, 42, 4]) + self.assertAllEqual(proposal_boxes_normalized.shape, [2, 8, 4]) + self.assertAllEqual(proposal_boxes.shape, [2, 8, 4]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch.py 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..bfe81fe57218cfda5a1ce1b972ca487600b01035 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch.py @@ -0,0 +1,2906 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Faster R-CNN meta-architecture definition. + +General tensorflow implementation of Faster R-CNN detection models. + +See Faster R-CNN: Ren, Shaoqing, et al. +"Faster R-CNN: Towards real-time object detection with region proposal +networks." Advances in neural information processing systems. 2015. + +We allow for three modes: number_of_stages={1, 2, 3}. In case of 1 stage, +all of the user facing methods (e.g., predict, postprocess, loss) can be used as +if the model consisted only of the RPN, returning class agnostic proposals +(these can be thought of as approximate detections with no associated class +information). In case of 2 stages, proposals are computed, then passed +through a second stage "box classifier" to yield (multi-class) detections. +Finally, in case of 3 stages which is only used during eval, proposals are +computed, then passed through a second stage "box classifier" that will compute +refined boxes and classes, and then features are pooled from the refined and +non-maximum suppressed boxes and are passed through the box classifier again. If +number of stages is 3 during training it will be reduced to two automatically. + +Implementations of Faster R-CNN models must define a new +FasterRCNNFeatureExtractor and override three methods: `preprocess`, +`_extract_proposal_features` (the first stage of the model), and +`_extract_box_classifier_features` (the second stage of the model). Optionally, +the `restore_fn` method can be overridden. See tests for an example. + +A few important notes: ++ Batching conventions: We support batched inference and training where +all images within a batch have the same resolution. Batch sizes are determined +dynamically via the shape of the input tensors (rather than being specified +directly as, e.g., a model constructor). + +A complication is that due to non-max suppression, we are not guaranteed to get +the same number of proposals from the first stage RPN (region proposal network) +for each image (though in practice, we should often get the same number of +proposals). For this reason we pad to a max number of proposals per image +within a batch. This `self.max_num_proposals` property is set to the +`first_stage_max_proposals` parameter at inference time and the +`second_stage_batch_size` at training time since we subsample the batch to +be sent through the box classifier during training. 
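+ +For example (purely illustrative; the numbers come from the shapes asserted in the unit test above, not from anything required by the model): with a batch of 2 images and `first_stage_max_proposals` = 8 at inference time, each image is padded to 8 proposals, so the flattened second-stage input contains 2 * 8 = 16 proposals in total.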
+ +For the second stage of the pipeline, we arrange the proposals for all images +within the batch along a single batch dimension. For example, the input to +_extract_box_classifier_features is a tensor of shape +`[total_num_proposals, crop_height, crop_width, depth]` where +total_num_proposals is batch_size * self.max_num_proposals. (And note that per +the above comment, a subset of these entries correspond to zero paddings.) + ++ Coordinate representations: +Following the API (see model.DetectionModel definition), our outputs after +postprocessing operations are always normalized boxes; however, internally, we +sometimes convert to absolute --- e.g. for loss computation. In particular, +anchors and proposal_boxes are both represented as absolute coordinates. + +Images are resized in the `preprocess` method. + +The Faster R-CNN meta architecture has two post-processing methods +`_postprocess_rpn` which is applied after first stage and +`_postprocess_box_classifier` which is applied after second stage. There are +three different ways post-processing can happen depending on number_of_stages +configured in the meta architecture: + +1. When number_of_stages is 1: + `_postprocess_rpn` is run as part of the `postprocess` method where + true_image_shapes is used to clip proposals, perform non-max suppression and + normalize them. +2. When number of stages is 2: + `_postprocess_rpn` is run as part of the `_predict_second_stage` method where + `resized_image_shapes` is used to clip proposals, perform non-max suppression + and normalize them. In this case the `postprocess` method skips `_postprocess_rpn` + and only runs `_postprocess_box_classifier` using `true_image_shapes` to clip + detections, perform non-max suppression and normalize them. +3. When number of stages is 3: + `_postprocess_rpn` is run as part of the `_predict_second_stage` using + `resized_image_shapes` to clip proposals, perform non-max suppression and + normalize them. Subsequently, `_postprocess_box_classifier` is run as part of + `_predict_third_stage` using `true_image_shapes` to clip detections, perform + non-max suppression and normalize them. In this case, the `postprocess` method + skips both `_postprocess_rpn` and `_postprocess_box_classifier`. +""" + +from __future__ import print_function +import abc +import functools +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import box_predictor +from object_detection.core import losses +from object_detection.core import model +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner +from object_detection.utils import ops +from object_detection.utils import shape_utils +from object_detection.utils import variables_helper + + +_UNINITIALIZED_FEATURE_EXTRACTOR = '__uninitialized__' + + +class FasterRCNNFeatureExtractor(object): + """Faster R-CNN Feature Extractor definition.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0): + """Constructor. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + first_stage_features_stride: Output stride of extracted RPN feature map.
+ batch_norm_trainable: Whether to update batch norm parameters during + training or not. When training with a relative large batch size + (e.g. 8), it could be desirable to enable batch norm update. + reuse_weights: Whether to reuse variables. Default is None. + weight_decay: float weight decay for feature extractor (default: 0.0). + """ + self._is_training = is_training + self._first_stage_features_stride = first_stage_features_stride + self._train_batch_norm = (batch_norm_trainable and is_training) + self._reuse_weights = tf.AUTO_REUSE if reuse_weights else None + self._weight_decay = weight_decay + + @abc.abstractmethod + def preprocess(self, resized_inputs): + """Feature-extractor specific preprocessing (minus image resizing).""" + pass + + def extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + This function is responsible for extracting feature maps from preprocessed + images. These features are used by the region proposal network (RPN) to + predict proposals. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + activations: A dictionary mapping activation tensor names to tensors. + """ + with tf.variable_scope(scope, values=[preprocessed_inputs]): + return self._extract_proposal_features(preprocessed_inputs, scope) + + @abc.abstractmethod + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features, to be overridden.""" + pass + + def extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name. + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + with tf.variable_scope( + scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE): + return self._extract_box_classifier_features(proposal_feature_maps, scope) + + @abc.abstractmethod + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features, to be overridden.""" + pass + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor. + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. 
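+ + For example (an illustrative, hypothetical variable name): a model variable named 'FirstStageFeatureExtractor/InceptionV2/Conv2d_1a_7x7/weights' would appear in the returned dict under the key 'InceptionV2/Conv2d_1a_7x7/weights', i.e. with the feature extractor scope prefix stripped.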
+ """ + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + for scope_name in [first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope]: + if variable.op.name.startswith(scope_name): + var_name = variable.op.name.replace(scope_name + '/', '') + variables_to_restore[var_name] = variable + return variables_to_restore + + +class FasterRCNNKerasFeatureExtractor(object): + """Keras-based Faster R-CNN Feature Extractor definition.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + first_stage_features_stride: Output stride of extracted RPN feature map. + batch_norm_trainable: Whether to update batch norm parameters during + training or not. When training with a relative large batch size + (e.g. 8), it could be desirable to enable batch norm update. + weight_decay: float weight decay for feature extractor (default: 0.0). + """ + self._is_training = is_training + self._first_stage_features_stride = first_stage_features_stride + self._train_batch_norm = (batch_norm_trainable and is_training) + self._weight_decay = weight_decay + + @abc.abstractmethod + def preprocess(self, resized_inputs): + """Feature-extractor specific preprocessing (minus image resizing).""" + pass + + @abc.abstractmethod + def get_proposal_feature_extractor_model(self, name): + """Get model that extracts first stage RPN features, to be overridden.""" + pass + + @abc.abstractmethod + def get_box_classifier_feature_extractor_model(self, name): + """Get model that extracts second stage box classifier features.""" + pass + + +class FasterRCNNMetaArch(model.DetectionModel): + """Faster R-CNN Meta-architecture definition.""" + + def __init__(self, + is_training, + num_classes, + image_resizer_fn, + feature_extractor, + number_of_stages, + first_stage_anchor_generator, + first_stage_target_assigner, + first_stage_atrous_rate, + first_stage_box_predictor_arg_scope_fn, + first_stage_box_predictor_kernel_size, + first_stage_box_predictor_depth, + first_stage_minibatch_size, + first_stage_sampler, + first_stage_non_max_suppression_fn, + first_stage_max_proposals, + first_stage_localization_loss_weight, + first_stage_objectness_loss_weight, + crop_and_resize_fn, + initial_crop_size, + maxpool_kernel_size, + maxpool_stride, + second_stage_target_assigner, + second_stage_mask_rcnn_box_predictor, + second_stage_batch_size, + second_stage_sampler, + second_stage_non_max_suppression_fn, + second_stage_score_conversion_fn, + second_stage_localization_loss_weight, + second_stage_classification_loss_weight, + second_stage_classification_loss, + second_stage_mask_prediction_loss_weight=1.0, + hard_example_miner=None, + parallel_iterations=16, + add_summaries=True, + clip_anchors_to_image=False, + use_static_shapes=False, + resize_masks=True, + freeze_batchnorm=False, + return_raw_detections_during_predict=False, + output_final_box_features=False): + """FasterRCNNMetaArch Constructor. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + num_classes: Number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). 
+ image_resizer_fn: A callable for image resizing. This callable + takes a rank-3 image tensor of shape [height, width, channels] + (corresponding to a single image), an optional rank-3 instance mask + tensor of shape [num_masks, height, width] and returns a resized rank-3 + image tensor, a resized mask tensor if one was provided in the input. In + addition this callable must also return a 1-D tensor of the form + [height, width, channels] containing the size of the true image, as the + image resizer can perform zero padding. See protos/image_resizer.proto. + feature_extractor: A FasterRCNNFeatureExtractor object. + number_of_stages: An integer values taking values in {1, 2, 3}. If + 1, the function will construct only the Region Proposal Network (RPN) + part of the model. If 2, the function will perform box refinement and + other auxiliary predictions all in the second stage. If 3, it will + extract features from refined boxes and perform the auxiliary + predictions on the non-maximum suppressed refined boxes. + If is_training is true and the value of number_of_stages is 3, it is + reduced to 2 since all the model heads are trained in parallel in second + stage during training. + first_stage_anchor_generator: An anchor_generator.AnchorGenerator object + (note that currently we only support + grid_anchor_generator.GridAnchorGenerator objects) + first_stage_target_assigner: Target assigner to use for first stage of + Faster R-CNN (RPN). + first_stage_atrous_rate: A single integer indicating the atrous rate for + the single convolution op which is applied to the `rpn_features_to_crop` + tensor to obtain a tensor to be used for box prediction. Some feature + extractors optionally allow for producing feature maps computed at + denser resolutions. The atrous rate is used to compensate for the + denser feature maps by using an effectively larger receptive field. + (This should typically be set to 1). + first_stage_box_predictor_arg_scope_fn: Either a + Keras layer hyperparams object or a function to construct tf-slim + arg_scope for conv2d, separable_conv2d and fully_connected ops. Used + for the RPN box predictor. If it is a keras hyperparams object the + RPN box predictor will be a Keras model. If it is a function to + construct an arg scope it will be a tf-slim box predictor. + first_stage_box_predictor_kernel_size: Kernel size to use for the + convolution op just prior to RPN box predictions. + first_stage_box_predictor_depth: Output depth for the convolution op + just prior to RPN box predictions. + first_stage_minibatch_size: The "batch size" to use for computing the + objectness and location loss of the region proposal network. This + "batch size" refers to the number of anchors selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + first_stage_sampler: Sampler to use for first stage loss (RPN loss). + first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores` and optional `clip_window`(with + all other inputs already set) and returns a dictionary containing + tensors with keys: `detection_boxes`, `detection_scores`, + `detection_classes`, `num_detections`. This is used to perform non max + suppression on the boxes predicted by the Region Proposal Network + (RPN). + See `post_processing.batch_multiclass_non_max_suppression` for the type + and shape of these tensors. 
+ first_stage_max_proposals: Maximum number of boxes to retain after + performing Non-Max Suppression (NMS) on the boxes predicted by the + Region Proposal Network (RPN). + first_stage_localization_loss_weight: A float + first_stage_objectness_loss_weight: A float + crop_and_resize_fn: A differentiable resampler to use for cropping RPN + proposal features. + initial_crop_size: A single integer indicating the output size + (width and height are set to be the same) of the initial bilinear + interpolation based cropping during ROI pooling. + maxpool_kernel_size: A single integer indicating the kernel size of the + max pool op on the cropped feature map during ROI pooling. + maxpool_stride: A single integer indicating the stride of the max pool + op on the cropped feature map during ROI pooling. + second_stage_target_assigner: Target assigner to use for second stage of + Faster R-CNN. If the model is configured with multiple prediction heads, + this target assigner is used to generate targets for all heads (with the + correct `unmatched_class_label`). + second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for + the second stage. + second_stage_batch_size: The batch size used for computing the + classification and refined location loss of the box classifier. This + "batch size" refers to the number of proposals selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + second_stage_sampler: Sampler to use for second stage loss (box + classifier loss). + second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores`, optional `clip_window` and + optional (kwarg) `mask` inputs (with all other inputs already set) + and returns a dictionary containing tensors with keys: + `detection_boxes`, `detection_scores`, `detection_classes`, + `num_detections`, and (optionally) `detection_masks`. See + `post_processing.batch_multiclass_non_max_suppression` for the type and + shape of these tensors. + second_stage_score_conversion_fn: Callable elementwise nonlinearity + (that takes tensors as inputs and returns tensors). This is usually + used to convert logits to probabilities. + second_stage_localization_loss_weight: A float indicating the scale factor + for second stage localization loss. + second_stage_classification_loss_weight: A float indicating the scale + factor for second stage classification loss. + second_stage_classification_loss: Classification loss used by the second + stage classifier. Either losses.WeightedSigmoidClassificationLoss or + losses.WeightedSoftmaxClassificationLoss. + second_stage_mask_prediction_loss_weight: A float indicating the scale + factor for second stage mask prediction loss. This is applicable only if + second stage box predictor is configured to predict masks. + hard_example_miner: A losses.HardExampleMiner object (can be None). + parallel_iterations: (Optional) The number of iterations allowed to run + in parallel for calls to tf.map_fn. + add_summaries: boolean (default: True) controlling whether summary ops + should be added to tensorflow graph. + clip_anchors_to_image: Normally, anchors generated for a given image size + are pruned during training if they lie outside the image window. This + option clips the anchors to be within the image instead of pruning. + use_static_shapes: If True, uses implementation of ops with static shape + guarantees. 
+ resize_masks: Indicates whether the masks present in the groundtruth + should be resized in the model with `image_resizer_fn`. + freeze_batchnorm: Whether to freeze batch norm parameters in the first + stage box predictor during training or not. When training with a small + batch size (e.g. 1), it is desirable to freeze batch norm update and + use pretrained batch norm params. + return_raw_detections_during_predict: Whether to return raw detection + boxes in the predict() method. These are decoded boxes that have not + been through postprocessing (i.e. NMS). Default False. + output_final_box_features: Whether to output final box features. If true, + it crops the feature map based on the final box prediction and returns it + in the dict as detection_features. + + Raises: + ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at + training time. + ValueError: If first_stage_anchor_generator is not of type + grid_anchor_generator.GridAnchorGenerator. + """ + # TODO(rathodv): add_summaries is currently unused. Respect that directive + # in the future. + super(FasterRCNNMetaArch, self).__init__(num_classes=num_classes) + + self._is_training = is_training + self._image_resizer_fn = image_resizer_fn + self._resize_masks = resize_masks + self._feature_extractor = feature_extractor + if isinstance(feature_extractor, FasterRCNNKerasFeatureExtractor): + # We delay building the feature extractor until it is used, + # to avoid creating the variables when a model is built just for data + # preprocessing. (This prevents a subtle bug where variable names are + # mismatched across workers, causing only one worker to be able to train) + self._feature_extractor_for_proposal_features = ( + _UNINITIALIZED_FEATURE_EXTRACTOR) + self._feature_extractor_for_box_classifier_features = ( + _UNINITIALIZED_FEATURE_EXTRACTOR) + else: + self._feature_extractor_for_proposal_features = None + self._feature_extractor_for_box_classifier_features = None + + self._number_of_stages = number_of_stages + + self._proposal_target_assigner = first_stage_target_assigner + self._detector_target_assigner = second_stage_target_assigner + # Both proposal and detector target assigners use the same box coder + self._box_coder = self._proposal_target_assigner.box_coder + + # (First stage) Region proposal network parameters + self._first_stage_anchor_generator = first_stage_anchor_generator + self._first_stage_atrous_rate = first_stage_atrous_rate + self._first_stage_box_predictor_depth = first_stage_box_predictor_depth + self._first_stage_box_predictor_kernel_size = ( + first_stage_box_predictor_kernel_size) + self._first_stage_minibatch_size = first_stage_minibatch_size + self._first_stage_sampler = first_stage_sampler + if isinstance(first_stage_box_predictor_arg_scope_fn, + hyperparams_builder.KerasLayerHyperparams): + num_anchors_per_location = ( + self._first_stage_anchor_generator.num_anchors_per_location()) + + conv_hyperparams = ( + first_stage_box_predictor_arg_scope_fn) + self._first_stage_box_predictor_first_conv = ( + tf.keras.Sequential([ + tf.keras.layers.Conv2D( + self._first_stage_box_predictor_depth, + kernel_size=[self._first_stage_box_predictor_kernel_size, + self._first_stage_box_predictor_kernel_size], + dilation_rate=self._first_stage_atrous_rate, + padding='SAME', + name='RPNConv', + **conv_hyperparams.params()), + conv_hyperparams.build_batch_norm( + (self._is_training and not freeze_batchnorm), + name='RPNBatchNorm'), + tf.keras.layers.Lambda( + tf.nn.relu6, + name='RPNActivation') + ],
name='FirstStageRPNFeatures')) + self._first_stage_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=self._is_training, + num_classes=1, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=False, + num_predictions_per_location_list=num_anchors_per_location, + use_dropout=False, + dropout_keep_prob=1.0, + box_code_size=self._box_coder.code_size, + kernel_size=1, + num_layers_before_predictor=0, + min_depth=0, + max_depth=0, + name=self.first_stage_box_predictor_scope)) + else: + self._first_stage_box_predictor_arg_scope_fn = ( + first_stage_box_predictor_arg_scope_fn) + def rpn_box_predictor_feature_extractor(single_rpn_features_to_crop): + with slim.arg_scope(self._first_stage_box_predictor_arg_scope_fn()): + return slim.conv2d( + single_rpn_features_to_crop, + self._first_stage_box_predictor_depth, + kernel_size=[ + self._first_stage_box_predictor_kernel_size, + self._first_stage_box_predictor_kernel_size + ], + rate=self._first_stage_atrous_rate, + activation_fn=tf.nn.relu6, + scope='Conv', + reuse=tf.AUTO_REUSE) + self._first_stage_box_predictor_first_conv = ( + rpn_box_predictor_feature_extractor) + self._first_stage_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=self._is_training, + num_classes=1, + conv_hyperparams_fn=self._first_stage_box_predictor_arg_scope_fn, + use_dropout=False, + dropout_keep_prob=1.0, + box_code_size=self._box_coder.code_size, + kernel_size=1, + num_layers_before_predictor=0, + min_depth=0, + max_depth=0)) + + self._first_stage_nms_fn = first_stage_non_max_suppression_fn + self._first_stage_max_proposals = first_stage_max_proposals + self._use_static_shapes = use_static_shapes + + self._first_stage_localization_loss = ( + losses.WeightedSmoothL1LocalizationLoss()) + self._first_stage_objectness_loss = ( + losses.WeightedSoftmaxClassificationLoss()) + self._first_stage_loc_loss_weight = first_stage_localization_loss_weight + self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight + + # Per-region cropping parameters + self._crop_and_resize_fn = crop_and_resize_fn + self._initial_crop_size = initial_crop_size + self._maxpool_kernel_size = maxpool_kernel_size + self._maxpool_stride = maxpool_stride + # If max pooling is to be used, build the layer + if maxpool_kernel_size: + self._maxpool_layer = tf.keras.layers.MaxPooling2D( + [self._maxpool_kernel_size, self._maxpool_kernel_size], + strides=self._maxpool_stride, + name='MaxPool2D') + + self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor + + self._second_stage_batch_size = second_stage_batch_size + self._second_stage_sampler = second_stage_sampler + + self._second_stage_nms_fn = second_stage_non_max_suppression_fn + self._second_stage_score_conversion_fn = second_stage_score_conversion_fn + + self._second_stage_localization_loss = ( + losses.WeightedSmoothL1LocalizationLoss()) + self._second_stage_classification_loss = second_stage_classification_loss + self._second_stage_mask_loss = ( + losses.WeightedSigmoidClassificationLoss()) + self._second_stage_loc_loss_weight = second_stage_localization_loss_weight + self._second_stage_cls_loss_weight = second_stage_classification_loss_weight + self._second_stage_mask_loss_weight = ( + second_stage_mask_prediction_loss_weight) + self._hard_example_miner = hard_example_miner + self._parallel_iterations = parallel_iterations + + self.clip_anchors_to_image = clip_anchors_to_image + + if 
self._number_of_stages <= 0 or self._number_of_stages > 3: + raise ValueError('Number of stages should be a value in {1, 2, 3}.') + self._batched_prediction_tensor_names = [] + self._return_raw_detections_during_predict = ( + return_raw_detections_during_predict) + self._output_final_box_features = output_final_box_features + + @property + def first_stage_feature_extractor_scope(self): + return 'FirstStageFeatureExtractor' + + @property + def second_stage_feature_extractor_scope(self): + return 'SecondStageFeatureExtractor' + + @property + def first_stage_box_predictor_scope(self): + return 'FirstStageBoxPredictor' + + @property + def second_stage_box_predictor_scope(self): + return 'SecondStageBoxPredictor' + + @property + def max_num_proposals(self): + """Max number of proposals (to pad to) for each image in the input batch. + + At training time, this is set to be the `second_stage_batch_size` if hard + example miner is not configured, else it is set to + `first_stage_max_proposals`. At inference time, this is always set to + `first_stage_max_proposals`. + + Returns: + A positive integer. + """ + if self._is_training and not self._hard_example_miner: + return self._second_stage_batch_size + return self._first_stage_max_proposals + + @property + def anchors(self): + if not self._anchors: + raise RuntimeError('anchors have not been constructed yet!') + if not isinstance(self._anchors, box_list.BoxList): + raise RuntimeError('anchors should be a BoxList object, but is not.') + return self._anchors + + @property + def batched_prediction_tensor_names(self): + if not self._batched_prediction_tensor_names: + raise RuntimeError('Must call predict() method to get batched prediction ' + 'tensor names.') + return self._batched_prediction_tensor_names + + @property + def feature_extractor(self): + return self._feature_extractor + + def preprocess(self, inputs): + """Feature-extractor specific preprocessing. + + See base class. + + For Faster R-CNN, we perform image resizing in the base class --- each + class subclassing FasterRCNNMetaArch is responsible for any additional + preprocessing (e.g., scaling pixel values to be in [-1, 1]). + + Args: + inputs: a [batch, height_in, width_in, channels] float tensor representing + a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: a [batch, height_out, width_out, channels] float + tensor representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + Raises: + ValueError: if inputs tensor does not have type tf.float32 + """ + + with tf.name_scope('Preprocessor'): + (resized_inputs, + true_image_shapes) = shape_utils.resize_images_and_return_shapes( + inputs, self._image_resizer_fn) + + return (self._feature_extractor.preprocess(resized_inputs), + true_image_shapes) + + def _compute_clip_window(self, image_shapes): + """Computes clip window for non max suppression based on image shapes. + + This function assumes that the clip window's left top corner is at (0, 0). + + Args: + image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing + shapes of images in the batch. Each row represents [height, width, + channels] of an image. + + Returns: + A 2-D float32 tensor of shape [batch_size, 4] containing the clip window + for each image in the form [ymin, xmin, ymax, xmax]. 
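+ + For example (illustrative values only): image_shapes = [[300, 400, 3], + [600, 800, 3]] yields [[0., 0., 300., 400.], [0., 0., 600., 800.]].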
+ """ + clip_heights = image_shapes[:, 0] + clip_widths = image_shapes[:, 1] + clip_window = tf.cast( + tf.stack([ + tf.zeros_like(clip_heights), + tf.zeros_like(clip_heights), clip_heights, clip_widths + ], + axis=1), + dtype=tf.float32) + return clip_window + + def _proposal_postprocess(self, rpn_box_encodings, + rpn_objectness_predictions_with_background, anchors, + image_shape, true_image_shapes): + """Wraps over FasterRCNNMetaArch._postprocess_rpn().""" + image_shape_2d = self._image_batch_shape_2d(image_shape) + proposal_boxes_normalized, _, _, num_proposals, _, _ = \ + self._postprocess_rpn( + rpn_box_encodings, rpn_objectness_predictions_with_background, + anchors, image_shape_2d, true_image_shapes) + return proposal_boxes_normalized, num_proposals + + def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): + """Predicts unpostprocessed tensors from input tensor. + + This function takes an input batch of images and runs it through the + forward pass of the network to yield "raw" un-postprocessed predictions. + If `number_of_stages` is 1, this function only returns first stage + RPN predictions (un-postprocessed). Otherwise it returns both + first stage RPN predictions as well as second stage box classifier + predictions. + + Other remarks: + + Anchor pruning vs. clipping: following the recommendation of the Faster + R-CNN paper, we prune anchors that venture outside the image window at + training time and clip anchors to the image window at inference time. + + Proposal padding: as described at the top of the file, proposals are + padded to self._max_num_proposals and flattened so that proposals from all + images within the input batch are arranged along the same batch dimension. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **side_inputs: additional tensors that are required by the network. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) rpn_box_predictor_features: A list of 4-D float32 tensor with shape + [batch_size, height_i, width_j, depth] to be used for predicting + proposal boxes and corresponding objectness scores. + 2) rpn_features_to_crop: A list of 4-D float32 tensor with shape + [batch_size, height, width, depth] representing image features to crop + using the proposal boxes predicted by the RPN. + 3) image_shape: a 1-D tensor of shape [4] representing the input + image shape. + 4) rpn_box_encodings: 3-D float tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted boxes. + 5) rpn_objectness_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, 2] containing class + predictions (logits) for each of the anchors. Note that this + tensor *includes* background class predictions (at class index 0). + 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors + for the first stage RPN (in absolute coordinates). Note that + `num_anchors` can differ depending on whether the model is created in + training or inference mode. + 7) feature_maps: A single element list containing a 4-D float32 tensor + with shape batch_size, height, width, depth] representing the RPN + features to crop. 
+ + (and if number_of_stages > 1): + 8) refined_box_encodings: a 3-D tensor with shape + [total_num_proposals, num_classes, self._box_coder.code_size] + representing predicted (final) refined box encodings, where + total_num_proposals=batch_size*self._max_num_proposals. If using + a shared box across classes the shape will instead be + [total_num_proposals, 1, self._box_coder.code_size]. + 9) class_predictions_with_background: a 3-D tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors, where + total_num_proposals=batch_size*self._max_num_proposals. + Note that this tensor *includes* background class predictions + (at class index 0). + 10) num_proposals: An int32 tensor of shape [batch_size] representing + the number of proposals generated by the RPN. `num_proposals` allows + us to keep track of which entries are to be treated as zero paddings + and which are not since we always pad the number of proposals to be + `self.max_num_proposals` for each image. + 11) proposal_boxes: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes in absolute coordinates. + 12) mask_predictions: (optional) a 4-D tensor with shape + [total_num_padded_proposals, num_classes, mask_height, mask_width] + containing instance mask predictions. + 13) raw_detection_boxes: (optional) a + [batch_size, self.max_num_proposals, num_classes, 4] float32 tensor + with detections prior to NMS in normalized coordinates. + 14) raw_detection_feature_map_indices: (optional) a + [batch_size, self.max_num_proposals, num_classes] int32 tensor with + indices indicating which feature map each raw detection box was + produced from. The indices correspond to the elements in the + 'feature_maps' field. + + Raises: + ValueError: If `predict` is called before `preprocess`. + """ + prediction_dict = self._predict_first_stage(preprocessed_inputs) + + if self._number_of_stages >= 2: + prediction_dict.update( + self._predict_second_stage( + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + prediction_dict['rpn_features_to_crop'], + prediction_dict['anchors'], prediction_dict['image_shape'], + true_image_shapes, **side_inputs)) + + if self._number_of_stages == 3: + prediction_dict = self._predict_third_stage(prediction_dict, + true_image_shapes) + + self._batched_prediction_tensor_names = [ + x for x in prediction_dict if x not in ('image_shape', 'anchors') + ] + return prediction_dict + + def _predict_first_stage(self, preprocessed_inputs): + """First stage of prediction. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) rpn_box_predictor_features: A list of 4-D float32/bfloat16 tensor + with shape [batch_size, height_i, width_j, depth] to be used for + predicting proposal boxes and corresponding objectness scores. + 2) rpn_features_to_crop: A list of 4-D float32/bfloat16 tensor with + shape [batch_size, height, width, depth] representing image features + to crop using the proposal boxes predicted by the RPN. + 3) image_shape: a 1-D tensor of shape [4] representing the input + image shape. + 4) rpn_box_encodings: 3-D float32 tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted boxes. 
+ 5) rpn_objectness_predictions_with_background: 3-D float32 tensor of + shape [batch_size, num_anchors, 2] containing class predictions + (logits) for each of the anchors. Note that this tensor *includes* + background class predictions (at class index 0). + 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors + for the first stage RPN (in absolute coordinates). Note that + `num_anchors` can differ depending on whether the model is created in + training or inference mode. + 7) feature_maps: A single element list containing a 4-D float32 tensor + with shape [batch_size, height, width, depth] representing the RPN + features to crop. + """ + (rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist, + image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs) + (rpn_box_encodings, rpn_objectness_predictions_with_background + ) = self._predict_rpn_proposals(rpn_box_predictor_features) + + # The Faster R-CNN paper recommends pruning anchors that venture outside + # the image window at training time and clipping at inference time. + clip_window = tf.cast(tf.stack([0, 0, image_shape[1], image_shape[2]]), + dtype=tf.float32) + if self._is_training: + if self.clip_anchors_to_image: + anchors_boxlist = box_list_ops.clip_to_window( + anchors_boxlist, clip_window, filter_nonoverlapping=False) + else: + (rpn_box_encodings, rpn_objectness_predictions_with_background, + anchors_boxlist) = self._remove_invalid_anchors_and_predictions( + rpn_box_encodings, rpn_objectness_predictions_with_background, + anchors_boxlist, clip_window) + else: + anchors_boxlist = box_list_ops.clip_to_window( + anchors_boxlist, clip_window, + filter_nonoverlapping=not self._use_static_shapes) + + self._anchors = anchors_boxlist + prediction_dict = { + 'rpn_box_predictor_features': + rpn_box_predictor_features, + 'rpn_features_to_crop': + rpn_features_to_crop, + 'image_shape': + image_shape, + 'rpn_box_encodings': + tf.cast(rpn_box_encodings, dtype=tf.float32), + 'rpn_objectness_predictions_with_background': + tf.cast(rpn_objectness_predictions_with_background, + dtype=tf.float32), + 'anchors': + anchors_boxlist.data['boxes'], + fields.PredictionFields.feature_maps: rpn_features_to_crop + } + return prediction_dict + + def _image_batch_shape_2d(self, image_batch_shape_1d): + """Takes a 1-D image batch shape tensor and converts it to a 2-D tensor. + + Example: + If the 1-D image batch shape tensor is [2, 300, 300, 3], the corresponding + 2-D image batch shape tensor would be [[300, 300, 3], [300, 300, 3]]. + + Args: + image_batch_shape_1d: 1-D tensor of the form [batch_size, height, + width, channels]. + + Returns: + image_batch_shape_2d: 2-D tensor of shape [batch_size, 3] where each row is + of the form [height, width, channels]. + """ + return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0), + [image_batch_shape_1d[0], 1]) + + def _predict_second_stage(self, rpn_box_encodings, + rpn_objectness_predictions_with_background, + rpn_features_to_crop, anchors, image_shape, + true_image_shapes, **side_inputs): + """Predicts the output tensors from second stage of Faster R-CNN. + + Args: + rpn_box_encodings: 3-D float tensor of shape + [batch_size, num_valid_anchors, self._box_coder.code_size] containing + predicted boxes. + rpn_objectness_predictions_with_background: 2-D float tensor of shape + [batch_size, num_valid_anchors, 2] containing class + predictions (logits) for each of the anchors. Note that this + tensor *includes* background class predictions (at class index 0).
+ rpn_features_to_crop: A list of 4-D float32 or bfloat16 tensor with shape + [batch_size, height_i, width_i, depth] representing image features to + crop using the proposal boxes predicted by the RPN. + anchors: 2-D float tensor of shape + [num_anchors, self._box_coder.code_size]. + image_shape: A 1D int32 tensors of size [4] containing the image shape. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **side_inputs: additional tensors that are required by the network. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) refined_box_encodings: a 3-D float32 tensor with shape + [total_num_proposals, num_classes, self._box_coder.code_size] + representing predicted (final) refined box encodings, where + total_num_proposals=batch_size*self._max_num_proposals. If using a + shared box across classes the shape will instead be + [total_num_proposals, 1, self._box_coder.code_size]. + 2) class_predictions_with_background: a 3-D float32 tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors, where + total_num_proposals=batch_size*self._max_num_proposals. + Note that this tensor *includes* background class predictions + (at class index 0). + 3) num_proposals: An int32 tensor of shape [batch_size] representing the + number of proposals generated by the RPN. `num_proposals` allows us + to keep track of which entries are to be treated as zero paddings and + which are not since we always pad the number of proposals to be + `self.max_num_proposals` for each image. + 4) proposal_boxes: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes in absolute coordinates. + 5) proposal_boxes_normalized: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing decoded proposal + bounding boxes in normalized coordinates. Can be used to override the + boxes proposed by the RPN, thus enabling one to extract features and + get box classification and prediction for externally selected areas + of the image. + 6) box_classifier_features: a 4-D float32/bfloat16 tensor + representing the features for each proposal. + If self._return_raw_detections_during_predict is True, the dictionary + will also contain: + 7) raw_detection_boxes: a 4-D float32 tensor with shape + [batch_size, self.max_num_proposals, num_classes, 4] in normalized + coordinates. + 8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape + [batch_size, self.max_num_proposals, num_classes]. + """ + proposal_boxes_normalized, num_proposals = self._proposal_postprocess( + rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, + image_shape, true_image_shapes) + prediction_dict = self._box_prediction(rpn_features_to_crop, + proposal_boxes_normalized, + image_shape, true_image_shapes, + **side_inputs) + prediction_dict['num_proposals'] = num_proposals + return prediction_dict + + def _box_prediction(self, rpn_features_to_crop, proposal_boxes_normalized, + image_shape, true_image_shapes, **side_inputs): + """Predicts the output tensors from second stage of Faster R-CNN. + + Args: + rpn_features_to_crop: A list 4-D float32 or bfloat16 tensor with shape + [batch_size, height_i, width_i, depth] representing image features to + crop using the proposal boxes predicted by the RPN. 
+ proposal_boxes_normalized: A float tensor with shape [batch_size, + max_num_proposals, 4] representing the (potentially zero padded) + proposal boxes for all images in the batch. These boxes are represented + as normalized coordinates. + image_shape: A 1D int32 tensors of size [4] containing the image shape. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + **side_inputs: additional tensors that are required by the network. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) refined_box_encodings: a 3-D float32 tensor with shape + [total_num_proposals, num_classes, self._box_coder.code_size] + representing predicted (final) refined box encodings, where + total_num_proposals=batch_size*self._max_num_proposals. If using a + shared box across classes the shape will instead be + [total_num_proposals, 1, self._box_coder.code_size]. + 2) class_predictions_with_background: a 3-D float32 tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors, where + total_num_proposals=batch_size*self._max_num_proposals. + Note that this tensor *includes* background class predictions + (at class index 0). + 3) proposal_boxes: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes in absolute coordinates. + 4) proposal_boxes_normalized: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing decoded proposal + bounding boxes in normalized coordinates. Can be used to override the + boxes proposed by the RPN, thus enabling one to extract features and + get box classification and prediction for externally selected areas + of the image. + 5) box_classifier_features: a 4-D float32/bfloat16 tensor + representing the features for each proposal. + If self._return_raw_detections_during_predict is True, the dictionary + will also contain: + 6) raw_detection_boxes: a 4-D float32 tensor with shape + [batch_size, self.max_num_proposals, num_classes, 4] in normalized + coordinates. + 7) raw_detection_feature_map_indices: a 3-D int32 tensor with shape + [batch_size, self.max_num_proposals, num_classes]. + 8) final_anchors: a 3-D float tensor of shape [batch_size, + self.max_num_proposals, 4] containing the reference anchors for raw + detection boxes in normalized coordinates. 
+ """ + flattened_proposal_feature_maps = ( + self._compute_second_stage_input_feature_maps( + rpn_features_to_crop, proposal_boxes_normalized, + image_shape, **side_inputs)) + + box_classifier_features = self._extract_box_classifier_features( + flattened_proposal_feature_maps) + + if self._mask_rcnn_box_predictor.is_keras_model: + box_predictions = self._mask_rcnn_box_predictor( + [box_classifier_features], + prediction_stage=2) + else: + box_predictions = self._mask_rcnn_box_predictor.predict( + [box_classifier_features], + num_predictions_per_location=[1], + scope=self.second_stage_box_predictor_scope, + prediction_stage=2) + + refined_box_encodings = tf.squeeze( + box_predictions[box_predictor.BOX_ENCODINGS], + axis=1, name='all_refined_box_encodings') + class_predictions_with_background = tf.squeeze( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1, name='all_class_predictions_with_background') + + absolute_proposal_boxes = ops.normalized_to_image_coordinates( + proposal_boxes_normalized, image_shape, self._parallel_iterations) + + prediction_dict = { + 'refined_box_encodings': tf.cast(refined_box_encodings, + dtype=tf.float32), + 'class_predictions_with_background': + tf.cast(class_predictions_with_background, dtype=tf.float32), + 'proposal_boxes': absolute_proposal_boxes, + 'box_classifier_features': box_classifier_features, + 'proposal_boxes_normalized': proposal_boxes_normalized, + 'final_anchors': proposal_boxes_normalized + } + + if self._return_raw_detections_during_predict: + prediction_dict.update(self._raw_detections_and_feature_map_inds( + refined_box_encodings, absolute_proposal_boxes, true_image_shapes)) + + return prediction_dict + + def _raw_detections_and_feature_map_inds( + self, refined_box_encodings, absolute_proposal_boxes, true_image_shapes): + """Returns raw detections and feat map inds from where they originated. + + Args: + refined_box_encodings: [total_num_proposals, num_classes, + self._box_coder.code_size] float32 tensor. + absolute_proposal_boxes: [batch_size, self.max_num_proposals, 4] float32 + tensor representing decoded proposal bounding boxes in absolute + coordinates. + true_image_shapes: [batch, 3] int32 tensor where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + A dictionary with raw detection boxes, and the feature map indices from + which they originated. 
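+ + For example (illustrative shapes, using the batch size, proposal count and class count exercised by the unit test above): with batch_size=2, max_num_proposals=8 and num_classes=42, raw_detection_boxes has shape [2, 8, 42, 4] and raw_detection_feature_map_indices has shape [2, 8, 42]; the indices are all zero because Faster R-CNN crops proposals from a single RPN feature map.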
+ """ + box_encodings_batch = tf.reshape( + refined_box_encodings, + [-1, self.max_num_proposals, refined_box_encodings.shape[1], + self._box_coder.code_size]) + raw_detection_boxes_absolute = self._batch_decode_boxes( + box_encodings_batch, absolute_proposal_boxes) + + raw_detection_boxes_normalized = shape_utils.static_or_dynamic_map_fn( + self._normalize_and_clip_boxes, + elems=[raw_detection_boxes_absolute, true_image_shapes], + dtype=tf.float32) + detection_feature_map_indices = tf.zeros_like( + raw_detection_boxes_normalized[:, :, :, 0], dtype=tf.int32) + return { + fields.PredictionFields.raw_detection_boxes: + raw_detection_boxes_normalized, + fields.PredictionFields.raw_detection_feature_map_indices: + detection_feature_map_indices + } + + def _extract_box_classifier_features(self, flattened_feature_maps): + if self._feature_extractor_for_box_classifier_features == ( + _UNINITIALIZED_FEATURE_EXTRACTOR): + self._feature_extractor_for_box_classifier_features = ( + self._feature_extractor.get_box_classifier_feature_extractor_model( + name=self.second_stage_feature_extractor_scope)) + + if self._feature_extractor_for_box_classifier_features: + box_classifier_features = ( + self._feature_extractor_for_box_classifier_features( + flattened_feature_maps)) + else: + box_classifier_features = ( + self._feature_extractor.extract_box_classifier_features( + flattened_feature_maps, + scope=self.second_stage_feature_extractor_scope)) + return box_classifier_features + + def _predict_third_stage(self, prediction_dict, image_shapes): + """Predicts non-box, non-class outputs using refined detections. + + For training, masks as predicted directly on the box_classifier_features, + which are region-features from the initial anchor boxes. + For inference, this happens after calling the post-processing stage, such + that masks are only calculated for the top scored boxes. + + Args: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) refined_box_encodings: a 3-D tensor with shape + [total_num_proposals, num_classes, self._box_coder.code_size] + representing predicted (final) refined box encodings, where + total_num_proposals=batch_size*self._max_num_proposals. If using a + shared box across classes the shape will instead be + [total_num_proposals, 1, self._box_coder.code_size]. + 2) class_predictions_with_background: a 3-D tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors, where + total_num_proposals=batch_size*self._max_num_proposals. + Note that this tensor *includes* background class predictions + (at class index 0). + 3) num_proposals: An int32 tensor of shape [batch_size] representing the + number of proposals generated by the RPN. `num_proposals` allows us + to keep track of which entries are to be treated as zero paddings and + which are not since we always pad the number of proposals to be + `self.max_num_proposals` for each image. + 4) proposal_boxes: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes in absolute coordinates. + 5) box_classifier_features: a 4-D float32 tensor representing the + features for each proposal. + 6) image_shape: a 1-D tensor of shape [4] representing the input + image shape. + image_shapes: A 2-D int32 tensors of shape [batch_size, 3] containing + shapes of images in the batch. 
+ + Returns: + prediction_dict: a dictionary that in addition to the input predictions + does hold the following predictions as well: + 1) mask_predictions: a 4-D tensor with shape + [batch_size, max_detection, mask_height, mask_width] containing + instance mask predictions. + """ + if self._is_training: + curr_box_classifier_features = prediction_dict['box_classifier_features'] + detection_classes = prediction_dict['class_predictions_with_background'] + if self._mask_rcnn_box_predictor.is_keras_model: + mask_predictions = self._mask_rcnn_box_predictor( + [curr_box_classifier_features], + prediction_stage=3) + else: + mask_predictions = self._mask_rcnn_box_predictor.predict( + [curr_box_classifier_features], + num_predictions_per_location=[1], + scope=self.second_stage_box_predictor_scope, + prediction_stage=3) + prediction_dict['mask_predictions'] = tf.squeeze(mask_predictions[ + box_predictor.MASK_PREDICTIONS], axis=1) + else: + detections_dict = self._postprocess_box_classifier( + prediction_dict['refined_box_encodings'], + prediction_dict['class_predictions_with_background'], + prediction_dict['proposal_boxes'], + prediction_dict['num_proposals'], + image_shapes) + prediction_dict.update(detections_dict) + detection_boxes = detections_dict[ + fields.DetectionResultFields.detection_boxes] + detection_classes = detections_dict[ + fields.DetectionResultFields.detection_classes] + rpn_features_to_crop = prediction_dict['rpn_features_to_crop'] + image_shape = prediction_dict['image_shape'] + batch_size = tf.shape(detection_boxes)[0] + max_detection = tf.shape(detection_boxes)[1] + flattened_detected_feature_maps = ( + self._compute_second_stage_input_feature_maps( + rpn_features_to_crop, detection_boxes, image_shape)) + curr_box_classifier_features = self._extract_box_classifier_features( + flattened_detected_feature_maps) + + if self._mask_rcnn_box_predictor.is_keras_model: + mask_predictions = self._mask_rcnn_box_predictor( + [curr_box_classifier_features], + prediction_stage=3) + else: + mask_predictions = self._mask_rcnn_box_predictor.predict( + [curr_box_classifier_features], + num_predictions_per_location=[1], + scope=self.second_stage_box_predictor_scope, + prediction_stage=3) + + detection_masks = tf.squeeze(mask_predictions[ + box_predictor.MASK_PREDICTIONS], axis=1) + + _, num_classes, mask_height, mask_width = ( + detection_masks.get_shape().as_list()) + _, max_detection = detection_classes.get_shape().as_list() + prediction_dict['mask_predictions'] = tf.reshape( + detection_masks, [-1, num_classes, mask_height, mask_width]) + if num_classes > 1: + detection_masks = self._gather_instance_masks( + detection_masks, detection_classes) + + detection_masks = tf.cast(detection_masks, tf.float32) + prediction_dict[fields.DetectionResultFields.detection_masks] = ( + tf.reshape(tf.sigmoid(detection_masks), + [batch_size, max_detection, mask_height, mask_width])) + + return prediction_dict + + def _gather_instance_masks(self, instance_masks, classes): + """Gathers the masks that correspond to classes. + + Args: + instance_masks: A 4-D float32 tensor with shape + [K, num_classes, mask_height, mask_width]. + classes: A 2-D int32 tensor with shape [batch_size, max_detection]. + + Returns: + masks: a 3-D float32 tensor with shape [K, mask_height, mask_width]. 
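+      Example (illustrative, added by the editor): with K = 2 detections,
+      num_classes = 3 and classes = [[2, 1]], the computed gather indices are
+      tf.range(2) * 3 + [2, 1] = [2, 4], i.e. the class-2 mask of the first
+      detection and the class-1 mask of the second detection are selected.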
+ """ + _, num_classes, height, width = instance_masks.get_shape().as_list() + k = tf.shape(instance_masks)[0] + instance_masks = tf.reshape(instance_masks, [-1, height, width]) + classes = tf.cast(tf.reshape(classes, [-1]), dtype=tf.int32) + gather_idx = tf.range(k) * num_classes + classes + return tf.gather(instance_masks, gather_idx) + + def _extract_rpn_feature_maps(self, preprocessed_inputs): + """Extracts RPN features. + + This function extracts two feature maps: a feature map to be directly + fed to a box predictor (to predict location and objectness scores for + proposals) and a feature map from which to crop regions which will then + be sent to the second stage box classifier. + + Args: + preprocessed_inputs: a [batch, height, width, channels] image tensor. + + Returns: + rpn_box_predictor_features: A list of 4-D float32 tensor with shape + [batch, height_i, width_j, depth] to be used for predicting proposal + boxes and corresponding objectness scores. + rpn_features_to_crop: A list of 4-D float32 tensor with shape + [batch, height, width, depth] representing image features to crop using + the proposals boxes. + anchors: A list of BoxList representing anchors (for the RPN) in + absolute coordinates. + image_shape: A 1-D tensor representing the input image shape. + """ + image_shape = tf.shape(preprocessed_inputs) + + rpn_features_to_crop, self.endpoints = self._extract_proposal_features( + preprocessed_inputs) + + # Decide if rpn_features_to_crop is a list. If not make it a list + if not isinstance(rpn_features_to_crop, list): + rpn_features_to_crop = [rpn_features_to_crop] + + feature_map_shapes = [] + rpn_box_predictor_features = [] + for single_rpn_features_to_crop in rpn_features_to_crop: + single_shape = tf.shape(single_rpn_features_to_crop) + feature_map_shapes.append((single_shape[1], single_shape[2])) + single_rpn_box_predictor_features = ( + self._first_stage_box_predictor_first_conv( + single_rpn_features_to_crop)) + rpn_box_predictor_features.append(single_rpn_box_predictor_features) + anchors = box_list_ops.concatenate( + self._first_stage_anchor_generator.generate(feature_map_shapes)) + return (rpn_box_predictor_features, rpn_features_to_crop, + anchors, image_shape) + + def _extract_proposal_features(self, preprocessed_inputs): + if self._feature_extractor_for_proposal_features == ( + _UNINITIALIZED_FEATURE_EXTRACTOR): + self._feature_extractor_for_proposal_features = ( + self._feature_extractor.get_proposal_feature_extractor_model( + name=self.first_stage_feature_extractor_scope)) + if self._feature_extractor_for_proposal_features: + proposal_features = ( + self._feature_extractor_for_proposal_features(preprocessed_inputs), + {}) + else: + proposal_features = ( + self._feature_extractor.extract_proposal_features( + preprocessed_inputs, + scope=self.first_stage_feature_extractor_scope)) + return proposal_features + + def _predict_rpn_proposals(self, rpn_box_predictor_features): + """Adds box predictors to RPN feature map to predict proposals. + + Note resulting tensors will not have been postprocessed. + + Args: + rpn_box_predictor_features: A list of 4-D float32 tensor with shape + [batch, height_i, width_j, depth] to be used for predicting proposal + boxes and corresponding objectness scores. + + Returns: + box_encodings: 3-D float tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted boxes. 
+ objectness_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, 2] containing class + predictions (logits) for each of the anchors. Note that this + tensor *includes* background class predictions (at class index 0). + + Raises: + RuntimeError: if the anchor generator generates anchors corresponding to + multiple feature maps. We currently assume that a single feature map + is generated for the RPN. + """ + num_anchors_per_location = ( + self._first_stage_anchor_generator.num_anchors_per_location()) + + if self._first_stage_box_predictor.is_keras_model: + box_predictions = self._first_stage_box_predictor( + rpn_box_predictor_features) + else: + box_predictions = self._first_stage_box_predictor.predict( + rpn_box_predictor_features, + num_anchors_per_location, + scope=self.first_stage_box_predictor_scope) + + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (tf.squeeze(box_encodings, axis=2), + objectness_predictions_with_background) + + def _remove_invalid_anchors_and_predictions( + self, + box_encodings, + objectness_predictions_with_background, + anchors_boxlist, + clip_window): + """Removes anchors that (partially) fall outside an image. + + Also removes associated box encodings and objectness predictions. + + Args: + box_encodings: 3-D float tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted boxes. + objectness_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, 2] containing class + predictions (logits) for each of the anchors. Note that this + tensor *includes* background class predictions (at class index 0). + anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN) + in absolute coordinates. + clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax] + extent of the window to clip/prune to. + + Returns: + box_encodings: 4-D float tensor of shape + [batch_size, num_valid_anchors, self._box_coder.code_size] containing + predicted boxes, where num_valid_anchors <= num_anchors + objectness_predictions_with_background: 2-D float tensor of shape + [batch_size, num_valid_anchors, 2] containing class + predictions (logits) for each of the anchors, where + num_valid_anchors <= num_anchors. Note that this + tensor *includes* background class predictions (at class index 0). + anchors: A BoxList representing num_valid_anchors anchors (for the RPN) in + absolute coordinates. + """ + pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window( + anchors_boxlist, clip_window) + def _batch_gather_kept_indices(predictions_tensor): + return shape_utils.static_or_dynamic_map_fn( + functools.partial(tf.gather, indices=keep_indices), + elems=predictions_tensor, + dtype=tf.float32, + parallel_iterations=self._parallel_iterations, + back_prop=True) + return (_batch_gather_kept_indices(box_encodings), + _batch_gather_kept_indices(objectness_predictions_with_background), + pruned_anchors_boxlist) + + def _flatten_first_two_dimensions(self, inputs): + """Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor. + + Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape + [A * B, ..., depth]. + + Args: + inputs: A float tensor with shape [A, B, ..., depth]. Note that the first + two and last dimensions must be statically defined. 
+ Returns: + A float tensor with shape [A * B, ..., depth] (where the first and last + dimension are statically defined. + """ + combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs) + flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] + + combined_shape[2:]) + return tf.reshape(inputs, flattened_shape) + + def postprocess(self, prediction_dict, true_image_shapes): + """Convert prediction tensors to final detections. + + This function converts raw predictions tensors to final detection results. + See base class for output format conventions. Note also that by default, + scores are to be interpreted as logits, but if a score_converter is used, + then scores are remapped (and may thus have a different interpretation). + + If number_of_stages=1, the returned results represent proposals from the + first stage RPN and are padded to have self.max_num_proposals for each + image; otherwise, the results can be interpreted as multiclass detections + from the full two-stage model and are padded to self._max_detections. + + Args: + prediction_dict: a dictionary holding prediction tensors (see the + documentation for the predict method. If number_of_stages=1, we + expect prediction_dict to contain `rpn_box_encodings`, + `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`, + and `anchors` fields. Otherwise we expect prediction_dict to + additionally contain `refined_box_encodings`, + `class_predictions_with_background`, `num_proposals`, + `proposal_boxes` and, optionally, `mask_predictions` fields. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + detections: a dictionary containing the following fields + detection_boxes: [batch, max_detection, 4] + detection_scores: [batch, max_detections] + detection_multiclass_scores: [batch, max_detections, 2] + detection_anchor_indices: [batch, max_detections] + detection_classes: [batch, max_detections] + (this entry is only created if rpn_mode=False) + num_detections: [batch] + raw_detection_boxes: [batch, total_detections, 4] + raw_detection_scores: [batch, total_detections, num_classes + 1] + + Raises: + ValueError: If `predict` is called before `preprocess`. + ValueError: If `_output_final_box_features` is true but + rpn_features_to_crop is not in the prediction_dict. + """ + + with tf.name_scope('FirstStagePostprocessor'): + if self._number_of_stages == 1: + + image_shapes = self._image_batch_shape_2d( + prediction_dict['image_shape']) + (proposal_boxes, proposal_scores, proposal_multiclass_scores, + num_proposals, raw_proposal_boxes, + raw_proposal_scores) = self._postprocess_rpn( + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + prediction_dict['anchors'], image_shapes, true_image_shapes) + return { + fields.DetectionResultFields.detection_boxes: + proposal_boxes, + fields.DetectionResultFields.detection_scores: + proposal_scores, + fields.DetectionResultFields.detection_multiclass_scores: + proposal_multiclass_scores, + fields.DetectionResultFields.num_detections: + tf.cast(num_proposals, dtype=tf.float32), + fields.DetectionResultFields.raw_detection_boxes: + raw_proposal_boxes, + fields.DetectionResultFields.raw_detection_scores: + raw_proposal_scores + } + + # TODO(jrru): Remove mask_predictions from _post_process_box_classifier. 
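+    # Editor's note (illustrative only, not part of the upstream logic): a
+    # typical inference call chain around this method is roughly
+    #   preprocessed_inputs, true_image_shapes = model.preprocess(images)
+    #   prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
+    #   detections = model.postprocess(prediction_dict, true_image_shapes)
+    # with the branches below selecting between two-stage and three-stage
+    # (mask) outputs once the RPN-only case above has been handled.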
+ if (self._number_of_stages == 2 or + (self._number_of_stages == 3 and self._is_training)): + with tf.name_scope('SecondStagePostprocessor'): + mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS) + detections_dict = self._postprocess_box_classifier( + prediction_dict['refined_box_encodings'], + prediction_dict['class_predictions_with_background'], + prediction_dict['proposal_boxes'], + prediction_dict['num_proposals'], + true_image_shapes, + mask_predictions=mask_predictions) + + if self._output_final_box_features: + if 'rpn_features_to_crop' not in prediction_dict: + raise ValueError( + 'Please make sure rpn_features_to_crop is in the prediction_dict.' + ) + detections_dict[ + 'detection_features'] = self._add_detection_features_output_node( + detections_dict[fields.DetectionResultFields.detection_boxes], + prediction_dict['rpn_features_to_crop'], + prediction_dict['image_shape']) + + return detections_dict + + if self._number_of_stages == 3: + # Post processing is already performed in 3rd stage. We need to transfer + # postprocessed tensors from `prediction_dict` to `detections_dict`. + # Remove any items from the prediction dictionary if they are not pure + # Tensors. + non_tensor_predictions = [ + k for k, v in prediction_dict.items() if not isinstance(v, tf.Tensor)] + for k in non_tensor_predictions: + tf.logging.info('Removing {0} from prediction_dict'.format(k)) + prediction_dict.pop(k) + return prediction_dict + + def _add_detection_features_output_node(self, detection_boxes, + rpn_features_to_crop, image_shape): + """Add detection features to outputs. + + This function extracts box features for each box in rpn_features_to_crop. + It returns the extracted box features, reshaped to + [batch size, max_detections, height, width, depth], and average pools + the extracted features across the spatial dimensions and adds a graph node + to the pooled features named 'pooled_detection_features' + + Args: + detection_boxes: a 3-D float32 tensor of shape + [batch_size, max_detections, 4] which represents the bounding boxes. + rpn_features_to_crop: A list of 4-D float32 tensor with shape + [batch, height, width, depth] representing image features to crop using + the proposals boxes. + image_shape: a 1-D tensor of shape [4] representing the image shape. 
+ + Returns: + detection_features: a 4-D float32 tensor of shape + [batch size, max_detections, height, width, depth] representing + cropped image features + """ + with tf.name_scope('SecondStageDetectionFeaturesExtract'): + flattened_detected_feature_maps = ( + self._compute_second_stage_input_feature_maps( + rpn_features_to_crop, detection_boxes, image_shape)) + detection_features_unpooled = self._extract_box_classifier_features( + flattened_detected_feature_maps) + + batch_size = tf.shape(detection_boxes)[0] + max_detections = tf.shape(detection_boxes)[1] + detection_features_pool = tf.reduce_mean( + detection_features_unpooled, axis=[1, 2]) + reshaped_detection_features_pool = tf.reshape( + detection_features_pool, + [batch_size, max_detections, tf.shape(detection_features_pool)[-1]]) + reshaped_detection_features_pool = tf.identity( + reshaped_detection_features_pool, 'pooled_detection_features') + + reshaped_detection_features = tf.reshape( + detection_features_unpooled, + [batch_size, max_detections, + tf.shape(detection_features_unpooled)[1], + tf.shape(detection_features_unpooled)[2], + tf.shape(detection_features_unpooled)[3]]) + + return reshaped_detection_features + + def _postprocess_rpn(self, + rpn_box_encodings_batch, + rpn_objectness_predictions_with_background_batch, + anchors, + image_shapes, + true_image_shapes): + """Converts first stage prediction tensors from the RPN to proposals. + + This function decodes the raw RPN predictions, runs non-max suppression + on the result. + + Note that the behavior of this function is slightly modified during + training --- specifically, we stop the gradient from passing through the + proposal boxes and we only return a balanced sampled subset of proposals + with size `second_stage_batch_size`. + + Args: + rpn_box_encodings_batch: A 3-D float32 tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted proposal box encodings. + rpn_objectness_predictions_with_background_batch: A 3-D float tensor of + shape [batch_size, num_anchors, 2] containing objectness predictions + (logits) for each of the anchors with 0 corresponding to background + and 1 corresponding to object. + anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors + for the first stage RPN. Note that `num_anchors` can differ depending + on whether the model is created in training or inference mode. + image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of + images in the batch. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + proposal_boxes: A float tensor with shape + [batch_size, max_num_proposals, 4] representing the (potentially zero + padded) proposal boxes for all images in the batch. These boxes are + represented as normalized coordinates. + proposal_scores: A float tensor with shape + [batch_size, max_num_proposals] representing the (potentially zero + padded) proposal objectness scores for all images in the batch. + proposal_multiclass_scores: A float tensor with shape + [batch_size, max_num_proposals, 2] representing the (potentially zero + padded) proposal multiclass scores for all images in the batch. + num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] + representing the number of proposals predicted for each image in + the batch. 
+ raw_detection_boxes: [batch, total_detections, 4] tensor with decoded + proposal boxes before Non-Max Suppression. + raw_detection_scores: [batch, total_detections, + num_classes_with_background] tensor of multi-class scores for raw + proposal boxes. + """ + rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2) + rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape( + rpn_box_encodings_batch) + tiled_anchor_boxes = tf.tile( + tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1]) + proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch, + tiled_anchor_boxes) + raw_proposal_boxes = tf.squeeze(proposal_boxes, axis=2) + rpn_objectness_softmax = tf.nn.softmax( + rpn_objectness_predictions_with_background_batch) + rpn_objectness_softmax_without_background = rpn_objectness_softmax[:, :, 1] + clip_window = self._compute_clip_window(true_image_shapes) + additional_fields = {'multiclass_scores': rpn_objectness_softmax} + (proposal_boxes, proposal_scores, _, _, nmsed_additional_fields, + num_proposals) = self._first_stage_nms_fn( + tf.expand_dims(raw_proposal_boxes, axis=2), + tf.expand_dims(rpn_objectness_softmax_without_background, axis=2), + additional_fields=additional_fields, + clip_window=clip_window) + if self._is_training: + proposal_boxes = tf.stop_gradient(proposal_boxes) + if not self._hard_example_miner: + (groundtruth_boxlists, groundtruth_classes_with_background_list, _, + groundtruth_weights_list + ) = self._format_groundtruth_data(image_shapes) + (proposal_boxes, proposal_scores, + num_proposals) = self._sample_box_classifier_batch( + proposal_boxes, proposal_scores, num_proposals, + groundtruth_boxlists, groundtruth_classes_with_background_list, + groundtruth_weights_list) + # normalize proposal boxes + def normalize_boxes(args): + proposal_boxes_per_image = args[0] + image_shape = args[1] + normalized_boxes_per_image = box_list_ops.to_normalized_coordinates( + box_list.BoxList(proposal_boxes_per_image), image_shape[0], + image_shape[1], check_range=False).get() + return normalized_boxes_per_image + normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn( + normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32) + raw_normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn( + normalize_boxes, + elems=[raw_proposal_boxes, image_shapes], + dtype=tf.float32) + proposal_multiclass_scores = ( + nmsed_additional_fields.get('multiclass_scores') + if nmsed_additional_fields else None) + return (normalized_proposal_boxes, proposal_scores, + proposal_multiclass_scores, num_proposals, + raw_normalized_proposal_boxes, rpn_objectness_softmax) + + def _sample_box_classifier_batch( + self, + proposal_boxes, + proposal_scores, + num_proposals, + groundtruth_boxlists, + groundtruth_classes_with_background_list, + groundtruth_weights_list): + """Samples a minibatch for second stage. + + Args: + proposal_boxes: A float tensor with shape + [batch_size, num_proposals, 4] representing the (potentially zero + padded) proposal boxes for all images in the batch. These boxes are + represented in absolute coordinates. + proposal_scores: A float tensor with shape + [batch_size, num_proposals] representing the (potentially zero + padded) proposal objectness scores for all images in the batch. + num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] + representing the number of proposals predicted for each image in + the batch. 
+ groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates + of the groundtruth boxes. + groundtruth_classes_with_background_list: A list of 2-D one-hot + (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the + class targets with the 0th index assumed to map to the background class. + groundtruth_weights_list: A list of 1-D tensors of shape [num_boxes] + indicating the weight associated with the groundtruth boxes. + + Returns: + proposal_boxes: A float tensor with shape + [batch_size, second_stage_batch_size, 4] representing the (potentially + zero padded) proposal boxes for all images in the batch. These boxes + are represented in absolute coordinates. + proposal_scores: A float tensor with shape + [batch_size, second_stage_batch_size] representing the (potentially zero + padded) proposal objectness scores for all images in the batch. + num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] + representing the number of proposals predicted for each image in + the batch. + """ + single_image_proposal_box_sample = [] + single_image_proposal_score_sample = [] + single_image_num_proposals_sample = [] + for (single_image_proposal_boxes, + single_image_proposal_scores, + single_image_num_proposals, + single_image_groundtruth_boxlist, + single_image_groundtruth_classes_with_background, + single_image_groundtruth_weights) in zip( + tf.unstack(proposal_boxes), + tf.unstack(proposal_scores), + tf.unstack(num_proposals), + groundtruth_boxlists, + groundtruth_classes_with_background_list, + groundtruth_weights_list): + single_image_boxlist = box_list.BoxList(single_image_proposal_boxes) + single_image_boxlist.add_field(fields.BoxListFields.scores, + single_image_proposal_scores) + sampled_boxlist = self._sample_box_classifier_minibatch_single_image( + single_image_boxlist, + single_image_num_proposals, + single_image_groundtruth_boxlist, + single_image_groundtruth_classes_with_background, + single_image_groundtruth_weights) + sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list( + sampled_boxlist, + num_boxes=self._second_stage_batch_size) + single_image_num_proposals_sample.append(tf.minimum( + sampled_boxlist.num_boxes(), + self._second_stage_batch_size)) + bb = sampled_padded_boxlist.get() + single_image_proposal_box_sample.append(bb) + single_image_proposal_score_sample.append( + sampled_padded_boxlist.get_field(fields.BoxListFields.scores)) + return (tf.stack(single_image_proposal_box_sample), + tf.stack(single_image_proposal_score_sample), + tf.stack(single_image_num_proposals_sample)) + + def _format_groundtruth_data(self, image_shapes): + """Helper function for preparing groundtruth data for target assignment. + + In order to be consistent with the model.DetectionModel interface, + groundtruth boxes are specified in normalized coordinates and classes are + specified as label indices with no assumed background category. To prepare + for target assignment, we: + 1) convert boxes to absolute coordinates, + 2) add a background class at class index 0 + 3) groundtruth instance masks, if available, are resized to match + image_shape. + + Args: + image_shapes: a 2-D int32 tensor of shape [batch_size, 3] containing + shapes of input image in the batch. + + Returns: + groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates + of the groundtruth boxes. 
+ groundtruth_classes_with_background_list: A list of 2-D one-hot + (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the + class targets with the 0th index assumed to map to the background class. + groundtruth_masks_list: If present, a list of 3-D tf.float32 tensors of + shape [num_boxes, image_height, image_width] containing instance masks. + This is set to None if no masks exist in the provided groundtruth. + """ + # pylint: disable=g-complex-comprehension + groundtruth_boxlists = [ + box_list_ops.to_absolute_coordinates( + box_list.BoxList(boxes), image_shapes[i, 0], image_shapes[i, 1]) + for i, boxes in enumerate( + self.groundtruth_lists(fields.BoxListFields.boxes)) + ] + groundtruth_classes_with_background_list = [] + for one_hot_encoding in self.groundtruth_lists( + fields.BoxListFields.classes): + groundtruth_classes_with_background_list.append( + tf.cast( + tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'), + dtype=tf.float32)) + + groundtruth_masks_list = self._groundtruth_lists.get( + fields.BoxListFields.masks) + # TODO(rathodv): Remove mask resizing once the legacy pipeline is deleted. + if groundtruth_masks_list is not None and self._resize_masks: + resized_masks_list = [] + for mask in groundtruth_masks_list: + + _, resized_mask, _ = self._image_resizer_fn( + # Reuse the given `image_resizer_fn` to resize groundtruth masks. + # `mask` tensor for an image is of the shape [num_masks, + # image_height, image_width]. Below we create a dummy image of the + # the shape [image_height, image_width, 1] to use with + # `image_resizer_fn`. + image=tf.zeros(tf.stack([tf.shape(mask)[1], + tf.shape(mask)[2], 1])), + masks=mask) + resized_masks_list.append(resized_mask) + + groundtruth_masks_list = resized_masks_list + # Masks could be set to bfloat16 in the input pipeline for performance + # reasons. Convert masks back to floating point space here since the rest of + # this module assumes groundtruth to be of float32 type. + float_groundtruth_masks_list = [] + if groundtruth_masks_list: + for mask in groundtruth_masks_list: + float_groundtruth_masks_list.append(tf.cast(mask, tf.float32)) + groundtruth_masks_list = float_groundtruth_masks_list + + if self.groundtruth_has_field(fields.BoxListFields.weights): + groundtruth_weights_list = self.groundtruth_lists( + fields.BoxListFields.weights) + else: + # Set weights for all batch elements equally to 1.0 + groundtruth_weights_list = [] + for groundtruth_classes in groundtruth_classes_with_background_list: + num_gt = tf.shape(groundtruth_classes)[0] + groundtruth_weights = tf.ones(num_gt) + groundtruth_weights_list.append(groundtruth_weights) + + return (groundtruth_boxlists, groundtruth_classes_with_background_list, + groundtruth_masks_list, groundtruth_weights_list) + + def _sample_box_classifier_minibatch_single_image( + self, proposal_boxlist, num_valid_proposals, groundtruth_boxlist, + groundtruth_classes_with_background, groundtruth_weights): + """Samples a mini-batch of proposals to be sent to the box classifier. + + Helper function for self._postprocess_rpn. + + Args: + proposal_boxlist: A BoxList containing K proposal boxes in absolute + coordinates. + num_valid_proposals: Number of valid proposals in the proposal boxlist. + groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in + absolute coordinates. + groundtruth_classes_with_background: A tensor with shape + `[N, self.num_classes + 1]` representing groundtruth classes. 
The + classes are assumed to be k-hot encoded, and include background as the + zero-th class. + groundtruth_weights: Weights attached to the groundtruth_boxes. + + Returns: + a BoxList contained sampled proposals. + """ + (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign( + proposal_boxlist, + groundtruth_boxlist, + groundtruth_classes_with_background, + unmatched_class_label=tf.constant( + [1] + self._num_classes * [0], dtype=tf.float32), + groundtruth_weights=groundtruth_weights) + # Selects all boxes as candidates if none of them is selected according + # to cls_weights. This could happen as boxes within certain IOU ranges + # are ignored. If triggered, the selected boxes will still be ignored + # during loss computation. + cls_weights = tf.reduce_mean(cls_weights, axis=-1) + positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0) + valid_indicator = tf.logical_and( + tf.range(proposal_boxlist.num_boxes()) < num_valid_proposals, + cls_weights > 0 + ) + selected_positions = self._second_stage_sampler.subsample( + valid_indicator, + self._second_stage_batch_size, + positive_indicator) + return box_list_ops.boolean_mask( + proposal_boxlist, + selected_positions, + use_static_shapes=self._use_static_shapes, + indicator_sum=(self._second_stage_batch_size + if self._use_static_shapes else None)) + + def _compute_second_stage_input_feature_maps(self, features_to_crop, + proposal_boxes_normalized, + image_shape, + **side_inputs): + """Crops to a set of proposals from the feature map for a batch of images. + + Helper function for self._postprocess_rpn. This function calls + `tf.image.crop_and_resize` to create the feature map to be passed to the + second stage box classifier for each proposal. + + Args: + features_to_crop: A float32 tensor with shape + [batch_size, height, width, depth] + proposal_boxes_normalized: A float32 tensor with shape [batch_size, + num_proposals, box_code_size] containing proposal boxes in + normalized coordinates. + image_shape: A 1D int32 tensors of size [4] containing the image shape. + **side_inputs: additional tensors that are required by the network. + + Returns: + A float32 tensor with shape [K, new_height, new_width, depth]. + """ + num_levels = len(features_to_crop) + box_levels = None + if num_levels != 1: + # If there are multiple levels to select, get the box levels + # unit_scale_index: num_levels-2 is chosen based on section 4.2 of + # https://arxiv.org/pdf/1612.03144.pdf and works best for Resnet based + # feature extractor. + box_levels = ops.fpn_feature_levels( + num_levels, num_levels - 2, + tf.sqrt(tf.cast(image_shape[1] * image_shape[2], tf.float32)) / 224.0, + proposal_boxes_normalized) + + cropped_regions = self._flatten_first_two_dimensions( + self._crop_and_resize_fn( + features_to_crop, proposal_boxes_normalized, box_levels, + [self._initial_crop_size, self._initial_crop_size])) + return self._maxpool_layer(cropped_regions) + + def _postprocess_box_classifier(self, + refined_box_encodings, + class_predictions_with_background, + proposal_boxes, + num_proposals, + image_shapes, + mask_predictions=None): + """Converts predictions from the second stage box classifier to detections. + + Args: + refined_box_encodings: a 3-D float tensor with shape + [total_num_padded_proposals, num_classes, self._box_coder.code_size] + representing predicted (final) refined box encodings. 
If using a shared + box across classes the shape will instead be + [total_num_padded_proposals, 1, 4] + class_predictions_with_background: a 2-D tensor float with shape + [total_num_padded_proposals, num_classes + 1] containing class + predictions (logits) for each of the proposals. Note that this tensor + *includes* background class predictions (at class index 0). + proposal_boxes: a 3-D float tensor with shape + [batch_size, self.max_num_proposals, 4] representing decoded proposal + bounding boxes in absolute coordinates. + num_proposals: a 1-D int32 tensor of shape [batch] representing the number + of proposals predicted for each image in the batch. + image_shapes: a 2-D int32 tensor containing shapes of input image in the + batch. + mask_predictions: (optional) a 4-D float tensor with shape + [total_num_padded_proposals, num_classes, mask_height, mask_width] + containing instance mask prediction logits. + + Returns: + A dictionary containing: + `detection_boxes`: [batch, max_detection, 4] in normalized co-ordinates. + `detection_scores`: [batch, max_detections] + `detection_multiclass_scores`: [batch, max_detections, + num_classes_with_background] tensor with class score distribution for + post-processed detection boxes including background class if any. + `detection_anchor_indices`: [batch, max_detections] with anchor + indices. + `detection_classes`: [batch, max_detections] + `num_detections`: [batch] + `detection_masks`: + (optional) [batch, max_detections, mask_height, mask_width]. Note + that a pixel-wise sigmoid score converter is applied to the detection + masks. + `raw_detection_boxes`: [batch, total_detections, 4] tensor with decoded + detection boxes in normalized coordinates, before Non-Max Suppression. + The value total_detections is the number of second stage anchors + (i.e. the total number of boxes before NMS). + `raw_detection_scores`: [batch, total_detections, + num_classes_with_background] tensor of multi-class scores for + raw detection boxes. The value total_detections is the number of + second stage anchors (i.e. the total number of boxes before NMS). 
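+      Example (illustrative, added by the editor): a returned normalized
+      detection box [0.1, 0.2, 0.5, 0.6] on a 480x640 true image corresponds
+      to the pixel box [48, 128, 240, 384] in (ymin, xmin, ymax, xmax) order.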
+ """ + refined_box_encodings_batch = tf.reshape( + refined_box_encodings, + [-1, + self.max_num_proposals, + refined_box_encodings.shape[1], + self._box_coder.code_size]) + class_predictions_with_background_batch = tf.reshape( + class_predictions_with_background, + [-1, self.max_num_proposals, self.num_classes + 1] + ) + refined_decoded_boxes_batch = self._batch_decode_boxes( + refined_box_encodings_batch, proposal_boxes) + class_predictions_with_background_batch_normalized = ( + self._second_stage_score_conversion_fn( + class_predictions_with_background_batch)) + class_predictions_batch = tf.reshape( + tf.slice(class_predictions_with_background_batch_normalized, + [0, 0, 1], [-1, -1, -1]), + [-1, self.max_num_proposals, self.num_classes]) + clip_window = self._compute_clip_window(image_shapes) + mask_predictions_batch = None + if mask_predictions is not None: + mask_height = shape_utils.get_dim_as_int(mask_predictions.shape[2]) + mask_width = shape_utils.get_dim_as_int(mask_predictions.shape[3]) + mask_predictions = tf.sigmoid(mask_predictions) + mask_predictions_batch = tf.reshape( + mask_predictions, [-1, self.max_num_proposals, + self.num_classes, mask_height, mask_width]) + + batch_size = shape_utils.combined_static_and_dynamic_shape( + refined_box_encodings_batch)[0] + batch_anchor_indices = tf.tile( + tf.expand_dims(tf.range(self.max_num_proposals), 0), + multiples=[batch_size, 1]) + additional_fields = { + 'multiclass_scores': class_predictions_with_background_batch_normalized, + 'anchor_indices': tf.cast(batch_anchor_indices, tf.float32) + } + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, num_detections) = self._second_stage_nms_fn( + refined_decoded_boxes_batch, + class_predictions_batch, + clip_window=clip_window, + change_coordinate_frame=True, + num_valid_boxes=num_proposals, + additional_fields=additional_fields, + masks=mask_predictions_batch) + if refined_decoded_boxes_batch.shape[2] > 1: + class_ids = tf.expand_dims( + tf.argmax(class_predictions_with_background_batch[:, :, 1:], axis=2, + output_type=tf.int32), + axis=-1) + raw_detection_boxes = tf.squeeze( + tf.batch_gather(refined_decoded_boxes_batch, class_ids), axis=2) + else: + raw_detection_boxes = tf.squeeze(refined_decoded_boxes_batch, axis=2) + + raw_normalized_detection_boxes = shape_utils.static_or_dynamic_map_fn( + self._normalize_and_clip_boxes, + elems=[raw_detection_boxes, image_shapes], + dtype=tf.float32) + + detections = { + fields.DetectionResultFields.detection_boxes: + nmsed_boxes, + fields.DetectionResultFields.detection_scores: + nmsed_scores, + fields.DetectionResultFields.detection_classes: + nmsed_classes, + fields.DetectionResultFields.detection_multiclass_scores: + nmsed_additional_fields['multiclass_scores'], + fields.DetectionResultFields.detection_anchor_indices: + tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32), + fields.DetectionResultFields.num_detections: + tf.cast(num_detections, dtype=tf.float32), + fields.DetectionResultFields.raw_detection_boxes: + raw_normalized_detection_boxes, + fields.DetectionResultFields.raw_detection_scores: + class_predictions_with_background_batch_normalized + } + if nmsed_masks is not None: + detections[fields.DetectionResultFields.detection_masks] = nmsed_masks + return detections + + def _batch_decode_boxes(self, box_encodings, anchor_boxes): + """Decodes box encodings with respect to the anchor boxes. 
+ + Args: + box_encodings: a 4-D tensor with shape + [batch_size, num_anchors, num_classes, self._box_coder.code_size] + representing box encodings. + anchor_boxes: [batch_size, num_anchors, self._box_coder.code_size] + representing decoded bounding boxes. If using a shared box across + classes the shape will instead be + [total_num_proposals, 1, self._box_coder.code_size]. + + Returns: + decoded_boxes: a + [batch_size, num_anchors, num_classes, self._box_coder.code_size] + float tensor representing bounding box predictions (for each image in + batch, proposal and class). If using a shared box across classes the + shape will instead be + [batch_size, num_anchors, 1, self._box_coder.code_size]. + """ + combined_shape = shape_utils.combined_static_and_dynamic_shape( + box_encodings) + num_classes = combined_shape[2] + tiled_anchor_boxes = tf.tile( + tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1]) + tiled_anchors_boxlist = box_list.BoxList( + tf.reshape(tiled_anchor_boxes, [-1, 4])) + decoded_boxes = self._box_coder.decode( + tf.reshape(box_encodings, [-1, self._box_coder.code_size]), + tiled_anchors_boxlist) + return tf.reshape(decoded_boxes.get(), + tf.stack([combined_shape[0], combined_shape[1], + num_classes, 4])) + + def _normalize_and_clip_boxes(self, boxes_and_image_shape): + """Normalize and clip boxes.""" + boxes_per_image = boxes_and_image_shape[0] + image_shape = boxes_and_image_shape[1] + + boxes_contains_classes_dim = boxes_per_image.shape.ndims == 3 + if boxes_contains_classes_dim: + boxes_per_image = shape_utils.flatten_first_n_dimensions( + boxes_per_image, 2) + normalized_boxes_per_image = box_list_ops.to_normalized_coordinates( + box_list.BoxList(boxes_per_image), + image_shape[0], + image_shape[1], + check_range=False).get() + + normalized_boxes_per_image = box_list_ops.clip_to_window( + box_list.BoxList(normalized_boxes_per_image), + tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32), + filter_nonoverlapping=False).get() + + if boxes_contains_classes_dim: + max_num_proposals, num_classes, _ = ( + shape_utils.combined_static_and_dynamic_shape( + boxes_and_image_shape[0])) + normalized_boxes_per_image = shape_utils.expand_first_dimension( + normalized_boxes_per_image, [max_num_proposals, num_classes]) + + return normalized_boxes_per_image + + def loss(self, prediction_dict, true_image_shapes, scope=None): + """Compute scalar loss tensors given prediction tensors. + + If number_of_stages=1, only RPN related losses are computed (i.e., + `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all + losses are computed. + + Args: + prediction_dict: a dictionary holding prediction tensors (see the + documentation for the predict method. If number_of_stages=1, we + expect prediction_dict to contain `rpn_box_encodings`, + `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`, + `image_shape`, and `anchors` fields. Otherwise we expect + prediction_dict to additionally contain `refined_box_encodings`, + `class_predictions_with_background`, `num_proposals`, and + `proposal_boxes` fields. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + scope: Optional scope name. 
+ + Returns: + a dictionary mapping loss keys (`first_stage_localization_loss`, + `first_stage_objectness_loss`, 'second_stage_localization_loss', + 'second_stage_classification_loss') to scalar tensors representing + corresponding loss values. + """ + with tf.name_scope(scope, 'Loss', prediction_dict.values()): + (groundtruth_boxlists, groundtruth_classes_with_background_list, + groundtruth_masks_list, groundtruth_weights_list + ) = self._format_groundtruth_data( + self._image_batch_shape_2d(prediction_dict['image_shape'])) + loss_dict = self._loss_rpn( + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + prediction_dict['anchors'], groundtruth_boxlists, + groundtruth_classes_with_background_list, groundtruth_weights_list) + if self._number_of_stages > 1: + loss_dict.update( + self._loss_box_classifier( + prediction_dict['refined_box_encodings'], + prediction_dict['class_predictions_with_background'], + prediction_dict['proposal_boxes'], + prediction_dict['num_proposals'], groundtruth_boxlists, + groundtruth_classes_with_background_list, + groundtruth_weights_list, prediction_dict['image_shape'], + prediction_dict.get('mask_predictions'), groundtruth_masks_list, + prediction_dict.get( + fields.DetectionResultFields.detection_boxes), + prediction_dict.get( + fields.DetectionResultFields.num_detections))) + return loss_dict + + def _loss_rpn(self, rpn_box_encodings, + rpn_objectness_predictions_with_background, anchors, + groundtruth_boxlists, groundtruth_classes_with_background_list, + groundtruth_weights_list): + """Computes scalar RPN loss tensors. + + Uses self._proposal_target_assigner to obtain regression and classification + targets for the first stage RPN, samples a "minibatch" of anchors to + participate in the loss computation, and returns the RPN losses. + + Args: + rpn_box_encodings: A 3-D float tensor of shape + [batch_size, num_anchors, self._box_coder.code_size] containing + predicted proposal box encodings. + rpn_objectness_predictions_with_background: A 2-D float tensor of shape + [batch_size, num_anchors, 2] containing objectness predictions + (logits) for each of the anchors with 0 corresponding to background + and 1 corresponding to object. + anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors + for the first stage RPN. Note that `num_anchors` can differ depending + on whether the model is created in training or inference mode. + groundtruth_boxlists: A list of BoxLists containing coordinates of the + groundtruth boxes. + groundtruth_classes_with_background_list: A list of 2-D one-hot + (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the + class targets with the 0th index assumed to map to the background class. + groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + + Returns: + a dictionary mapping loss keys (`first_stage_localization_loss`, + `first_stage_objectness_loss`) to scalar tensors representing + corresponding loss values. 
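+      Example (illustrative, added by the editor): the returned dictionary is
+      typically reduced to a single training objective along the lines of
+
+        loss_dict = model.loss(prediction_dict, true_image_shapes)
+        total_loss = tf.add_n(list(loss_dict.values()) +
+                              model.regularization_losses())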
+ """ + with tf.name_scope('RPNLoss'): + (batch_cls_targets, batch_cls_weights, batch_reg_targets, + batch_reg_weights, _) = target_assigner.batch_assign_targets( + target_assigner=self._proposal_target_assigner, + anchors_batch=box_list.BoxList(anchors), + gt_box_batch=groundtruth_boxlists, + gt_class_targets_batch=(len(groundtruth_boxlists) * [None]), + gt_weights_batch=groundtruth_weights_list) + batch_cls_weights = tf.reduce_mean(batch_cls_weights, axis=2) + batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2) + + def _minibatch_subsample_fn(inputs): + cls_targets, cls_weights = inputs + return self._first_stage_sampler.subsample( + tf.cast(cls_weights, tf.bool), + self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool)) + batch_sampled_indices = tf.cast(shape_utils.static_or_dynamic_map_fn( + _minibatch_subsample_fn, + [batch_cls_targets, batch_cls_weights], + dtype=tf.bool, + parallel_iterations=self._parallel_iterations, + back_prop=True), dtype=tf.float32) + + # Normalize by number of examples in sampled minibatch + normalizer = tf.maximum( + tf.reduce_sum(batch_sampled_indices, axis=1), 1.0) + batch_one_hot_targets = tf.one_hot( + tf.cast(batch_cls_targets, dtype=tf.int32), depth=2) + sampled_reg_indices = tf.multiply(batch_sampled_indices, + batch_reg_weights) + + losses_mask = None + if self.groundtruth_has_field(fields.InputDataFields.is_annotated): + losses_mask = tf.stack(self.groundtruth_lists( + fields.InputDataFields.is_annotated)) + localization_losses = self._first_stage_localization_loss( + rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices, + losses_mask=losses_mask) + objectness_losses = self._first_stage_objectness_loss( + rpn_objectness_predictions_with_background, + batch_one_hot_targets, + weights=tf.expand_dims(batch_sampled_indices, axis=-1), + losses_mask=losses_mask) + localization_loss = tf.reduce_mean( + tf.reduce_sum(localization_losses, axis=1) / normalizer) + objectness_loss = tf.reduce_mean( + tf.reduce_sum(objectness_losses, axis=1) / normalizer) + + localization_loss = tf.multiply(self._first_stage_loc_loss_weight, + localization_loss, + name='localization_loss') + objectness_loss = tf.multiply(self._first_stage_obj_loss_weight, + objectness_loss, name='objectness_loss') + loss_dict = {'Loss/RPNLoss/localization_loss': localization_loss, + 'Loss/RPNLoss/objectness_loss': objectness_loss} + return loss_dict + + def _loss_box_classifier(self, + refined_box_encodings, + class_predictions_with_background, + proposal_boxes, + num_proposals, + groundtruth_boxlists, + groundtruth_classes_with_background_list, + groundtruth_weights_list, + image_shape, + prediction_masks=None, + groundtruth_masks_list=None, + detection_boxes=None, + num_detections=None): + """Computes scalar box classifier loss tensors. + + Uses self._detector_target_assigner to obtain regression and classification + targets for the second stage box classifier, optionally performs + hard mining, and returns losses. All losses are computed independently + for each image and then averaged across the batch. + Please note that for boxes and masks with multiple labels, the box + regression and mask prediction losses are only computed for one label. + + This function assumes that the proposal boxes in the "padded" regions are + actually zero (and thus should not be matched to). + + + Args: + refined_box_encodings: a 3-D tensor with shape + [total_num_proposals, num_classes, box_coder.code_size] representing + predicted (final) refined box encodings. 
If using a shared box across + classes this will instead have shape + [total_num_proposals, 1, box_coder.code_size]. + class_predictions_with_background: a 2-D tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors. Note that this tensor + *includes* background class predictions (at class index 0). + proposal_boxes: [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes. + num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] + representing the number of proposals predicted for each image in + the batch. + groundtruth_boxlists: a list of BoxLists containing coordinates of the + groundtruth boxes. + groundtruth_classes_with_background_list: a list of 2-D one-hot + (or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the + class targets with the 0th index assumed to map to the background class. + groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + image_shape: a 1-D tensor of shape [4] representing the image shape. + prediction_masks: an optional 4-D tensor with shape [total_num_proposals, + num_classes, mask_height, mask_width] containing the instance masks for + each box. + groundtruth_masks_list: an optional list of 3-D tensors of shape + [num_boxes, image_height, image_width] containing the instance masks for + each of the boxes. + detection_boxes: 3-D float tensor of shape [batch, + max_total_detections, 4] containing post-processed detection boxes in + normalized co-ordinates. + num_detections: 1-D int32 tensor of shape [batch] containing number of + valid detections in `detection_boxes`. + + Returns: + a dictionary mapping loss keys ('second_stage_localization_loss', + 'second_stage_classification_loss') to scalar tensors representing + corresponding loss values. + + Raises: + ValueError: if `predict_instance_masks` in + second_stage_mask_rcnn_box_predictor is True and + `groundtruth_masks_list` is not provided. 
+ """ + with tf.name_scope('BoxClassifierLoss'): + paddings_indicator = self._padded_batched_proposals_indicator( + num_proposals, proposal_boxes.shape[1]) + proposal_boxlists = [ + box_list.BoxList(proposal_boxes_single_image) + for proposal_boxes_single_image in tf.unstack(proposal_boxes)] + batch_size = len(proposal_boxlists) + + num_proposals_or_one = tf.cast(tf.expand_dims( + tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1), + dtype=tf.float32) + normalizer = tf.tile(num_proposals_or_one, + [1, self.max_num_proposals]) * batch_size + + (batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets, + batch_reg_weights, _) = target_assigner.batch_assign_targets( + target_assigner=self._detector_target_assigner, + anchors_batch=proposal_boxlists, + gt_box_batch=groundtruth_boxlists, + gt_class_targets_batch=groundtruth_classes_with_background_list, + unmatched_class_label=tf.constant( + [1] + self._num_classes * [0], dtype=tf.float32), + gt_weights_batch=groundtruth_weights_list) + if self.groundtruth_has_field( + fields.InputDataFields.groundtruth_labeled_classes): + gt_labeled_classes = self.groundtruth_lists( + fields.InputDataFields.groundtruth_labeled_classes) + gt_labeled_classes = tf.pad( + gt_labeled_classes, [[0, 0], [1, 0]], + mode='CONSTANT', + constant_values=1) + batch_cls_weights *= tf.expand_dims(gt_labeled_classes, 1) + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size, self.max_num_proposals, -1]) + + flat_cls_targets_with_background = tf.reshape( + batch_cls_targets_with_background, + [batch_size * self.max_num_proposals, -1]) + one_hot_flat_cls_targets_with_background = tf.argmax( + flat_cls_targets_with_background, axis=1) + one_hot_flat_cls_targets_with_background = tf.one_hot( + one_hot_flat_cls_targets_with_background, + flat_cls_targets_with_background.get_shape()[1]) + + # If using a shared box across classes use directly + if refined_box_encodings.shape[1] == 1: + reshaped_refined_box_encodings = tf.reshape( + refined_box_encodings, + [batch_size, self.max_num_proposals, self._box_coder.code_size]) + # For anchors with multiple labels, picks refined_location_encodings + # for just one class to avoid over-counting for regression loss and + # (optionally) mask loss. 
+ else: + reshaped_refined_box_encodings = ( + self._get_refined_encodings_for_postitive_class( + refined_box_encodings, + one_hot_flat_cls_targets_with_background, batch_size)) + + losses_mask = None + if self.groundtruth_has_field(fields.InputDataFields.is_annotated): + losses_mask = tf.stack(self.groundtruth_lists( + fields.InputDataFields.is_annotated)) + second_stage_loc_losses = self._second_stage_localization_loss( + reshaped_refined_box_encodings, + batch_reg_targets, + weights=batch_reg_weights, + losses_mask=losses_mask) / normalizer + second_stage_cls_losses = ops.reduce_sum_trailing_dimensions( + self._second_stage_classification_loss( + class_predictions_with_background, + batch_cls_targets_with_background, + weights=batch_cls_weights, + losses_mask=losses_mask), + ndims=2) / normalizer + + second_stage_loc_loss = tf.reduce_sum( + second_stage_loc_losses * tf.cast(paddings_indicator, + dtype=tf.float32)) + second_stage_cls_loss = tf.reduce_sum( + second_stage_cls_losses * tf.cast(paddings_indicator, + dtype=tf.float32)) + + if self._hard_example_miner: + (second_stage_loc_loss, second_stage_cls_loss + ) = self._unpad_proposals_and_apply_hard_mining( + proposal_boxlists, second_stage_loc_losses, + second_stage_cls_losses, num_proposals) + localization_loss = tf.multiply(self._second_stage_loc_loss_weight, + second_stage_loc_loss, + name='localization_loss') + + classification_loss = tf.multiply(self._second_stage_cls_loss_weight, + second_stage_cls_loss, + name='classification_loss') + + loss_dict = {'Loss/BoxClassifierLoss/localization_loss': + localization_loss, + 'Loss/BoxClassifierLoss/classification_loss': + classification_loss} + second_stage_mask_loss = None + if prediction_masks is not None: + if groundtruth_masks_list is None: + raise ValueError('Groundtruth instance masks not provided. ' + 'Please configure input reader.') + + if not self._is_training: + (proposal_boxes, proposal_boxlists, paddings_indicator, + one_hot_flat_cls_targets_with_background + ) = self._get_mask_proposal_boxes_and_classes( + detection_boxes, num_detections, image_shape, + groundtruth_boxlists, groundtruth_classes_with_background_list, + groundtruth_weights_list) + unmatched_mask_label = tf.zeros(image_shape[1:3], dtype=tf.float32) + (batch_mask_targets, _, _, batch_mask_target_weights, + _) = target_assigner.batch_assign_targets( + target_assigner=self._detector_target_assigner, + anchors_batch=proposal_boxlists, + gt_box_batch=groundtruth_boxlists, + gt_class_targets_batch=groundtruth_masks_list, + unmatched_class_label=unmatched_mask_label, + gt_weights_batch=groundtruth_weights_list) + + # Pad the prediction_masks with to add zeros for background class to be + # consistent with class predictions. + if prediction_masks.get_shape().as_list()[1] == 1: + # Class agnostic masks or masks for one-class prediction. Logic for + # both cases is the same since background predictions are ignored + # through the batch_mask_target_weights. 
+ prediction_masks_masked_by_class_targets = prediction_masks + else: + prediction_masks_with_background = tf.pad( + prediction_masks, [[0, 0], [1, 0], [0, 0], [0, 0]]) + prediction_masks_masked_by_class_targets = tf.boolean_mask( + prediction_masks_with_background, + tf.greater(one_hot_flat_cls_targets_with_background, 0)) + + mask_height = shape_utils.get_dim_as_int(prediction_masks.shape[2]) + mask_width = shape_utils.get_dim_as_int(prediction_masks.shape[3]) + reshaped_prediction_masks = tf.reshape( + prediction_masks_masked_by_class_targets, + [batch_size, -1, mask_height * mask_width]) + + batch_mask_targets_shape = tf.shape(batch_mask_targets) + flat_gt_masks = tf.reshape(batch_mask_targets, + [-1, batch_mask_targets_shape[2], + batch_mask_targets_shape[3]]) + + # Use normalized proposals to crop mask targets from image masks. + flat_normalized_proposals = box_list_ops.to_normalized_coordinates( + box_list.BoxList(tf.reshape(proposal_boxes, [-1, 4])), + image_shape[1], image_shape[2], check_range=False).get() + + flat_cropped_gt_mask = self._crop_and_resize_fn( + [tf.expand_dims(flat_gt_masks, -1)], + tf.expand_dims(flat_normalized_proposals, axis=1), None, + [mask_height, mask_width]) + # Without stopping gradients into cropped groundtruth masks the + # performance with 100-padded groundtruth masks when batch size > 1 is + # about 4% worse. + # TODO(rathodv): Investigate this since we don't expect any variables + # upstream of flat_cropped_gt_mask. + flat_cropped_gt_mask = tf.stop_gradient(flat_cropped_gt_mask) + + batch_cropped_gt_mask = tf.reshape( + flat_cropped_gt_mask, + [batch_size, -1, mask_height * mask_width]) + + mask_losses_weights = ( + batch_mask_target_weights * tf.cast(paddings_indicator, + dtype=tf.float32)) + mask_losses = self._second_stage_mask_loss( + reshaped_prediction_masks, + batch_cropped_gt_mask, + weights=tf.expand_dims(mask_losses_weights, axis=-1), + losses_mask=losses_mask) + total_mask_loss = tf.reduce_sum(mask_losses) + normalizer = tf.maximum( + tf.reduce_sum(mask_losses_weights * mask_height * mask_width), 1.0) + second_stage_mask_loss = total_mask_loss / normalizer + + if second_stage_mask_loss is not None: + mask_loss = tf.multiply(self._second_stage_mask_loss_weight, + second_stage_mask_loss, name='mask_loss') + loss_dict['Loss/BoxClassifierLoss/mask_loss'] = mask_loss + return loss_dict + + def _get_mask_proposal_boxes_and_classes( + self, detection_boxes, num_detections, image_shape, groundtruth_boxlists, + groundtruth_classes_with_background_list, groundtruth_weights_list): + """Returns proposal boxes and class targets to compute evaluation mask loss. + + During evaluation, detection boxes are used to extract features for mask + prediction. Therefore, to compute mask loss during evaluation detection + boxes must be used to compute correct class and mask targets. This function + returns boxes and classes in the correct format for computing mask targets + during evaluation. + + Args: + detection_boxes: A 3-D float tensor of shape [batch, max_detection_boxes, + 4] containing detection boxes in normalized co-ordinates. + num_detections: A 1-D float tensor of shape [batch] containing number of + valid boxes in `detection_boxes`. + image_shape: A 1-D tensor of shape [4] containing image tensor shape. + groundtruth_boxlists: A list of groundtruth boxlists. + groundtruth_classes_with_background_list: A list of groundtruth classes. + groundtruth_weights_list: A list of groundtruth weights. 
+ Return: + mask_proposal_boxes: detection boxes to use for mask proposals in absolute + co-ordinates. + mask_proposal_boxlists: `mask_proposal_boxes` in a list of BoxLists in + absolute co-ordinates. + mask_proposal_paddings_indicator: a tensor indicating valid boxes. + mask_proposal_one_hot_flat_cls_targets_with_background: Class targets + computed using detection boxes. + """ + batch, max_num_detections, _ = detection_boxes.shape.as_list() + proposal_boxes = tf.reshape(box_list_ops.to_absolute_coordinates( + box_list.BoxList(tf.reshape(detection_boxes, [-1, 4])), image_shape[1], + image_shape[2]).get(), [batch, max_num_detections, 4]) + proposal_boxlists = [ + box_list.BoxList(detection_boxes_single_image) + for detection_boxes_single_image in tf.unstack(proposal_boxes) + ] + paddings_indicator = self._padded_batched_proposals_indicator( + tf.cast(num_detections, dtype=tf.int32), detection_boxes.shape[1]) + (batch_cls_targets_with_background, _, _, _, + _) = target_assigner.batch_assign_targets( + target_assigner=self._detector_target_assigner, + anchors_batch=proposal_boxlists, + gt_box_batch=groundtruth_boxlists, + gt_class_targets_batch=groundtruth_classes_with_background_list, + unmatched_class_label=tf.constant( + [1] + self._num_classes * [0], dtype=tf.float32), + gt_weights_batch=groundtruth_weights_list) + flat_cls_targets_with_background = tf.reshape( + batch_cls_targets_with_background, [-1, self._num_classes + 1]) + one_hot_flat_cls_targets_with_background = tf.argmax( + flat_cls_targets_with_background, axis=1) + one_hot_flat_cls_targets_with_background = tf.one_hot( + one_hot_flat_cls_targets_with_background, + flat_cls_targets_with_background.get_shape()[1]) + return (proposal_boxes, proposal_boxlists, paddings_indicator, + one_hot_flat_cls_targets_with_background) + + def _get_refined_encodings_for_postitive_class( + self, refined_box_encodings, flat_cls_targets_with_background, + batch_size): + # We only predict refined location encodings for the non background + # classes, but we now pad it to make it compatible with the class + # predictions + refined_box_encodings_with_background = tf.pad(refined_box_encodings, + [[0, 0], [1, 0], [0, 0]]) + refined_box_encodings_masked_by_class_targets = ( + box_list_ops.boolean_mask( + box_list.BoxList( + tf.reshape(refined_box_encodings_with_background, + [-1, self._box_coder.code_size])), + tf.reshape(tf.greater(flat_cls_targets_with_background, 0), [-1]), + use_static_shapes=self._use_static_shapes, + indicator_sum=batch_size * self.max_num_proposals + if self._use_static_shapes else None).get()) + return tf.reshape( + refined_box_encodings_masked_by_class_targets, [ + batch_size, self.max_num_proposals, + self._box_coder.code_size + ]) + + def _padded_batched_proposals_indicator(self, + num_proposals, + max_num_proposals): + """Creates indicator matrix of non-pad elements of padded batch proposals. + + Args: + num_proposals: Tensor of type tf.int32 with shape [batch_size]. + max_num_proposals: Maximum number of proposals per image (integer). + + Returns: + A Tensor of type tf.bool with shape [batch_size, max_num_proposals]. 
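+ For example, with num_proposals = [2, 1] and max_num_proposals = 3 the
+ returned indicator is [[True, True, False], [True, False, False]]: each
+ row flags the first num_proposals[i] entries of that image as valid.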
+ """ + batch_size = tf.size(num_proposals) + tiled_num_proposals = tf.tile( + tf.expand_dims(num_proposals, 1), [1, max_num_proposals]) + tiled_proposal_index = tf.tile( + tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1]) + return tf.greater(tiled_num_proposals, tiled_proposal_index) + + def _unpad_proposals_and_apply_hard_mining(self, + proposal_boxlists, + second_stage_loc_losses, + second_stage_cls_losses, + num_proposals): + """Unpads proposals and applies hard mining. + + Args: + proposal_boxlists: A list of `batch_size` BoxLists each representing + `self.max_num_proposals` representing decoded proposal bounding boxes + for each image. + second_stage_loc_losses: A Tensor of type `float32`. A tensor of shape + `[batch_size, self.max_num_proposals]` representing per-anchor + second stage localization loss values. + second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape + `[batch_size, self.max_num_proposals]` representing per-anchor + second stage classification loss values. + num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] + representing the number of proposals predicted for each image in + the batch. + + Returns: + second_stage_loc_loss: A scalar float32 tensor representing the second + stage localization loss. + second_stage_cls_loss: A scalar float32 tensor representing the second + stage classification loss. + """ + for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss, + single_image_num_proposals) in zip( + proposal_boxlists, + tf.unstack(second_stage_loc_losses), + tf.unstack(second_stage_cls_losses), + tf.unstack(num_proposals)): + proposal_boxlist = box_list.BoxList( + tf.slice(proposal_boxlist.get(), + [0, 0], [single_image_num_proposals, -1])) + single_image_loc_loss = tf.slice(single_image_loc_loss, + [0], [single_image_num_proposals]) + single_image_cls_loss = tf.slice(single_image_cls_loss, + [0], [single_image_num_proposals]) + return self._hard_example_miner( + location_losses=tf.expand_dims(single_image_loc_loss, 0), + cls_losses=tf.expand_dims(single_image_cls_loss, 0), + decoded_boxlist_list=[proposal_boxlist]) + + def regularization_losses(self): + """Returns a list of regularization losses for this model. + + Returns a list of regularization losses for this model that the estimator + needs to use during training/optimization. + + Returns: + A list of regularization loss tensors. + """ + all_losses = [] + slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + # Copy the slim losses to avoid modifying the collection + if slim_losses: + all_losses.extend(slim_losses) + # TODO(kaftan): Possibly raise an error if the feature extractors are + # uninitialized in Keras. 
+ if self._feature_extractor_for_proposal_features: + if (self._feature_extractor_for_proposal_features != + _UNINITIALIZED_FEATURE_EXTRACTOR): + all_losses.extend(self._feature_extractor_for_proposal_features.losses) + if isinstance(self._first_stage_box_predictor_first_conv, + tf.keras.Model): + all_losses.extend( + self._first_stage_box_predictor_first_conv.losses) + if self._first_stage_box_predictor.is_keras_model: + all_losses.extend(self._first_stage_box_predictor.losses) + if self._feature_extractor_for_box_classifier_features: + if (self._feature_extractor_for_box_classifier_features != + _UNINITIALIZED_FEATURE_EXTRACTOR): + all_losses.extend( + self._feature_extractor_for_box_classifier_features.losses) + if self._mask_rcnn_box_predictor: + if self._mask_rcnn_box_predictor.is_keras_model: + all_losses.extend(self._mask_rcnn_box_predictor.losses) + return all_losses + + def restore_map(self, + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=False): + """Returns a map of variables to load from a foreign checkpoint. + + See parent class for details. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`. Default 'detection'. + load_all_detection_checkpoint_vars: whether to load all variables (when + `fine_tune_checkpoint_type` is `detection`). If False, only variables + within the feature extractor scopes are included. Default False. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + Raises: + ValueError: if fine_tune_checkpoint_type is neither `classification` + nor `detection`. + """ + if fine_tune_checkpoint_type not in ['detection', 'classification']: + raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( + fine_tune_checkpoint_type)) + if fine_tune_checkpoint_type == 'classification': + return self._feature_extractor.restore_from_classification_checkpoint_fn( + self.first_stage_feature_extractor_scope, + self.second_stage_feature_extractor_scope) + + variables_to_restore = variables_helper.get_global_variables_safely() + variables_to_restore.append(tf.train.get_or_create_global_step()) + # Only load feature extractor variables to be consistent with loading from + # a classification checkpoint. + include_patterns = None + if not load_all_detection_checkpoint_vars: + include_patterns = [ + self.first_stage_feature_extractor_scope, + self.second_stage_feature_extractor_scope + ] + feature_extractor_variables = slim.filter_variables( + variables_to_restore, include_patterns=include_patterns) + return {var.op.name: var for var in feature_extractor_variables} + + def restore_from_objects(self, fine_tune_checkpoint_type='detection'): + """Returns a map of Trackable objects to load from a foreign checkpoint. + + Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module + or Checkpoint). This enables the model to initialize based on weights from + another task. For example, the feature extractor variables from a + classification model can be used to bootstrap training of an object + detector. When loading from an object detection model, the checkpoint model + should have the same parameters as this detection model with exception of + the num_classes parameter. 
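+
+ For example (an illustrative pattern only, where `model` is an instance of
+ this class and `checkpoint_path` is assumed rather than defined here), the
+ returned mapping can be wrapped as
+ ckpt = tf.train.Checkpoint(**model.restore_from_objects('detection'))
+ and loaded with ckpt.restore(checkpoint_path).expect_partial().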
+ + Note that this function is intended to be used to restore Keras-based + models when running Tensorflow 2, whereas restore_map (above) is intended + to be used to restore Slim-based models when running Tensorflow 1.x. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`. Default 'detection'. + + Returns: + A dict mapping keys to Trackable objects (tf.Module or Checkpoint). + """ + if fine_tune_checkpoint_type == 'classification': + return { + 'feature_extractor': + self._feature_extractor.classification_backbone + } + elif fine_tune_checkpoint_type == 'detection': + fake_model = tf.train.Checkpoint( + _feature_extractor_for_box_classifier_features= + self._feature_extractor_for_box_classifier_features, + _feature_extractor_for_proposal_features= + self._feature_extractor_for_proposal_features) + return {'model': fake_model} + else: + raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( + fine_tune_checkpoint_type)) + + def updates(self): + """Returns a list of update operators for this model. + + Returns a list of update operators for this model that must be executed at + each training step. The estimator's train op needs to have a control + dependency on these updates. + + Returns: + A list of update operators. + """ + update_ops = [] + slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + # Copy the slim ops to avoid modifying the collection + if slim_update_ops: + update_ops.extend(slim_update_ops) + # Passing None to get_updates_for grabs updates that should always be + # executed and don't depend on any model inputs in the graph. + # (E.g. if there was some count that should be incremented every time a + # model is run). + # + # Passing inputs grabs updates that are transitively computed from the + # model inputs being passed in. + # (E.g. 
a batchnorm update depends on the observed inputs) + if self._feature_extractor_for_proposal_features: + if (self._feature_extractor_for_proposal_features != + _UNINITIALIZED_FEATURE_EXTRACTOR): + update_ops.extend( + self._feature_extractor_for_proposal_features.get_updates_for(None)) + update_ops.extend( + self._feature_extractor_for_proposal_features.get_updates_for( + self._feature_extractor_for_proposal_features.inputs)) + if isinstance(self._first_stage_box_predictor_first_conv, + tf.keras.Model): + update_ops.extend( + self._first_stage_box_predictor_first_conv.get_updates_for( + None)) + update_ops.extend( + self._first_stage_box_predictor_first_conv.get_updates_for( + self._first_stage_box_predictor_first_conv.inputs)) + if self._first_stage_box_predictor.is_keras_model: + update_ops.extend( + self._first_stage_box_predictor.get_updates_for(None)) + update_ops.extend( + self._first_stage_box_predictor.get_updates_for( + self._first_stage_box_predictor.inputs)) + if self._feature_extractor_for_box_classifier_features: + if (self._feature_extractor_for_box_classifier_features != + _UNINITIALIZED_FEATURE_EXTRACTOR): + update_ops.extend( + self._feature_extractor_for_box_classifier_features.get_updates_for( + None)) + update_ops.extend( + self._feature_extractor_for_box_classifier_features.get_updates_for( + self._feature_extractor_for_box_classifier_features.inputs)) + if self._mask_rcnn_box_predictor: + if self._mask_rcnn_box_predictor.is_keras_model: + update_ops.extend( + self._mask_rcnn_box_predictor.get_updates_for(None)) + update_ops.extend( + self._mask_rcnn_box_predictor.get_updates_for( + self._mask_rcnn_box_predictor.inputs)) + return update_ops diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47edfc4ccdc4bd4272d87fcac5ec345d0d72a729 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d935c99fad63dcdecb67b310430f97e2c51a9ed6 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch_test.py @@ -0,0 +1,513 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.meta_architectures.faster_rcnn_meta_arch.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import faster_rcnn_meta_arch_test_lib +from object_detection.utils import test_utils + + +class FasterRCNNMetaArchTest( + faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase, + parameterized.TestCase): + + def test_postprocess_second_stage_only_inference_mode_with_masks(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, second_stage_batch_size=6) + + batch_size = 2 + total_num_padded_proposals = batch_size * model.max_num_proposals + def graph_fn(): + proposal_boxes = tf.constant( + [[[1, 1, 2, 3], + [0, 0, 1, 1], + [.5, .5, .6, .6], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], + [[2, 3, 6, 8], + [1, 2, 5, 3], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32) + num_proposals = tf.constant([3, 2], dtype=tf.int32) + refined_box_encodings = tf.zeros( + [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32) + class_predictions_with_background = tf.ones( + [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32) + image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) + + mask_height = 2 + mask_width = 2 + mask_predictions = 30. * tf.ones( + [total_num_padded_proposals, model.num_classes, + mask_height, mask_width], dtype=tf.float32) + + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + detections = model.postprocess({ + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': proposal_boxes, + 'image_shape': image_shape, + 'mask_predictions': mask_predictions + }, true_image_shapes) + return (detections['detection_boxes'], + detections['detection_scores'], + detections['detection_classes'], + detections['num_detections'], + detections['detection_masks']) + (detection_boxes, detection_scores, detection_classes, + num_detections, detection_masks) = self.execute_cpu(graph_fn, [], graph=g) + exp_detection_masks = np.array([[[[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]]], + [[[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[0, 0], [0, 0]]]]) + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + self.assertAllClose(detection_scores, + [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]) + self.assertAllClose(detection_classes, + [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) + self.assertAllClose(num_detections, [5, 4]) + self.assertAllClose(detection_masks, exp_detection_masks) + self.assertTrue(np.amax(detection_masks <= 1.0)) + self.assertTrue(np.amin(detection_masks >= 0.0)) + + def test_postprocess_second_stage_only_inference_mode_with_calibration(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, second_stage_batch_size=6, + calibration_mapping_value=0.5) + + batch_size = 2 + total_num_padded_proposals = batch_size * model.max_num_proposals + def graph_fn(): + proposal_boxes = tf.constant( + [[[1, 1, 2, 3], + [0, 0, 1, 1], + [.5, .5, .6, .6], + 4*[0], 4*[0], 4*[0], 4*[0], 
4*[0]], + [[2, 3, 6, 8], + [1, 2, 5, 3], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32) + num_proposals = tf.constant([3, 2], dtype=tf.int32) + refined_box_encodings = tf.zeros( + [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32) + class_predictions_with_background = tf.ones( + [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32) + image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) + + mask_height = 2 + mask_width = 2 + mask_predictions = 30. * tf.ones( + [total_num_padded_proposals, model.num_classes, + mask_height, mask_width], dtype=tf.float32) + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + detections = model.postprocess({ + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': proposal_boxes, + 'image_shape': image_shape, + 'mask_predictions': mask_predictions + }, true_image_shapes) + return (detections['detection_boxes'], + detections['detection_scores'], + detections['detection_classes'], + detections['num_detections'], + detections['detection_masks']) + (detection_boxes, detection_scores, detection_classes, + num_detections, detection_masks) = self.execute_cpu(graph_fn, [], graph=g) + exp_detection_masks = np.array([[[[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]]], + [[[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[1, 1], [1, 1]], + [[0, 0], [0, 0]]]]) + + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + # All scores map to 0.5, except for the final one, which is pruned. + self.assertAllClose(detection_scores, + [[0.5, 0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5, 0.0]]) + self.assertAllClose(detection_classes, + [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) + self.assertAllClose(num_detections, [5, 4]) + self.assertAllClose(detection_masks, + exp_detection_masks) + self.assertTrue(np.amax(detection_masks <= 1.0)) + self.assertTrue(np.amin(detection_masks >= 0.0)) + + def test_postprocess_second_stage_only_inference_mode_with_shared_boxes(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, second_stage_batch_size=6) + + batch_size = 2 + total_num_padded_proposals = batch_size * model.max_num_proposals + def graph_fn(): + proposal_boxes = tf.constant( + [[[1, 1, 2, 3], + [0, 0, 1, 1], + [.5, .5, .6, .6], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], + [[2, 3, 6, 8], + [1, 2, 5, 3], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32) + num_proposals = tf.constant([3, 2], dtype=tf.int32) + + # This has 1 box instead of one for each class. 
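+ # The single shared encoding per proposal is applied to every class during
+ # postprocessing, so only one box per proposal is supplied here.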
+ refined_box_encodings = tf.zeros( + [total_num_padded_proposals, 1, 4], dtype=tf.float32) + class_predictions_with_background = tf.ones( + [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32) + image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) + + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + detections = model.postprocess({ + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': proposal_boxes, + 'image_shape': image_shape, + }, true_image_shapes) + return (detections['detection_boxes'], + detections['detection_scores'], + detections['detection_classes'], + detections['num_detections']) + (detection_boxes, detection_scores, detection_classes, + num_detections) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + self.assertAllClose(detection_scores, + [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]) + self.assertAllClose(detection_classes, + [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) + self.assertAllClose(num_detections, [5, 4]) + + @parameterized.parameters( + {'masks_are_class_agnostic': False}, + {'masks_are_class_agnostic': True}, + ) + def test_predict_correct_shapes_in_inference_mode_three_stages_with_masks( + self, masks_are_class_agnostic): + batch_size = 2 + image_size = 10 + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=3, + second_stage_batch_size=2, + predict_masks=True, + masks_are_class_agnostic=masks_are_class_agnostic) + def graph_fn(): + shape = [tf.random_uniform([], minval=batch_size, maxval=batch_size + 1, + dtype=tf.int32), + tf.random_uniform([], minval=image_size, maxval=image_size + 1, + dtype=tf.int32), + tf.random_uniform([], minval=image_size, maxval=image_size + 1, + dtype=tf.int32), + 3] + image = tf.zeros(shape) + _, true_image_shapes = model.preprocess(image) + detections = model.predict(image, true_image_shapes) + return (detections['detection_boxes'], detections['detection_classes'], + detections['detection_scores'], detections['num_detections'], + detections['detection_masks'], detections['mask_predictions']) + (detection_boxes, detection_scores, detection_classes, + num_detections, detection_masks, + mask_predictions) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + self.assertAllEqual(detection_masks.shape, + [2, 5, 14, 14]) + self.assertAllEqual(detection_classes.shape, [2, 5]) + self.assertAllEqual(detection_scores.shape, [2, 5]) + self.assertAllEqual(num_detections.shape, [2]) + num_classes = 1 if masks_are_class_agnostic else 2 + self.assertAllEqual(mask_predictions.shape, + [10, num_classes, 14, 14]) + + def test_raw_detection_boxes_and_anchor_indices_correct(self): + batch_size = 2 + image_size = 10 + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=2, + share_box_across_classes=True, + return_raw_detections_during_predict=True) + def graph_fn(): + shape = [tf.random_uniform([], minval=batch_size, maxval=batch_size + 1, + dtype=tf.int32), + tf.random_uniform([], minval=image_size, maxval=image_size + 1, + dtype=tf.int32), + tf.random_uniform([], minval=image_size, maxval=image_size + 1, + dtype=tf.int32), + 3] + image = tf.zeros(shape) + _, true_image_shapes = model.preprocess(image) + predict_tensor_dict = model.predict(image, 
true_image_shapes) + detections = model.postprocess(predict_tensor_dict, true_image_shapes) + return (detections['detection_boxes'], + detections['num_detections'], + detections['detection_anchor_indices'], + detections['raw_detection_boxes'], + predict_tensor_dict['raw_detection_boxes']) + (detection_boxes, num_detections, detection_anchor_indices, + raw_detection_boxes, + predict_raw_detection_boxes) = self.execute_cpu(graph_fn, [], graph=g) + + # Verify that the raw detections from predict and postprocess are the + # same. + self.assertAllClose( + np.squeeze(predict_raw_detection_boxes), raw_detection_boxes) + # Verify that the raw detection boxes at detection anchor indices are the + # same as the postprocessed detections. + for i in range(batch_size): + num_detections_per_image = int(num_detections[i]) + detection_boxes_per_image = detection_boxes[i][ + :num_detections_per_image] + detection_anchor_indices_per_image = detection_anchor_indices[i][ + :num_detections_per_image] + raw_detections_per_image = np.squeeze(raw_detection_boxes[i]) + raw_detections_at_anchor_indices = raw_detections_per_image[ + detection_anchor_indices_per_image] + self.assertAllClose(detection_boxes_per_image, + raw_detections_at_anchor_indices) + + @parameterized.parameters( + {'masks_are_class_agnostic': False}, + {'masks_are_class_agnostic': True}, + ) + def test_predict_gives_correct_shapes_in_train_mode_both_stages_with_masks( + self, masks_are_class_agnostic): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=3, + second_stage_batch_size=7, + predict_masks=True, + masks_are_class_agnostic=masks_are_class_agnostic) + batch_size = 2 + image_size = 10 + max_num_proposals = 7 + def graph_fn(): + image_shape = (batch_size, image_size, image_size, 3) + preprocessed_inputs = tf.zeros(image_shape, dtype=tf.float32) + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), + tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32) + ] + groundtruth_classes_list = [ + tf.constant([[1, 0], [0, 1]], dtype=tf.float32), + tf.constant([[1, 0], [1, 0]], dtype=tf.float32) + ] + groundtruth_weights_list = [ + tf.constant([1, 1], dtype=tf.float32), + tf.constant([1, 1], dtype=tf.float32)] + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth( + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_weights_list=groundtruth_weights_list) + + result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes) + return result_tensor_dict['mask_predictions'] + mask_shape_1 = 1 if masks_are_class_agnostic else model._num_classes + mask_out = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllEqual(mask_out.shape, + (2 * max_num_proposals, mask_shape_1, 14, 14)) + + def test_postprocess_third_stage_only_inference_mode(self): + batch_size = 2 + initial_crop_size = 3 + maxpool_stride = 1 + height = initial_crop_size // maxpool_stride + width = initial_crop_size // maxpool_stride + depth = 3 + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, number_of_stages=3, + second_stage_batch_size=6, predict_masks=True) + total_num_padded_proposals = batch_size * model.max_num_proposals + def graph_fn(images_shape, num_proposals, proposal_boxes, + refined_box_encodings, class_predictions_with_background): + _, true_image_shapes = model.preprocess( + tf.zeros(images_shape)) + detections = model.postprocess({ + 
'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': proposal_boxes, + 'image_shape': images_shape, + 'detection_boxes': tf.zeros([2, 5, 4]), + 'detection_masks': tf.zeros([2, 5, 14, 14]), + 'detection_scores': tf.zeros([2, 5]), + 'detection_classes': tf.zeros([2, 5]), + 'num_detections': tf.zeros([2]), + 'detection_features': tf.zeros([2, 5, width, height, depth]) + }, true_image_shapes) + return (detections['detection_boxes'], detections['detection_masks'], + detections['detection_scores'], detections['detection_classes'], + detections['num_detections'], + detections['detection_features']) + images_shape = np.array((2, 36, 48, 3), dtype=np.int32) + proposal_boxes = np.array( + [[[1, 1, 2, 3], + [0, 0, 1, 1], + [.5, .5, .6, .6], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], + [[2, 3, 6, 8], + [1, 2, 5, 3], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]]) + num_proposals = np.array([3, 2], dtype=np.int32) + refined_box_encodings = np.zeros( + [total_num_padded_proposals, model.num_classes, 4]) + class_predictions_with_background = np.ones( + [total_num_padded_proposals, model.num_classes+1]) + + (detection_boxes, detection_masks, detection_scores, detection_classes, + num_detections, + detection_features) = self.execute_cpu(graph_fn, + [images_shape, num_proposals, + proposal_boxes, + refined_box_encodings, + class_predictions_with_background], + graph=g) + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + self.assertAllEqual(detection_masks.shape, [2, 5, 14, 14]) + self.assertAllClose(detection_scores.shape, [2, 5]) + self.assertAllClose(detection_classes.shape, [2, 5]) + self.assertAllClose(num_detections.shape, [2]) + self.assertTrue(np.amax(detection_masks <= 1.0)) + self.assertTrue(np.amin(detection_masks >= 0.0)) + self.assertAllEqual(detection_features.shape, + [2, 5, width, height, depth]) + self.assertGreaterEqual(np.amax(detection_features), 0) + + def _get_box_classifier_features_shape(self, + image_size, + batch_size, + max_num_proposals, + initial_crop_size, + maxpool_stride, + num_features): + return (batch_size * max_num_proposals, + initial_crop_size // maxpool_stride, + initial_crop_size // maxpool_stride, + num_features) + + def test_output_final_box_features(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=6, + output_final_box_features=True) + + batch_size = 2 + total_num_padded_proposals = batch_size * model.max_num_proposals + def graph_fn(): + proposal_boxes = tf.constant([[[1, 1, 2, 3], [0, 0, 1, 1], + [.5, .5, .6, .6], 4 * [0], 4 * [0], + 4 * [0], 4 * [0], 4 * [0]], + [[2, 3, 6, 8], [1, 2, 5, 3], 4 * [0], + 4 * [0], 4 * [0], 4 * [0], 4 * [0], + 4 * [0]]], + dtype=tf.float32) + num_proposals = tf.constant([3, 2], dtype=tf.int32) + refined_box_encodings = tf.zeros( + [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32) + class_predictions_with_background = tf.ones( + [total_num_padded_proposals, model.num_classes + 1], dtype=tf.float32) + image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32) + + mask_height = 2 + mask_width = 2 + mask_predictions = 30. 
* tf.ones([ + total_num_padded_proposals, model.num_classes, mask_height, mask_width + ], + dtype=tf.float32) + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + rpn_features_to_crop = tf.ones((batch_size, mask_height, mask_width, 3), + tf.float32) + detections = model.postprocess( + { + 'refined_box_encodings': + refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': + num_proposals, + 'proposal_boxes': + proposal_boxes, + 'image_shape': + image_shape, + 'mask_predictions': + mask_predictions, + 'rpn_features_to_crop': + [rpn_features_to_crop] + }, true_image_shapes) + self.assertIn('detection_features', detections) + return (detections['detection_boxes'], detections['detection_scores'], + detections['detection_classes'], detections['num_detections'], + detections['detection_masks']) + (detection_boxes, detection_scores, detection_classes, num_detections, + detection_masks) = self.execute_cpu(graph_fn, [], graph=g) + exp_detection_masks = np.array([[[[1, 1], [1, 1]], [[1, 1], [1, 1]], + [[1, 1], [1, 1]], [[1, 1], [1, 1]], + [[1, 1], [1, 1]]], + [[[1, 1], [1, 1]], [[1, 1], [1, 1]], + [[1, 1], [1, 1]], [[1, 1], [1, 1]], + [[0, 0], [0, 0]]]]) + + self.assertAllEqual(detection_boxes.shape, [2, 5, 4]) + self.assertAllClose(detection_scores, + [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]]) + self.assertAllClose(detection_classes, + [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]]) + self.assertAllClose(num_detections, [5, 4]) + self.assertAllClose(detection_masks, + exp_detection_masks) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch_test_lib.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch_test_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..d5d454de9f964933ef2f902e3687b1b6d8cc0500 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/faster_rcnn_meta_arch_test_lib.py @@ -0,0 +1,2182 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.meta_architectures.faster_rcnn_meta_arch.""" +import functools +from absl.testing import parameterized + +import numpy as np +import six +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.anchor_generators import grid_anchor_generator +from object_detection.anchor_generators import multiscale_grid_anchor_generator +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.builders import post_processing_builder +from object_detection.core import balanced_positive_negative_sampler as sampler +from object_detection.core import losses +from object_detection.core import post_processing +from object_detection.core import target_assigner +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.protos import box_predictor_pb2 +from object_detection.protos import hyperparams_pb2 +from object_detection.protos import post_processing_pb2 +from object_detection.utils import spatial_transform_ops as spatial_ops +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + +# pylint: disable=g-import-not-at-top +try: + import tf_slim as slim +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +BOX_CODE_SIZE = 4 + + +class FakeFasterRCNNFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Fake feature extractor to use in tests.""" + + def __init__(self): + super(FakeFasterRCNNFeatureExtractor, self).__init__( + is_training=False, + first_stage_features_stride=32, + reuse_weights=None, + weight_decay=0.0) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def _extract_proposal_features(self, preprocessed_inputs, scope): + with tf.variable_scope('mock_model'): + proposal_features = 0 * slim.conv2d( + preprocessed_inputs, num_outputs=3, kernel_size=1, scope='layer1') + return proposal_features, {} + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + with tf.variable_scope('mock_model'): + return 0 * slim.conv2d( + proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer2') + + +class FakeFasterRCNNMultiLevelFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Fake feature extractor to use in tests.""" + + def __init__(self): + super(FakeFasterRCNNMultiLevelFeatureExtractor, self).__init__( + is_training=False, + first_stage_features_stride=32, + reuse_weights=None, + weight_decay=0.0) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def _extract_proposal_features(self, preprocessed_inputs, scope): + with tf.variable_scope('mock_model'): + proposal_features_1 = 0 * slim.conv2d( + preprocessed_inputs, num_outputs=3, kernel_size=3, scope='layer1', + padding='VALID') + proposal_features_2 = 0 * slim.conv2d( + proposal_features_1, num_outputs=3, kernel_size=3, scope='layer2', + padding='VALID') + return [proposal_features_1, proposal_features_2], {} + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + with tf.variable_scope('mock_model'): + return 0 * slim.conv2d( + proposal_feature_maps, num_outputs=3, kernel_size=1, scope='layer3') + + +class FakeFasterRCNNKerasFeatureExtractor( + 
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): + """Fake feature extractor to use in tests.""" + + def __init__(self): + super(FakeFasterRCNNKerasFeatureExtractor, self).__init__( + is_training=False, + first_stage_features_stride=32, + weight_decay=0.0) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def get_proposal_feature_extractor_model(self, name): + + class ProposalFeatureExtractor(tf.keras.Model): + """Dummy proposal feature extraction.""" + + def __init__(self, name): + super(ProposalFeatureExtractor, self).__init__(name=name) + self.conv = None + + def build(self, input_shape): + self.conv = tf.keras.layers.Conv2D( + 3, kernel_size=1, padding='SAME', name='layer1') + + def call(self, inputs): + return self.conv(inputs) + + return ProposalFeatureExtractor(name=name) + + def get_box_classifier_feature_extractor_model(self, name): + return tf.keras.Sequential([tf.keras.layers.Conv2D( + 3, kernel_size=1, padding='SAME', name=name + '_layer2')]) + + +class FakeFasterRCNNKerasMultilevelFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): + """Fake feature extractor to use in tests.""" + + def __init__(self): + super(FakeFasterRCNNKerasMultilevelFeatureExtractor, self).__init__( + is_training=False, + first_stage_features_stride=32, + weight_decay=0.0) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def get_proposal_feature_extractor_model(self, name): + + class ProposalFeatureExtractor(tf.keras.Model): + """Dummy proposal feature extraction.""" + + def __init__(self, name): + super(ProposalFeatureExtractor, self).__init__(name=name) + self.conv = None + + def build(self, input_shape): + self.conv = tf.keras.layers.Conv2D( + 3, kernel_size=3, name='layer1') + self.conv_1 = tf.keras.layers.Conv2D( + 3, kernel_size=3, name='layer1') + + def call(self, inputs): + output_1 = self.conv(inputs) + output_2 = self.conv_1(output_1) + return [output_1, output_2] + + return ProposalFeatureExtractor(name=name) + + +class FasterRCNNMetaArchTestBase(test_case.TestCase, parameterized.TestCase): + """Base class to test Faster R-CNN and R-FCN meta architectures.""" + + def _build_arg_scope_with_hyperparams(self, + hyperparams_text_proto, + is_training): + hyperparams = hyperparams_pb2.Hyperparams() + text_format.Merge(hyperparams_text_proto, hyperparams) + return hyperparams_builder.build(hyperparams, is_training=is_training) + + def _build_keras_layer_hyperparams(self, hyperparams_text_proto): + hyperparams = hyperparams_pb2.Hyperparams() + text_format.Merge(hyperparams_text_proto, hyperparams) + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def _get_second_stage_box_predictor_text_proto( + self, share_box_across_classes=False): + share_box_field = 'true' if share_box_across_classes else 'false' + box_predictor_text_proto = """ + mask_rcnn_box_predictor {{ + fc_hyperparams {{ + op: FC + activation: NONE + regularizer {{ + l2_regularizer {{ + weight: 0.0005 + }} + }} + initializer {{ + variance_scaling_initializer {{ + factor: 1.0 + uniform: true + mode: FAN_AVG + }} + }} + }} + share_box_across_classes: {share_box_across_classes} + }} + """.format(share_box_across_classes=share_box_field) + return box_predictor_text_proto + + def _add_mask_to_second_stage_box_predictor_text_proto( + self, masks_are_class_agnostic=False): + agnostic = 'true' if masks_are_class_agnostic else 'false' + box_predictor_text_proto = """ + mask_rcnn_box_predictor { + predict_instance_masks: true + 
masks_are_class_agnostic: """ + agnostic + """ + mask_height: 14 + mask_width: 14 + conv_hyperparams { + op: CONV + regularizer { + l2_regularizer { + weight: 0.0 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + } + } + } + } + """ + return box_predictor_text_proto + + def _get_second_stage_box_predictor(self, num_classes, is_training, + predict_masks, masks_are_class_agnostic, + share_box_across_classes=False, + use_keras=False): + box_predictor_proto = box_predictor_pb2.BoxPredictor() + text_format.Merge(self._get_second_stage_box_predictor_text_proto( + share_box_across_classes), box_predictor_proto) + if predict_masks: + text_format.Merge( + self._add_mask_to_second_stage_box_predictor_text_proto( + masks_are_class_agnostic), + box_predictor_proto) + + if use_keras: + return box_predictor_builder.build_keras( + hyperparams_builder.KerasLayerHyperparams, + inplace_batchnorm_update=False, + freeze_batchnorm=False, + box_predictor_config=box_predictor_proto, + num_classes=num_classes, + num_predictions_per_location_list=None, + is_training=is_training) + else: + return box_predictor_builder.build( + hyperparams_builder.build, + box_predictor_proto, + num_classes=num_classes, + is_training=is_training) + + def _get_model(self, box_predictor, keras_model=False, **common_kwargs): + return faster_rcnn_meta_arch.FasterRCNNMetaArch( + initial_crop_size=3, + maxpool_kernel_size=1, + maxpool_stride=1, + second_stage_mask_rcnn_box_predictor=box_predictor, + **common_kwargs) + + def _build_model(self, + is_training, + number_of_stages, + second_stage_batch_size, + first_stage_max_proposals=8, + num_classes=2, + hard_mining=False, + softmax_second_stage_classification_loss=True, + predict_masks=False, + pad_to_max_dimension=None, + masks_are_class_agnostic=False, + use_matmul_crop_and_resize=False, + clip_anchors_to_image=False, + use_matmul_gather_in_matcher=False, + use_static_shapes=False, + calibration_mapping_value=None, + share_box_across_classes=False, + return_raw_detections_during_predict=False, + output_final_box_features=False, + multi_level=False): + use_keras = tf_version.is_tf2() + def image_resizer_fn(image, masks=None): + """Fake image resizer function.""" + resized_inputs = [] + resized_image = tf.identity(image) + if pad_to_max_dimension is not None: + resized_image = tf.image.pad_to_bounding_box(image, 0, 0, + pad_to_max_dimension, + pad_to_max_dimension) + resized_inputs.append(resized_image) + if masks is not None: + resized_masks = tf.identity(masks) + if pad_to_max_dimension is not None: + resized_masks = tf.image.pad_to_bounding_box(tf.transpose(masks, + [1, 2, 0]), + 0, 0, + pad_to_max_dimension, + pad_to_max_dimension) + resized_masks = tf.transpose(resized_masks, [2, 0, 1]) + resized_inputs.append(resized_masks) + resized_inputs.append(tf.shape(image)) + return resized_inputs + + # anchors in this test are designed so that a subset of anchors are inside + # the image and a subset of anchors are outside. 
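+ # Depending on the multi_level flag, the test builds either an FPN-style
+ # multiscale grid anchor generator or a plain single-level grid anchor
+ # generator below.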
+ first_stage_anchor_generator = None + if multi_level: + min_level = 0 + max_level = 1 + anchor_scale = 0.1 + aspect_ratios = [1.0, 2.0, 0.5] + scales_per_octave = 2 + normalize_coordinates = False + (first_stage_anchor_generator + ) = multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator( + min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave, + normalize_coordinates) + else: + first_stage_anchor_scales = (0.001, 0.005, 0.1) + first_stage_anchor_aspect_ratios = (0.5, 1.0, 2.0) + first_stage_anchor_strides = (1, 1) + first_stage_anchor_generator = grid_anchor_generator.GridAnchorGenerator( + first_stage_anchor_scales, + first_stage_anchor_aspect_ratios, + anchor_stride=first_stage_anchor_strides) + first_stage_target_assigner = target_assigner.create_target_assigner( + 'FasterRCNN', + 'proposal', + use_matmul_gather=use_matmul_gather_in_matcher) + + if use_keras: + if multi_level: + fake_feature_extractor = FakeFasterRCNNKerasMultilevelFeatureExtractor() + else: + fake_feature_extractor = FakeFasterRCNNKerasFeatureExtractor() + else: + if multi_level: + fake_feature_extractor = FakeFasterRCNNMultiLevelFeatureExtractor() + else: + fake_feature_extractor = FakeFasterRCNNFeatureExtractor() + + first_stage_box_predictor_hyperparams_text_proto = """ + op: CONV + activation: RELU + regularizer { + l2_regularizer { + weight: 0.00004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + } + } + """ + if use_keras: + first_stage_box_predictor_arg_scope_fn = ( + self._build_keras_layer_hyperparams( + first_stage_box_predictor_hyperparams_text_proto)) + else: + first_stage_box_predictor_arg_scope_fn = ( + self._build_arg_scope_with_hyperparams( + first_stage_box_predictor_hyperparams_text_proto, is_training)) + + first_stage_box_predictor_kernel_size = 3 + first_stage_atrous_rate = 1 + first_stage_box_predictor_depth = 512 + first_stage_minibatch_size = 3 + first_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=0.5, is_static=use_static_shapes) + + first_stage_nms_score_threshold = -1.0 + first_stage_nms_iou_threshold = 1.0 + first_stage_max_proposals = first_stage_max_proposals + first_stage_non_max_suppression_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=first_stage_nms_score_threshold, + iou_thresh=first_stage_nms_iou_threshold, + max_size_per_class=first_stage_max_proposals, + max_total_size=first_stage_max_proposals, + use_static_shapes=use_static_shapes) + + first_stage_localization_loss_weight = 1.0 + first_stage_objectness_loss_weight = 1.0 + + post_processing_config = post_processing_pb2.PostProcessing() + post_processing_text_proto = """ + score_converter: IDENTITY + batch_non_max_suppression { + score_threshold: -20.0 + iou_threshold: 1.0 + max_detections_per_class: 5 + max_total_detections: 5 + use_static_shapes: """ +'{}'.format(use_static_shapes) + """ + } + """ + if calibration_mapping_value: + calibration_text_proto = """ + calibration_config { + function_approximation { + x_y_pairs { + x_y_pair { + x: 0.0 + y: %f + } + x_y_pair { + x: 1.0 + y: %f + }}}}""" % (calibration_mapping_value, calibration_mapping_value) + post_processing_text_proto = (post_processing_text_proto + + ' ' + calibration_text_proto) + text_format.Merge(post_processing_text_proto, post_processing_config) + second_stage_non_max_suppression_fn, second_stage_score_conversion_fn = ( + post_processing_builder.build(post_processing_config)) + + second_stage_target_assigner = 
target_assigner.create_target_assigner( + 'FasterRCNN', 'detection', + use_matmul_gather=use_matmul_gather_in_matcher) + second_stage_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=1.0, is_static=use_static_shapes) + + second_stage_localization_loss_weight = 1.0 + second_stage_classification_loss_weight = 1.0 + if softmax_second_stage_classification_loss: + second_stage_classification_loss = ( + losses.WeightedSoftmaxClassificationLoss()) + else: + second_stage_classification_loss = ( + losses.WeightedSigmoidClassificationLoss()) + + hard_example_miner = None + if hard_mining: + hard_example_miner = losses.HardExampleMiner( + num_hard_examples=1, + iou_threshold=0.99, + loss_type='both', + cls_loss_weight=second_stage_classification_loss_weight, + loc_loss_weight=second_stage_localization_loss_weight, + max_negatives_per_positive=None) + + crop_and_resize_fn = ( + spatial_ops.multilevel_matmul_crop_and_resize + if use_matmul_crop_and_resize + else spatial_ops.multilevel_native_crop_and_resize) + common_kwargs = { + 'is_training': + is_training, + 'num_classes': + num_classes, + 'image_resizer_fn': + image_resizer_fn, + 'feature_extractor': + fake_feature_extractor, + 'number_of_stages': + number_of_stages, + 'first_stage_anchor_generator': + first_stage_anchor_generator, + 'first_stage_target_assigner': + first_stage_target_assigner, + 'first_stage_atrous_rate': + first_stage_atrous_rate, + 'first_stage_box_predictor_arg_scope_fn': + first_stage_box_predictor_arg_scope_fn, + 'first_stage_box_predictor_kernel_size': + first_stage_box_predictor_kernel_size, + 'first_stage_box_predictor_depth': + first_stage_box_predictor_depth, + 'first_stage_minibatch_size': + first_stage_minibatch_size, + 'first_stage_sampler': + first_stage_sampler, + 'first_stage_non_max_suppression_fn': + first_stage_non_max_suppression_fn, + 'first_stage_max_proposals': + first_stage_max_proposals, + 'first_stage_localization_loss_weight': + first_stage_localization_loss_weight, + 'first_stage_objectness_loss_weight': + first_stage_objectness_loss_weight, + 'second_stage_target_assigner': + second_stage_target_assigner, + 'second_stage_batch_size': + second_stage_batch_size, + 'second_stage_sampler': + second_stage_sampler, + 'second_stage_non_max_suppression_fn': + second_stage_non_max_suppression_fn, + 'second_stage_score_conversion_fn': + second_stage_score_conversion_fn, + 'second_stage_localization_loss_weight': + second_stage_localization_loss_weight, + 'second_stage_classification_loss_weight': + second_stage_classification_loss_weight, + 'second_stage_classification_loss': + second_stage_classification_loss, + 'hard_example_miner': + hard_example_miner, + 'crop_and_resize_fn': + crop_and_resize_fn, + 'clip_anchors_to_image': + clip_anchors_to_image, + 'use_static_shapes': + use_static_shapes, + 'resize_masks': + True, + 'return_raw_detections_during_predict': + return_raw_detections_during_predict, + 'output_final_box_features': + output_final_box_features + } + + return self._get_model( + self._get_second_stage_box_predictor( + num_classes=num_classes, + is_training=is_training, + use_keras=use_keras, + predict_masks=predict_masks, + masks_are_class_agnostic=masks_are_class_agnostic, + share_box_across_classes=share_box_across_classes), **common_kwargs) + + @parameterized.parameters( + {'use_static_shapes': False}, + {'use_static_shapes': True}, + ) + def test_predict_gives_correct_shapes_in_inference_mode_first_stage_only( + self, use_static_shapes=False): + batch_size = 2 + height = 
10 + width = 12 + input_image_shape = (batch_size, height, width, 3) + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=1, + second_stage_batch_size=2, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes) + def graph_fn(images): + """Function to construct tf graph for the test.""" + + preprocessed_inputs, true_image_shapes = model.preprocess(images) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + return (prediction_dict['rpn_box_predictor_features'][0], + prediction_dict['rpn_features_to_crop'][0], + prediction_dict['image_shape'], + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + prediction_dict['anchors']) + + images = np.zeros(input_image_shape, dtype=np.float32) + + # In inference mode, anchors are clipped to the image window, but not + # pruned. Since MockFasterRCNN.extract_proposal_features returns a + # tensor with the same shape as its input, the expected number of anchors + # is height * width * the number of anchors per location (i.e. 3x3). + expected_num_anchors = height * width * 3 * 3 + expected_output_shapes = { + 'rpn_box_predictor_features': (batch_size, height, width, 512), + 'rpn_features_to_crop': (batch_size, height, width, 3), + 'rpn_box_encodings': (batch_size, expected_num_anchors, 4), + 'rpn_objectness_predictions_with_background': + (batch_size, expected_num_anchors, 2), + 'anchors': (expected_num_anchors, 4) + } + + if use_static_shapes: + results = self.execute(graph_fn, [images], graph=g) + else: + results = self.execute_cpu(graph_fn, [images], graph=g) + + self.assertAllEqual(results[0].shape, + expected_output_shapes['rpn_box_predictor_features']) + self.assertAllEqual(results[1].shape, + expected_output_shapes['rpn_features_to_crop']) + self.assertAllEqual(results[2], + input_image_shape) + self.assertAllEqual(results[3].shape, + expected_output_shapes['rpn_box_encodings']) + self.assertAllEqual( + results[4].shape, + expected_output_shapes['rpn_objectness_predictions_with_background']) + self.assertAllEqual(results[5].shape, + expected_output_shapes['anchors']) + + # Check that anchors are clipped to window. 
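+ # results[5] is the anchors tensor; boxes are [ymin, xmin, ymax, xmax], so
+ # columns 0 and 2 are bounded by the image height and columns 1 and 3 by
+ # the image width.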
+ anchors = results[5] + self.assertTrue(np.all(np.greater_equal(anchors, 0))) + self.assertTrue(np.all(np.less_equal(anchors[:, 0], height))) + self.assertTrue(np.all(np.less_equal(anchors[:, 1], width))) + self.assertTrue(np.all(np.less_equal(anchors[:, 2], height))) + self.assertTrue(np.all(np.less_equal(anchors[:, 3], width))) + + @parameterized.parameters( + {'use_static_shapes': False}, + {'use_static_shapes': True}, + ) + def test_predict_shape_in_inference_mode_first_stage_only_multi_level( + self, use_static_shapes): + batch_size = 2 + height = 50 + width = 52 + input_image_shape = (batch_size, height, width, 3) + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=1, + second_stage_batch_size=2, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes, + multi_level=True) + def graph_fn(images): + """Function to construct tf graph for the test.""" + + preprocessed_inputs, true_image_shapes = model.preprocess(images) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + return (prediction_dict['rpn_box_predictor_features'][0], + prediction_dict['rpn_box_predictor_features'][1], + prediction_dict['rpn_features_to_crop'][0], + prediction_dict['rpn_features_to_crop'][1], + prediction_dict['image_shape'], + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + prediction_dict['anchors']) + + images = np.zeros(input_image_shape, dtype=np.float32) + + # In inference mode, anchors are clipped to the image window, but not + # pruned. Since MockFasterRCNN.extract_proposal_features returns a + # tensor with the same shape as its input, the expected number of anchors + # is height * width * the number of anchors per location (i.e. 3x3). + expected_num_anchors = ((height-2) * (width-2) + (height-4) * (width-4)) * 6 + expected_output_shapes = { + 'rpn_box_predictor_features_0': (batch_size, height-2, width-2, 512), + 'rpn_box_predictor_features_1': (batch_size, height-4, width-4, 512), + 'rpn_features_to_crop_0': (batch_size, height-2, width-2, 3), + 'rpn_features_to_crop_1': (batch_size, height-4, width-4, 3), + 'rpn_box_encodings': (batch_size, expected_num_anchors, 4), + 'rpn_objectness_predictions_with_background': + (batch_size, expected_num_anchors, 2), + } + + if use_static_shapes: + expected_output_shapes['anchors'] = (expected_num_anchors, 4) + else: + expected_output_shapes['anchors'] = (18300, 4) + + if use_static_shapes: + results = self.execute(graph_fn, [images], graph=g) + else: + results = self.execute_cpu(graph_fn, [images], graph=g) + + self.assertAllEqual(results[0].shape, + expected_output_shapes['rpn_box_predictor_features_0']) + self.assertAllEqual(results[1].shape, + expected_output_shapes['rpn_box_predictor_features_1']) + self.assertAllEqual(results[2].shape, + expected_output_shapes['rpn_features_to_crop_0']) + self.assertAllEqual(results[3].shape, + expected_output_shapes['rpn_features_to_crop_1']) + self.assertAllEqual(results[4], + input_image_shape) + self.assertAllEqual(results[5].shape, + expected_output_shapes['rpn_box_encodings']) + self.assertAllEqual( + results[6].shape, + expected_output_shapes['rpn_objectness_predictions_with_background']) + self.assertAllEqual(results[7].shape, + expected_output_shapes['anchors']) + + # Check that anchors are clipped to window. 
+ anchors = results[5] + self.assertTrue(np.all(np.greater_equal(anchors, 0))) + self.assertTrue(np.all(np.less_equal(anchors[:, 0], height))) + self.assertTrue(np.all(np.less_equal(anchors[:, 1], width))) + self.assertTrue(np.all(np.less_equal(anchors[:, 2], height))) + self.assertTrue(np.all(np.less_equal(anchors[:, 3], width))) + + def test_regularization_losses(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, number_of_stages=1, second_stage_batch_size=2) + def graph_fn(): + batch_size = 2 + height = 10 + width = 12 + input_image_shape = (batch_size, height, width, 3) + image, true_image_shapes = model.preprocess(tf.zeros(input_image_shape)) + model.predict(image, true_image_shapes) + + reg_losses = tf.math.add_n(model.regularization_losses()) + return reg_losses + reg_losses = self.execute(graph_fn, [], graph=g) + self.assertGreaterEqual(reg_losses, 0) + + def test_predict_gives_valid_anchors_in_training_mode_first_stage_only(self): + expected_output_keys = set([ + 'rpn_box_predictor_features', 'rpn_features_to_crop', 'image_shape', + 'rpn_box_encodings', 'rpn_objectness_predictions_with_background', + 'anchors', 'feature_maps']) + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, number_of_stages=1, second_stage_batch_size=2,) + + batch_size = 2 + height = 10 + width = 12 + input_image_shape = (batch_size, height, width, 3) + def graph_fn(): + image, true_image_shapes = model.preprocess(tf.zeros(input_image_shape)) + prediction_dict = model.predict(image, true_image_shapes) + self.assertEqual(set(prediction_dict.keys()), expected_output_keys) + return (prediction_dict['image_shape'], prediction_dict['anchors'], + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background']) + + (image_shape, anchors, rpn_box_encodings, + rpn_objectness_predictions_with_background) = self.execute(graph_fn, [], + graph=g) + # At training time, anchors that exceed image bounds are pruned. Thus + # the `expected_num_anchors` in the above inference mode test is now + # a strict upper bound on the number of anchors. 
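+ # The grid generator uses 3 scales and 3 aspect ratios with an anchor
+ # stride of (1, 1), i.e. at most height * width * 9 anchors before pruning.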
+ num_anchors_strict_upper_bound = height * width * 3 * 3 + self.assertAllEqual(image_shape, input_image_shape) + self.assertTrue(len(anchors.shape) == 2 and anchors.shape[1] == 4) + num_anchors_out = anchors.shape[0] + self.assertLess(num_anchors_out, num_anchors_strict_upper_bound) + + self.assertTrue(np.all(np.greater_equal(anchors, 0))) + self.assertTrue(np.all(np.less_equal(anchors[:, 0], height))) + self.assertTrue(np.all(np.less_equal(anchors[:, 1], width))) + self.assertTrue(np.all(np.less_equal(anchors[:, 2], height))) + self.assertTrue(np.all(np.less_equal(anchors[:, 3], width))) + + self.assertAllEqual(rpn_box_encodings.shape, + (batch_size, num_anchors_out, 4)) + self.assertAllEqual( + rpn_objectness_predictions_with_background.shape, + (batch_size, num_anchors_out, 2)) + + @parameterized.parameters( + {'use_static_shapes': False}, + {'use_static_shapes': True}, + ) + def test_predict_correct_shapes_in_inference_mode_two_stages( + self, use_static_shapes): + + def compare_results(results, expected_output_shapes): + """Checks if the shape of the predictions are as expected.""" + self.assertAllEqual(results[0][0].shape, + expected_output_shapes['rpn_box_predictor_features']) + self.assertAllEqual(results[1][0].shape, + expected_output_shapes['rpn_features_to_crop']) + self.assertAllEqual(results[2].shape, + expected_output_shapes['image_shape']) + self.assertAllEqual(results[3].shape, + expected_output_shapes['rpn_box_encodings']) + self.assertAllEqual( + results[4].shape, + expected_output_shapes['rpn_objectness_predictions_with_background']) + self.assertAllEqual(results[5].shape, + expected_output_shapes['anchors']) + self.assertAllEqual(results[6].shape, + expected_output_shapes['refined_box_encodings']) + self.assertAllEqual( + results[7].shape, + expected_output_shapes['class_predictions_with_background']) + self.assertAllEqual(results[8].shape, + expected_output_shapes['num_proposals']) + self.assertAllEqual(results[9].shape, + expected_output_shapes['proposal_boxes']) + self.assertAllEqual(results[10].shape, + expected_output_shapes['proposal_boxes_normalized']) + self.assertAllEqual(results[11].shape, + expected_output_shapes['box_classifier_features']) + self.assertAllEqual(results[12].shape, + expected_output_shapes['final_anchors']) + batch_size = 2 + image_size = 10 + max_num_proposals = 8 + initial_crop_size = 3 + maxpool_stride = 1 + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=2, + predict_masks=False, + use_matmul_crop_and_resize=use_static_shapes, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes) + def graph_fn(): + """A function with TF compute.""" + if use_static_shapes: + images = tf.random_uniform((batch_size, image_size, image_size, 3)) + else: + images = tf.random_uniform((tf.random_uniform([], + minval=batch_size, + maxval=batch_size + 1, + dtype=tf.int32), + tf.random_uniform([], + minval=image_size, + maxval=image_size + 1, + dtype=tf.int32), + tf.random_uniform([], + minval=image_size, + maxval=image_size + 1, + dtype=tf.int32), 3)) + preprocessed_inputs, true_image_shapes = model.preprocess(images) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + return (prediction_dict['rpn_box_predictor_features'], + prediction_dict['rpn_features_to_crop'], + prediction_dict['image_shape'], + prediction_dict['rpn_box_encodings'], + prediction_dict['rpn_objectness_predictions_with_background'], + 
prediction_dict['anchors'], + prediction_dict['refined_box_encodings'], + prediction_dict['class_predictions_with_background'], + prediction_dict['num_proposals'], + prediction_dict['proposal_boxes'], + prediction_dict['proposal_boxes_normalized'], + prediction_dict['box_classifier_features'], + prediction_dict['final_anchors']) + expected_num_anchors = image_size * image_size * 3 * 3 + expected_shapes = { + 'rpn_box_predictor_features': + (2, image_size, image_size, 512), + 'rpn_features_to_crop': (2, image_size, image_size, 3), + 'image_shape': (4,), + 'rpn_box_encodings': (2, expected_num_anchors, 4), + 'rpn_objectness_predictions_with_background': + (2, expected_num_anchors, 2), + 'anchors': (expected_num_anchors, 4), + 'refined_box_encodings': (2 * max_num_proposals, 2, 4), + 'class_predictions_with_background': (2 * max_num_proposals, 2 + 1), + 'num_proposals': (2,), + 'proposal_boxes': (2, max_num_proposals, 4), + 'proposal_boxes_normalized': (2, max_num_proposals, 4), + 'box_classifier_features': + self._get_box_classifier_features_shape(image_size, + batch_size, + max_num_proposals, + initial_crop_size, + maxpool_stride, + 3), + 'feature_maps': [(2, image_size, image_size, 512)], + 'final_anchors': (2, max_num_proposals, 4) + } + + if use_static_shapes: + results = self.execute(graph_fn, [], graph=g) + else: + results = self.execute_cpu(graph_fn, [], graph=g) + compare_results(results, expected_shapes) + + @parameterized.parameters( + {'use_static_shapes': False}, + {'use_static_shapes': True}, + ) + def test_predict_gives_correct_shapes_in_train_mode_both_stages( + self, + use_static_shapes=False): + batch_size = 2 + image_size = 10 + max_num_proposals = 7 + initial_crop_size = 3 + maxpool_stride = 1 + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=2, + second_stage_batch_size=7, + predict_masks=False, + use_matmul_crop_and_resize=use_static_shapes, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes) + + def graph_fn(images, gt_boxes, gt_classes, gt_weights): + """Function to construct tf graph for the test.""" + preprocessed_inputs, true_image_shapes = model.preprocess(images) + model.provide_groundtruth( + groundtruth_boxes_list=tf.unstack(gt_boxes), + groundtruth_classes_list=tf.unstack(gt_classes), + groundtruth_weights_list=tf.unstack(gt_weights)) + result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes) + return (result_tensor_dict['refined_box_encodings'], + result_tensor_dict['class_predictions_with_background'], + result_tensor_dict['proposal_boxes'], + result_tensor_dict['proposal_boxes_normalized'], + result_tensor_dict['anchors'], + result_tensor_dict['rpn_box_encodings'], + result_tensor_dict['rpn_objectness_predictions_with_background'], + result_tensor_dict['rpn_features_to_crop'][0], + result_tensor_dict['rpn_box_predictor_features'][0], + result_tensor_dict['final_anchors'], + ) + + image_shape = (batch_size, image_size, image_size, 3) + images = np.zeros(image_shape, dtype=np.float32) + gt_boxes = np.stack([ + np.array([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=np.float32), + np.array([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=np.float32) + ]) + gt_classes = np.stack([ + np.array([[1, 0], [0, 1]], dtype=np.float32), + np.array([[1, 0], [1, 0]], dtype=np.float32) + ]) + gt_weights = np.stack([ + np.array([1, 1], dtype=np.float32), + np.array([1, 1], dtype=np.float32) + ]) + if use_static_shapes: + results = self.execute(graph_fn, + [images, gt_boxes, 
gt_classes, gt_weights], + graph=g) + else: + results = self.execute_cpu(graph_fn, + [images, gt_boxes, gt_classes, gt_weights], + graph=g) + + expected_shapes = { + 'rpn_box_predictor_features': (2, image_size, image_size, 512), + 'rpn_features_to_crop': (2, image_size, image_size, 3), + 'refined_box_encodings': (2 * max_num_proposals, 2, 4), + 'class_predictions_with_background': (2 * max_num_proposals, 2 + 1), + 'proposal_boxes': (2, max_num_proposals, 4), + 'rpn_box_encodings': (2, image_size * image_size * 9, 4), + 'proposal_boxes_normalized': (2, max_num_proposals, 4), + 'box_classifier_features': + self._get_box_classifier_features_shape( + image_size, batch_size, max_num_proposals, initial_crop_size, + maxpool_stride, 3), + 'rpn_objectness_predictions_with_background': + (2, image_size * image_size * 9, 2), + 'final_anchors': (2, max_num_proposals, 4) + } + # TODO(rathodv): Possibly change utils/test_case.py to accept dictionaries + # and return dicionaries so don't have to rely on the order of tensors. + self.assertAllEqual(results[0].shape, + expected_shapes['refined_box_encodings']) + self.assertAllEqual(results[1].shape, + expected_shapes['class_predictions_with_background']) + self.assertAllEqual(results[2].shape, expected_shapes['proposal_boxes']) + self.assertAllEqual(results[3].shape, + expected_shapes['proposal_boxes_normalized']) + anchors_shape = results[4].shape + self.assertAllEqual(results[5].shape, + [batch_size, anchors_shape[0], 4]) + self.assertAllEqual(results[6].shape, + [batch_size, anchors_shape[0], 2]) + self.assertAllEqual(results[7].shape, + expected_shapes['rpn_features_to_crop']) + self.assertAllEqual(results[8].shape, + expected_shapes['rpn_box_predictor_features']) + self.assertAllEqual(results[9].shape, + expected_shapes['final_anchors']) + + @parameterized.parameters( + {'use_static_shapes': False, 'pad_to_max_dimension': None}, + {'use_static_shapes': True, 'pad_to_max_dimension': None}, + {'use_static_shapes': False, 'pad_to_max_dimension': 56,}, + {'use_static_shapes': True, 'pad_to_max_dimension': 56}, + ) + def test_postprocess_first_stage_only_inference_mode( + self, use_static_shapes=False, + pad_to_max_dimension=None): + batch_size = 2 + first_stage_max_proposals = 4 if use_static_shapes else 8 + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=1, second_stage_batch_size=6, + use_matmul_crop_and_resize=use_static_shapes, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes, + use_matmul_gather_in_matcher=use_static_shapes, + first_stage_max_proposals=first_stage_max_proposals, + pad_to_max_dimension=pad_to_max_dimension) + + def graph_fn(images, + rpn_box_encodings, + rpn_objectness_predictions_with_background, + rpn_features_to_crop, + anchors): + """Function to construct tf graph for the test.""" + preprocessed_images, true_image_shapes = model.preprocess(images) + proposals = model.postprocess({ + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'rpn_features_to_crop': rpn_features_to_crop, + 'image_shape': tf.shape(preprocessed_images), + 'anchors': anchors}, true_image_shapes) + return (proposals['num_detections'], proposals['detection_boxes'], + proposals['detection_scores'], proposals['raw_detection_boxes'], + proposals['raw_detection_scores']) + + anchors = np.array( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], 
dtype=np.float32) + rpn_box_encodings = np.zeros( + (batch_size, anchors.shape[0], BOX_CODE_SIZE), dtype=np.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = np.array([ + [[-10, 13], + [10, -10], + [10, -11], + [-10, 12]], + [[10, -10], + [-10, 13], + [-10, 12], + [10, -11]]], dtype=np.float32) + rpn_features_to_crop = np.ones((batch_size, 8, 8, 10), dtype=np.float32) + image_shape = (batch_size, 32, 32, 3) + images = np.zeros(image_shape, dtype=np.float32) + + if use_static_shapes: + results = self.execute(graph_fn, + [images, rpn_box_encodings, + rpn_objectness_predictions_with_background, + rpn_features_to_crop, anchors], graph=g) + else: + results = self.execute_cpu(graph_fn, + [images, rpn_box_encodings, + rpn_objectness_predictions_with_background, + rpn_features_to_crop, anchors], graph=g) + + expected_proposal_boxes = [ + [[0, 0, .5, .5], [.5, .5, 1, 1], [0, .5, .5, 1], [.5, 0, 1.0, .5]] + + 4 * [4 * [0]], + [[0, .5, .5, 1], [.5, 0, 1.0, .5], [0, 0, .5, .5], [.5, .5, 1, 1]] + + 4 * [4 * [0]]] + expected_proposal_scores = [[1, 1, 0, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0, 0, 0]] + expected_num_proposals = [4, 4] + expected_raw_proposal_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]], + [[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]]] + expected_raw_scores = [[[0., 1.], [1., 0.], [1., 0.], [0., 1.]], + [[1., 0.], [0., 1.], [0., 1.], [1., 0.]]] + + if pad_to_max_dimension is not None: + expected_raw_proposal_boxes = (np.array(expected_raw_proposal_boxes) * + 32 / pad_to_max_dimension) + expected_proposal_boxes = (np.array(expected_proposal_boxes) * + 32 / pad_to_max_dimension) + + self.assertAllClose(results[0], expected_num_proposals) + for indx, num_proposals in enumerate(expected_num_proposals): + self.assertAllClose(results[1][indx][0:num_proposals], + expected_proposal_boxes[indx][0:num_proposals]) + self.assertAllClose(results[2][indx][0:num_proposals], + expected_proposal_scores[indx][0:num_proposals]) + self.assertAllClose(results[3], expected_raw_proposal_boxes) + self.assertAllClose(results[4], expected_raw_scores) + + def _test_postprocess_first_stage_only_train_mode(self, + pad_to_max_dimension=None): + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=1, second_stage_batch_size=2, + pad_to_max_dimension=pad_to_max_dimension) + batch_size = 2 + + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + rpn_box_encodings = tf.zeros( + [batch_size, anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant([ + [[-10, 13], + [-10, 12], + [-10, 11], + [-10, 10]], + [[-10, 13], + [-10, 12], + [-10, 11], + [-10, 10]]], dtype=tf.float32) + rpn_features_to_crop = tf.ones((batch_size, 8, 8, 10), dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), + tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)] + groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]], + dtype=tf.float32), + tf.constant([[1, 0], [1, 
0]], + dtype=tf.float32)] + groundtruth_weights_list = [ + tf.constant([1, 1], dtype=tf.float32), + tf.constant([1, 1], dtype=tf.float32) + ] + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth( + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_weights_list=groundtruth_weights_list) + proposals = model.postprocess({ + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'rpn_features_to_crop': rpn_features_to_crop, + 'anchors': anchors, + 'image_shape': image_shape}, true_image_shapes) + return (proposals['detection_boxes'], proposals['detection_scores'], + proposals['num_detections'], + proposals['detection_multiclass_scores'], + proposals['raw_detection_boxes'], + proposals['raw_detection_scores']) + + expected_proposal_boxes = [ + [[0, 0, .5, .5], [.5, .5, 1, 1]], [[0, .5, .5, 1], [.5, 0, 1, .5]]] + expected_proposal_scores = [[1, 1], + [1, 1]] + expected_proposal_multiclass_scores = [[[0., 1.], [0., 1.]], + [[0., 1.], [0., 1.]]] + expected_raw_proposal_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]], + [[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [0.5, 0.5, 1., 1.]]] + expected_raw_scores = [[[0., 1.], [0., 1.], [0., 1.], [0., 1.]], + [[0., 1.], [0., 1.], [0., 1.], [0., 1.]]] + + (proposal_boxes, proposal_scores, batch_num_detections, + batch_multiclass_scores, raw_detection_boxes, + raw_detection_scores) = self.execute_cpu(graph_fn, [], graph=g) + for image_idx in range(batch_size): + num_detections = int(batch_num_detections[image_idx]) + boxes = proposal_boxes[image_idx][:num_detections, :].tolist() + scores = proposal_scores[image_idx][:num_detections].tolist() + multiclass_scores = batch_multiclass_scores[ + image_idx][:num_detections, :].tolist() + expected_boxes = expected_proposal_boxes[image_idx] + expected_scores = expected_proposal_scores[image_idx] + expected_multiclass_scores = expected_proposal_multiclass_scores[ + image_idx] + self.assertTrue( + test_utils.first_rows_close_as_set(boxes, expected_boxes)) + self.assertTrue( + test_utils.first_rows_close_as_set(scores, expected_scores)) + self.assertTrue( + test_utils.first_rows_close_as_set(multiclass_scores, + expected_multiclass_scores)) + + self.assertAllClose(raw_detection_boxes, expected_raw_proposal_boxes) + self.assertAllClose(raw_detection_scores, expected_raw_scores) + + @parameterized.parameters( + {'pad_to_max_dimension': 56}, + {'pad_to_max_dimension': None} + ) + def test_postprocess_first_stage_only_train_mode_padded_image( + self, pad_to_max_dimension): + self._test_postprocess_first_stage_only_train_mode(pad_to_max_dimension) + + @parameterized.parameters( + {'use_static_shapes': False, 'pad_to_max_dimension': None}, + {'use_static_shapes': True, 'pad_to_max_dimension': None}, + {'use_static_shapes': False, 'pad_to_max_dimension': 56}, + {'use_static_shapes': True, 'pad_to_max_dimension': 56}, + ) + def test_postprocess_second_stage_only_inference_mode( + self, use_static_shapes=False, + pad_to_max_dimension=None): + batch_size = 2 + num_classes = 2 + image_shape = np.array((2, 36, 48, 3), dtype=np.int32) + first_stage_max_proposals = 8 + total_num_padded_proposals = batch_size * first_stage_max_proposals + + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=6, + use_matmul_crop_and_resize=use_static_shapes, + 
clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes, + use_matmul_gather_in_matcher=use_static_shapes, + pad_to_max_dimension=pad_to_max_dimension) + def graph_fn(images, + refined_box_encodings, + class_predictions_with_background, + num_proposals, + proposal_boxes): + """Function to construct tf graph for the test.""" + _, true_image_shapes = model.preprocess(images) + detections = model.postprocess({ + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': proposal_boxes, + }, true_image_shapes) + return (detections['num_detections'], detections['detection_boxes'], + detections['detection_scores'], detections['detection_classes'], + detections['raw_detection_boxes'], + detections['raw_detection_scores'], + detections['detection_multiclass_scores'], + detections['detection_anchor_indices']) + + proposal_boxes = np.array( + [[[1, 1, 2, 3], + [0, 0, 1, 1], + [.5, .5, .6, .6], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]], + [[2, 3, 6, 8], + [1, 2, 5, 3], + 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=np.float32) + + num_proposals = np.array([3, 2], dtype=np.int32) + refined_box_encodings = np.zeros( + [total_num_padded_proposals, num_classes, 4], dtype=np.float32) + class_predictions_with_background = np.ones( + [total_num_padded_proposals, num_classes+1], dtype=np.float32) + images = np.zeros(image_shape, dtype=np.float32) + + if use_static_shapes: + results = self.execute(graph_fn, + [images, refined_box_encodings, + class_predictions_with_background, + num_proposals, proposal_boxes], graph=g) + else: + results = self.execute_cpu(graph_fn, + [images, refined_box_encodings, + class_predictions_with_background, + num_proposals, proposal_boxes], graph=g) + # Note that max_total_detections=5 in the NMS config. + expected_num_detections = [5, 4] + expected_detection_classes = [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]] + expected_detection_scores = [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]] + expected_multiclass_scores = [[[1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [1, 1, 1]], + [[1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [1, 1, 1], + [0, 0, 0]]] + # Note that a single anchor can be used for multiple detections (predictions + # are made independently per class). 
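# A small illustrative note on the comment above (a sketch of the expected
# bookkeeping, not the NMS implementation itself): scores here are uniform and
# non-max suppression runs independently per class, so the same proposal can be
# emitted once for class 0 and once for class 1. For the first image (3 valid
# proposals, 2 classes, max_total_detections=5) that gives anchor indices
# [0, 1, 2, 0, 1], which is what `expected_anchor_indices` below encodes.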
+ expected_anchor_indices = [[0, 1, 2, 0, 1], + [0, 1, 0, 1]] + + h = float(image_shape[1]) + w = float(image_shape[2]) + expected_raw_detection_boxes = np.array( + [[[1 / h, 1 / w, 2 / h, 3 / w], [0, 0, 1 / h, 1 / w], + [.5 / h, .5 / w, .6 / h, .6 / w], 4 * [0], 4 * [0], 4 * [0], 4 * [0], + 4 * [0]], + [[2 / h, 3 / w, 6 / h, 8 / w], [1 / h, 2 / w, 5 / h, 3 / w], 4 * [0], + 4 * [0], 4 * [0], 4 * [0], 4 * [0], 4 * [0]]], + dtype=np.float32) + + self.assertAllClose(results[0], expected_num_detections) + + for indx, num_proposals in enumerate(expected_num_detections): + self.assertAllClose(results[2][indx][0:num_proposals], + expected_detection_scores[indx][0:num_proposals]) + self.assertAllClose(results[3][indx][0:num_proposals], + expected_detection_classes[indx][0:num_proposals]) + self.assertAllClose(results[6][indx][0:num_proposals], + expected_multiclass_scores[indx][0:num_proposals]) + self.assertAllClose(results[7][indx][0:num_proposals], + expected_anchor_indices[indx][0:num_proposals]) + + self.assertAllClose(results[4], expected_raw_detection_boxes) + self.assertAllClose(results[5], + class_predictions_with_background.reshape([-1, 8, 3])) + if not use_static_shapes: + self.assertAllEqual(results[1].shape, [2, 5, 4]) + + def test_preprocess_preserves_dynamic_input_shapes(self): + width = tf.random.uniform([], minval=5, maxval=10, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, 5, width, 3]) + image = tf.random.uniform(shape) + model = self._build_model( + is_training=False, number_of_stages=2, second_stage_batch_size=6) + preprocessed_inputs, _ = model.preprocess(image) + self.assertTrue( + preprocessed_inputs.shape.is_compatible_with([None, 5, None, 3])) + + def test_preprocess_preserves_static_input_shapes(self): + shape = tf.stack([2, 5, 5, 3]) + image = tf.random.uniform(shape) + model = self._build_model( + is_training=False, number_of_stages=2, second_stage_batch_size=6) + preprocessed_inputs, _ = model.preprocess(image) + self.assertTrue( + preprocessed_inputs.shape.is_compatible_with([2, 5, 5, 3])) + + # TODO(rathodv): Split test into two - with and without masks. 
+ def test_loss_first_stage_only_mode(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=1, second_stage_batch_size=6) + batch_size = 2 + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + + rpn_box_encodings = tf.zeros( + [batch_size, + anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant([ + [[-10, 13], + [10, -10], + [10, -11], + [-10, 12]], + [[10, -10], + [-10, 13], + [-10, 12], + [10, -11]]], dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), + tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)] + groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]], + dtype=tf.float32), + tf.constant([[1, 0], [1, 0]], + dtype=tf.float32)] + + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + self.assertNotIn('Loss/BoxClassifierLoss/localization_loss', + loss_dict) + self.assertNotIn('Loss/BoxClassifierLoss/classification_loss', + loss_dict) + return (loss_dict['Loss/RPNLoss/localization_loss'], + loss_dict['Loss/RPNLoss/objectness_loss']) + loc_loss, obj_loss = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllClose(loc_loss, 0) + self.assertAllClose(obj_loss, 0) + + # TODO(rathodv): Split test into two - with and without masks. 
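# A minimal numeric sketch (plain NumPy, not the model's own loss ops) of why
# both RPN losses asserted in test_loss_first_stage_only_mode above are zero:
# each groundtruth box coincides exactly with one of the four anchors, so the
# regression targets equal the all-zero rpn_box_encodings, and every objectness
# logit pair favours the correct class by a margin of at least 20.
import numpy as np

def smooth_l1(x):
  # Smooth-L1 with delta=1, the form assumed by the loss comments in these tests.
  return np.where(np.abs(x) < 1.0, 0.5 * x ** 2, np.abs(x) - 0.5)

print(smooth_l1(np.zeros(4)).sum())  # localization: predictions == targets == 0 -> 0.0
print(np.log1p(np.exp(-23.0)))       # softmax CE for logits [-10, 13], true class 1 -> ~1e-10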
+ def test_loss_full(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=2, second_stage_batch_size=6) + batch_size = 3 + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + rpn_box_encodings = tf.zeros( + [batch_size, + anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant( + [[[-10, 13], [10, -10], [10, -11], [-10, 12]], + [[10, -10], [-10, 13], [-10, 12], [10, -11]], + [[10, -10], [-10, 13], [-10, 12], [10, -11]]], + dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + + num_proposals = tf.constant([6, 6, 6], dtype=tf.int32) + proposal_boxes = tf.constant( + 3 * [[[0, 0, 16, 16], [0, 16, 16, 32], [16, 0, 32, 16], + [16, 16, 32, 32], [0, 0, 16, 16], [0, 16, 16, 32]]], + dtype=tf.float32) + refined_box_encodings = tf.zeros( + (batch_size * model.max_num_proposals, + model.num_classes, + BOX_CODE_SIZE), dtype=tf.float32) + class_predictions_with_background = tf.constant( + [ + [-10, 10, -10], # first image + [10, -10, -10], + [10, -10, -10], + [-10, -10, 10], + [-10, 10, -10], + [10, -10, -10], + [10, -10, -10], # second image + [-10, 10, -10], + [-10, 10, -10], + [10, -10, -10], + [10, -10, -10], + [-10, 10, -10], + [10, -10, -10], # third image + [-10, 10, -10], + [-10, 10, -10], + [10, -10, -10], + [10, -10, -10], + [-10, 10, -10] + ], + dtype=tf.float32) + + mask_predictions_logits = 20 * tf.ones((batch_size * + model.max_num_proposals, + model.num_classes, + 14, 14), + dtype=tf.float32) + + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32), + tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32), + tf.constant([[0, .5, .5, 1], [.5, 0, 1, 1]], dtype=tf.float32) + ] + groundtruth_classes_list = [ + tf.constant([[1, 0], [0, 1]], dtype=tf.float32), + tf.constant([[1, 0], [1, 0]], dtype=tf.float32), + tf.constant([[1, 0], [0, 1]], dtype=tf.float32) + ] + + # Set all elements of groundtruth mask to 1.0. In this case all proposal + # crops of the groundtruth masks should return a mask that covers the + # entire proposal. Thus, if mask_predictions_logits element values are all + # greater than 20, the loss should be zero. 
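# Quick numeric check of the comment above (a sketch of the arithmetic, not the
# exact loss implementation): with an all-ones groundtruth mask and mask logits
# fixed at 20, the per-pixel sigmoid cross-entropy is log(1 + exp(-20)) ~= 2e-9,
# so the mask loss asserted below is numerically zero.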
+ groundtruth_masks_list = [ + tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32), + tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32), + tf.convert_to_tensor(np.ones((2, 32, 32)), dtype=tf.float32) + ] + groundtruth_weights_list = [ + tf.constant([1, 1], dtype=tf.float32), + tf.constant([1, 1], dtype=tf.float32), + tf.constant([1, 0], dtype=tf.float32) + ] + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors, + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'proposal_boxes': proposal_boxes, + 'num_proposals': num_proposals, + 'mask_predictions': mask_predictions_logits + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth( + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_masks_list, + groundtruth_weights_list=groundtruth_weights_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + return (loss_dict['Loss/RPNLoss/localization_loss'], + loss_dict['Loss/RPNLoss/objectness_loss'], + loss_dict['Loss/BoxClassifierLoss/localization_loss'], + loss_dict['Loss/BoxClassifierLoss/classification_loss'], + loss_dict['Loss/BoxClassifierLoss/mask_loss']) + (rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss, + box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllClose(rpn_loc_loss, 0) + self.assertAllClose(rpn_obj_loss, 0) + self.assertAllClose(box_loc_loss, 0) + self.assertAllClose(box_cls_loss, 0) + self.assertAllClose(box_mask_loss, 0) + + def test_loss_full_zero_padded_proposals(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, number_of_stages=2, second_stage_batch_size=6) + batch_size = 1 + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + rpn_box_encodings = tf.zeros( + [batch_size, + anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant([ + [[-10, 13], + [10, -10], + [10, -11], + [10, -12]],], dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + + # box_classifier_batch_size is 6, but here we assume that the number of + # actual proposals (not counting zero paddings) is fewer (3). + num_proposals = tf.constant([3], dtype=tf.int32) + proposal_boxes = tf.constant( + [[[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [0, 0, 0, 0], # begin paddings + [0, 0, 0, 0], + [0, 0, 0, 0]]], dtype=tf.float32) + + refined_box_encodings = tf.zeros( + (batch_size * model.max_num_proposals, + model.num_classes, + BOX_CODE_SIZE), dtype=tf.float32) + class_predictions_with_background = tf.constant( + [[-10, 10, -10], + [10, -10, -10], + [10, -10, -10], + [0, 0, 0], # begin paddings + [0, 0, 0], + [0, 0, 0]], dtype=tf.float32) + + mask_predictions_logits = 20 * tf.ones((batch_size * + model.max_num_proposals, + model.num_classes, + 14, 14), + dtype=tf.float32) + + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5]], dtype=tf.float32)] + groundtruth_classes_list = [tf.constant([[1, 0]], dtype=tf.float32)] + + # Set all elements of groundtruth mask to 1.0. 
In this case all proposal + # crops of the groundtruth masks should return a mask that covers the + # entire proposal. Thus, if mask_predictions_logits element values are all + # greater than 20, the loss should be zero. + groundtruth_masks_list = [tf.convert_to_tensor(np.ones((1, 32, 32)), + dtype=tf.float32)] + + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors, + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'proposal_boxes': proposal_boxes, + 'num_proposals': num_proposals, + 'mask_predictions': mask_predictions_logits + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_masks_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + return (loss_dict['Loss/RPNLoss/localization_loss'], + loss_dict['Loss/RPNLoss/objectness_loss'], + loss_dict['Loss/BoxClassifierLoss/localization_loss'], + loss_dict['Loss/BoxClassifierLoss/classification_loss'], + loss_dict['Loss/BoxClassifierLoss/mask_loss']) + (rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss, + box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllClose(rpn_loc_loss, 0) + self.assertAllClose(rpn_obj_loss, 0) + self.assertAllClose(box_loc_loss, 0) + self.assertAllClose(box_cls_loss, 0) + self.assertAllClose(box_mask_loss, 0) + + def test_loss_full_multiple_label_groundtruth(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=2, second_stage_batch_size=6, + softmax_second_stage_classification_loss=False) + batch_size = 1 + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + rpn_box_encodings = tf.zeros( + [batch_size, + anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant([ + [[-10, 13], + [10, -10], + [10, -11], + [10, -12]],], dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + + # box_classifier_batch_size is 6, but here we assume that the number of + # actual proposals (not counting zero paddings) is fewer (3). + num_proposals = tf.constant([3], dtype=tf.int32) + proposal_boxes = tf.constant( + [[[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [0, 0, 0, 0], # begin paddings + [0, 0, 0, 0], + [0, 0, 0, 0]]], dtype=tf.float32) + + # second_stage_localization_loss should only be computed for predictions + # that match groundtruth. For multiple label groundtruth boxes, the loss + # should only be computed once for the label with the smaller index. 
+ refined_box_encodings = tf.constant( + [[[0, 0, 0, 0], [1, 1, -1, -1]], + [[1, 1, -1, -1], [1, 1, 1, 1]], + [[1, 1, -1, -1], [1, 1, 1, 1]], + [[1, 1, -1, -1], [1, 1, 1, 1]], + [[1, 1, -1, -1], [1, 1, 1, 1]], + [[1, 1, -1, -1], [1, 1, 1, 1]]], dtype=tf.float32) + class_predictions_with_background = tf.constant( + [[-100, 100, 100], + [100, -100, -100], + [100, -100, -100], + [0, 0, 0], # begin paddings + [0, 0, 0], + [0, 0, 0]], dtype=tf.float32) + + mask_predictions_logits = 20 * tf.ones((batch_size * + model.max_num_proposals, + model.num_classes, + 14, 14), + dtype=tf.float32) + + groundtruth_boxes_list = [ + tf.constant([[0, 0, .5, .5]], dtype=tf.float32)] + # Box contains two ground truth labels. + groundtruth_classes_list = [tf.constant([[1, 1]], dtype=tf.float32)] + + # Set all elements of groundtruth mask to 1.0. In this case all proposal + # crops of the groundtruth masks should return a mask that covers the + # entire proposal. Thus, if mask_predictions_logits element values are all + # greater than 20, the loss should be zero. + groundtruth_masks_list = [tf.convert_to_tensor(np.ones((1, 32, 32)), + dtype=tf.float32)] + + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors, + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'proposal_boxes': proposal_boxes, + 'num_proposals': num_proposals, + 'mask_predictions': mask_predictions_logits + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_masks_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + return (loss_dict['Loss/RPNLoss/localization_loss'], + loss_dict['Loss/RPNLoss/objectness_loss'], + loss_dict['Loss/BoxClassifierLoss/localization_loss'], + loss_dict['Loss/BoxClassifierLoss/classification_loss'], + loss_dict['Loss/BoxClassifierLoss/mask_loss']) + (rpn_loc_loss, rpn_obj_loss, box_loc_loss, box_cls_loss, + box_mask_loss) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllClose(rpn_loc_loss, 0) + self.assertAllClose(rpn_obj_loss, 0) + self.assertAllClose(box_loc_loss, 0) + self.assertAllClose(box_cls_loss, 0) + self.assertAllClose(box_mask_loss, 0) + + @parameterized.parameters( + {'use_static_shapes': False, 'shared_boxes': False}, + {'use_static_shapes': False, 'shared_boxes': True}, + {'use_static_shapes': True, 'shared_boxes': False}, + {'use_static_shapes': True, 'shared_boxes': True}, + ) + def test_loss_full_zero_padded_proposals_nonzero_loss_with_two_images( + self, use_static_shapes=False, shared_boxes=False): + batch_size = 2 + first_stage_max_proposals = 8 + second_stage_batch_size = 6 + num_classes = 2 + with test_utils.GraphContextOrNone() as g: + model = self._build_model( + is_training=True, + number_of_stages=2, + second_stage_batch_size=second_stage_batch_size, + first_stage_max_proposals=first_stage_max_proposals, + num_classes=num_classes, + use_matmul_crop_and_resize=use_static_shapes, + clip_anchors_to_image=use_static_shapes, + use_static_shapes=use_static_shapes) + + def graph_fn(anchors, rpn_box_encodings, + rpn_objectness_predictions_with_background, images, + num_proposals, proposal_boxes, refined_box_encodings, + class_predictions_with_background, groundtruth_boxes, + groundtruth_classes): + """Function to construct tf graph for the 
test.""" + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': tf.shape(images), + 'anchors': anchors, + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'proposal_boxes': proposal_boxes, + 'num_proposals': num_proposals + } + _, true_image_shapes = model.preprocess(images) + model.provide_groundtruth(tf.unstack(groundtruth_boxes), + tf.unstack(groundtruth_classes)) + loss_dict = model.loss(prediction_dict, true_image_shapes) + return (loss_dict['Loss/RPNLoss/localization_loss'], + loss_dict['Loss/RPNLoss/objectness_loss'], + loss_dict['Loss/BoxClassifierLoss/localization_loss'], + loss_dict['Loss/BoxClassifierLoss/classification_loss']) + + anchors = np.array( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=np.float32) + rpn_box_encodings = np.zeros( + [batch_size, anchors.shape[1], BOX_CODE_SIZE], dtype=np.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = np.array( + [[[-10, 13], + [10, -10], + [10, -11], + [10, -12]], + [[-10, 13], + [10, -10], + [10, -11], + [10, -12]]], dtype=np.float32) + images = np.zeros([batch_size, 32, 32, 3], dtype=np.float32) + + # box_classifier_batch_size is 6, but here we assume that the number of + # actual proposals (not counting zero paddings) is fewer. + num_proposals = np.array([3, 2], dtype=np.int32) + proposal_boxes = np.array( + [[[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [0, 0, 0, 0], # begin paddings + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 0, 16, 16], + [0, 16, 16, 32], + [0, 0, 0, 0], # begin paddings + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]]], dtype=np.float32) + + refined_box_encodings = np.zeros( + (batch_size * second_stage_batch_size, 1 + if shared_boxes else num_classes, BOX_CODE_SIZE), + dtype=np.float32) + class_predictions_with_background = np.array( + [[-10, 10, -10], # first image + [10, -10, -10], + [10, -10, -10], + [0, 0, 0], # begin paddings + [0, 0, 0], + [0, 0, 0], + [-10, -10, 10], # second image + [10, -10, -10], + [0, 0, 0], # begin paddings + [0, 0, 0], + [0, 0, 0], + [0, 0, 0],], dtype=np.float32) + + # The first groundtruth box is 4/5 of the anchor size in both directions + # experiencing a loss of: + # 2 * SmoothL1(5 * log(4/5)) / num_proposals + # = 2 * (abs(5 * log(1/2)) - .5) / 3 + # The second groundtruth box is identical to the prediction and thus + # experiences zero loss. + # Total average loss is (abs(5 * log(1/2)) - .5) / 3. 
+ groundtruth_boxes = np.stack([ + np.array([[0.05, 0.05, 0.45, 0.45]], dtype=np.float32), + np.array([[0.0, 0.0, 0.5, 0.5]], dtype=np.float32)]) + groundtruth_classes = np.stack([np.array([[1, 0]], dtype=np.float32), + np.array([[0, 1]], dtype=np.float32)]) + + execute_fn = self.execute_cpu + if use_static_shapes: + execute_fn = self.execute + + results = execute_fn(graph_fn, [ + anchors, rpn_box_encodings, rpn_objectness_predictions_with_background, + images, num_proposals, proposal_boxes, refined_box_encodings, + class_predictions_with_background, groundtruth_boxes, + groundtruth_classes + ], graph=g) + + exp_loc_loss = (-5 * np.log(.8) - 0.5) / 3.0 + + self.assertAllClose(results[0], exp_loc_loss, rtol=1e-4, atol=1e-4) + self.assertAllClose(results[1], 0.0) + self.assertAllClose(results[2], exp_loc_loss, rtol=1e-4, atol=1e-4) + self.assertAllClose(results[3], 0.0) + + def test_loss_with_hard_mining(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model(is_training=True, + number_of_stages=2, + second_stage_batch_size=None, + first_stage_max_proposals=6, + hard_mining=True) + batch_size = 1 + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + rpn_box_encodings = tf.zeros( + [batch_size, + anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant( + [[[-10, 13], + [-10, 12], + [10, -11], + [10, -12]]], dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + + # box_classifier_batch_size is 6, but here we assume that the number of + # actual proposals (not counting zero paddings) is fewer (3). + num_proposals = tf.constant([3], dtype=tf.int32) + proposal_boxes = tf.constant( + [[[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [0, 0, 0, 0], # begin paddings + [0, 0, 0, 0], + [0, 0, 0, 0]]], dtype=tf.float32) + + refined_box_encodings = tf.zeros( + (batch_size * model.max_num_proposals, + model.num_classes, + BOX_CODE_SIZE), dtype=tf.float32) + class_predictions_with_background = tf.constant( + [[-10, 10, -10], # first image + [-10, -10, 10], + [10, -10, -10], + [0, 0, 0], # begin paddings + [0, 0, 0], + [0, 0, 0]], dtype=tf.float32) + + # The first groundtruth box is 4/5 of the anchor size in both directions + # experiencing a loss of: + # 2 * SmoothL1(5 * log(4/5)) / num_proposals + # = 2 * (abs(5 * log(1/2)) - .5) / 3 + # The second groundtruth box is 46/50 of the anchor size in both + # directions experiencing a loss of: + # 2 * SmoothL1(5 * log(42/50)) / num_proposals + # = 2 * (.5(5 * log(.92))^2 - .5) / 3. + # Since the first groundtruth box experiences greater loss, and we have + # set num_hard_examples=1 in the HardMiner, the final localization loss + # corresponds to that of the first groundtruth box. 
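# Numbers behind the hard-mining expectation above (illustrative only, matching
# the `exp_loc_loss` assertion below): the 4/5-sized box costs
# 2 * (|5 * log(0.8)| - 0.5) / 3 ~= 0.41, the 0.92-sized box costs
# 2 * 0.5 * (5 * log(0.92))**2 / 3 ~= 0.06, and with num_hard_examples=1 only
# the larger of the two contributes, so the expected localization loss is ~0.41.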
+ groundtruth_boxes_list = [ + tf.constant([[0.05, 0.05, 0.45, 0.45], + [0.02, 0.52, 0.48, 0.98],], dtype=tf.float32)] + groundtruth_classes_list = [tf.constant([[1, 0], [0, 1]], + dtype=tf.float32)] + + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors, + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'proposal_boxes': proposal_boxes, + 'num_proposals': num_proposals + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + return (loss_dict['Loss/BoxClassifierLoss/localization_loss'], + loss_dict['Loss/BoxClassifierLoss/classification_loss']) + loc_loss, cls_loss = self.execute_cpu(graph_fn, [], graph=g) + exp_loc_loss = 2 * (-5 * np.log(.8) - 0.5) / 3.0 + self.assertAllClose(loc_loss, exp_loc_loss) + self.assertAllClose(cls_loss, 0) + + def test_loss_with_hard_mining_and_losses_mask(self): + with test_utils.GraphContextOrNone() as g: + model = self._build_model(is_training=True, + number_of_stages=2, + second_stage_batch_size=None, + first_stage_max_proposals=6, + hard_mining=True) + batch_size = 2 + number_of_proposals = 3 + def graph_fn(): + """A function with TF compute.""" + anchors = tf.constant( + [[0, 0, 16, 16], + [0, 16, 16, 32], + [16, 0, 32, 16], + [16, 16, 32, 32]], dtype=tf.float32) + rpn_box_encodings = tf.zeros( + [batch_size, + anchors.get_shape().as_list()[0], + BOX_CODE_SIZE], dtype=tf.float32) + # use different numbers for the objectness category to break ties in + # order of boxes returned by NMS + rpn_objectness_predictions_with_background = tf.constant( + [[[-10, 13], + [-10, 12], + [10, -11], + [10, -12]], + [[-10, 13], + [-10, 12], + [10, -11], + [10, -12]]], dtype=tf.float32) + image_shape = tf.constant([batch_size, 32, 32, 3], dtype=tf.int32) + + # box_classifier_batch_size is 6, but here we assume that the number of + # actual proposals (not counting zero paddings) is fewer (3). + num_proposals = tf.constant([number_of_proposals, number_of_proposals], + dtype=tf.int32) + proposal_boxes = tf.constant( + [[[0, 0, 16, 16], # first image + [0, 16, 16, 32], + [16, 0, 32, 16], + [0, 0, 0, 0], # begin paddings + [0, 0, 0, 0], + [0, 0, 0, 0]], + [[0, 0, 16, 16], # second image + [0, 16, 16, 32], + [16, 0, 32, 16], + [0, 0, 0, 0], # begin paddings + [0, 0, 0, 0], + [0, 0, 0, 0]]], dtype=tf.float32) + + refined_box_encodings = tf.zeros( + (batch_size * model.max_num_proposals, + model.num_classes, + BOX_CODE_SIZE), dtype=tf.float32) + class_predictions_with_background = tf.constant( + [[-10, 10, -10], # first image + [-10, -10, 10], + [10, -10, -10], + [0, 0, 0], # begin paddings + [0, 0, 0], + [0, 0, 0], + [-10, 10, -10], # second image + [-10, -10, 10], + [10, -10, -10], + [0, 0, 0], # begin paddings + [0, 0, 0], + [0, 0, 0]], dtype=tf.float32) + + # The first groundtruth box is 4/5 of the anchor size in both directions + # experiencing a loss of: + # 2 * SmoothL1(5 * log(4/5)) / (num_proposals * batch_size) + # = 2 * (abs(5 * log(1/2)) - .5) / 3 + # The second groundtruth box is 46/50 of the anchor size in both + # directions experiencing a loss of: + # 2 * SmoothL1(5 * log(42/50)) / (num_proposals * batch_size) + # = 2 * (.5(5 * log(.92))^2 - .5) / 3. 
+ # Since the first groundtruth box experiences greater loss, and we have + # set num_hard_examples=1 in the HardMiner, the final localization loss + # corresponds to that of the first groundtruth box. + groundtruth_boxes_list = [ + tf.constant([[0.05, 0.05, 0.45, 0.45], + [0.02, 0.52, 0.48, 0.98]], dtype=tf.float32), + tf.constant([[0.05, 0.05, 0.45, 0.45], + [0.02, 0.52, 0.48, 0.98]], dtype=tf.float32)] + groundtruth_classes_list = [ + tf.constant([[1, 0], [0, 1]], dtype=tf.float32), + tf.constant([[1, 0], [0, 1]], dtype=tf.float32)] + is_annotated_list = [tf.constant(True, dtype=tf.bool), + tf.constant(False, dtype=tf.bool)] + + prediction_dict = { + 'rpn_box_encodings': rpn_box_encodings, + 'rpn_objectness_predictions_with_background': + rpn_objectness_predictions_with_background, + 'image_shape': image_shape, + 'anchors': anchors, + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'proposal_boxes': proposal_boxes, + 'num_proposals': num_proposals + } + _, true_image_shapes = model.preprocess(tf.zeros(image_shape)) + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list, + is_annotated_list=is_annotated_list) + loss_dict = model.loss(prediction_dict, true_image_shapes) + return (loss_dict['Loss/BoxClassifierLoss/localization_loss'], + loss_dict['Loss/BoxClassifierLoss/classification_loss']) + exp_loc_loss = (2 * (-5 * np.log(.8) - 0.5) / + (number_of_proposals * batch_size)) + loc_loss, cls_loss = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllClose(loc_loss, exp_loc_loss) + self.assertAllClose(cls_loss, 0) + + def test_restore_map_for_classification_ckpt(self): + if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.') + # Define mock tensorflow classification graph and save variables. + test_graph_classification = tf.Graph() + with test_graph_classification.as_default(): + image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3]) + with tf.variable_scope('mock_model'): + net = slim.conv2d(image, num_outputs=3, kernel_size=1, scope='layer1') + slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2') + + init_op = tf.global_variables_initializer() + saver = tf.train.Saver() + save_path = self.get_temp_dir() + with self.test_session(graph=test_graph_classification) as sess: + sess.run(init_op) + saved_model_path = saver.save(sess, save_path) + + # Create tensorflow detection graph and load variables from + # classification checkpoint. 
+ test_graph_detection = tf.Graph() + with test_graph_detection.as_default(): + model = self._build_model( + is_training=False, + number_of_stages=2, second_stage_batch_size=6) + + inputs_shape = (2, 20, 20, 3) + inputs = tf.cast(tf.random_uniform( + inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + model.postprocess(prediction_dict, true_image_shapes) + var_map = model.restore_map(fine_tune_checkpoint_type='classification') + self.assertIsInstance(var_map, dict) + saver = tf.train.Saver(var_map) + with self.test_session(graph=test_graph_classification) as sess: + saver.restore(sess, saved_model_path) + for var in sess.run(tf.report_uninitialized_variables()): + self.assertNotIn(model.first_stage_feature_extractor_scope, var) + self.assertNotIn(model.second_stage_feature_extractor_scope, var) + + def test_restore_map_for_detection_ckpt(self): + if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.') + # Define mock tensorflow classification graph and save variables. + # Define first detection graph and save variables. + test_graph_detection1 = tf.Graph() + with test_graph_detection1.as_default(): + model = self._build_model( + is_training=False, + number_of_stages=2, second_stage_batch_size=6) + inputs_shape = (2, 20, 20, 3) + inputs = tf.cast(tf.random_uniform( + inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + model.postprocess(prediction_dict, true_image_shapes) + another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable + init_op = tf.global_variables_initializer() + saver = tf.train.Saver() + save_path = self.get_temp_dir() + with self.test_session(graph=test_graph_detection1) as sess: + sess.run(init_op) + saved_model_path = saver.save(sess, save_path) + + # Define second detection graph and restore variables. 
+ test_graph_detection2 = tf.Graph() + with test_graph_detection2.as_default(): + model2 = self._build_model(is_training=False, + number_of_stages=2, + second_stage_batch_size=6, num_classes=42) + + inputs_shape2 = (2, 20, 20, 3) + inputs2 = tf.cast(tf.random_uniform( + inputs_shape2, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + preprocessed_inputs2, true_image_shapes = model2.preprocess(inputs2) + prediction_dict2 = model2.predict(preprocessed_inputs2, true_image_shapes) + model2.postprocess(prediction_dict2, true_image_shapes) + another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable + var_map = model2.restore_map(fine_tune_checkpoint_type='detection') + self.assertIsInstance(var_map, dict) + saver = tf.train.Saver(var_map) + with self.test_session(graph=test_graph_detection2) as sess: + saver.restore(sess, saved_model_path) + uninitialized_vars_list = sess.run(tf.report_uninitialized_variables()) + self.assertIn(six.b('another_variable'), uninitialized_vars_list) + for var in uninitialized_vars_list: + self.assertNotIn( + six.b(model2.first_stage_feature_extractor_scope), var) + self.assertNotIn( + six.b(model2.second_stage_feature_extractor_scope), var) + + def test_load_all_det_checkpoint_vars(self): + if tf_version.is_tf2(): self.skipTest('Skipping TF1 only test.') + test_graph_detection = tf.Graph() + with test_graph_detection.as_default(): + model = self._build_model( + is_training=False, + number_of_stages=2, + second_stage_batch_size=6, + num_classes=42) + + inputs_shape = (2, 20, 20, 3) + inputs = tf.cast( + tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + model.postprocess(prediction_dict, true_image_shapes) + another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable + var_map = model.restore_map( + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=True) + self.assertIsInstance(var_map, dict) + self.assertIn('another_variable', var_map) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/rfcn_meta_arch.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/rfcn_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..95c1ce17b2b9fef26f634f3dc58018032617d38c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/rfcn_meta_arch.py @@ -0,0 +1,389 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""R-FCN meta-architecture definition. + +R-FCN: Dai, Jifeng, et al. 
"R-FCN: Object Detection via Region-based +Fully Convolutional Networks." arXiv preprint arXiv:1605.06409 (2016). + +The R-FCN meta architecture is similar to Faster R-CNN and only differs in the +second stage. Hence this class inherits FasterRCNNMetaArch and overrides only +the `_predict_second_stage` method. + +Similar to Faster R-CNN we allow for two modes: number_of_stages=1 and +number_of_stages=2. In the former setting, all of the user facing methods +(e.g., predict, postprocess, loss) can be used as if the model consisted +only of the RPN, returning class agnostic proposals (these can be thought of as +approximate detections with no associated class information). In the latter +setting, proposals are computed, then passed through a second stage +"box classifier" to yield (multi-class) detections. + +Implementations of R-FCN models must define a new FasterRCNNFeatureExtractor and +override three methods: `preprocess`, `_extract_proposal_features` (the first +stage of the model), and `_extract_box_classifier_features` (the second stage of +the model). Optionally, the `restore_fn` method can be overridden. See tests +for an example. + +See notes in the documentation of Faster R-CNN meta-architecture as they all +apply here. +""" +import tensorflow.compat.v1 as tf + +from object_detection.core import box_predictor +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import ops + + +class RFCNMetaArch(faster_rcnn_meta_arch.FasterRCNNMetaArch): + """R-FCN Meta-architecture definition.""" + + def __init__(self, + is_training, + num_classes, + image_resizer_fn, + feature_extractor, + number_of_stages, + first_stage_anchor_generator, + first_stage_target_assigner, + first_stage_atrous_rate, + first_stage_box_predictor_arg_scope_fn, + first_stage_box_predictor_kernel_size, + first_stage_box_predictor_depth, + first_stage_minibatch_size, + first_stage_sampler, + first_stage_non_max_suppression_fn, + first_stage_max_proposals, + first_stage_localization_loss_weight, + first_stage_objectness_loss_weight, + crop_and_resize_fn, + second_stage_target_assigner, + second_stage_rfcn_box_predictor, + second_stage_batch_size, + second_stage_sampler, + second_stage_non_max_suppression_fn, + second_stage_score_conversion_fn, + second_stage_localization_loss_weight, + second_stage_classification_loss_weight, + second_stage_classification_loss, + hard_example_miner, + parallel_iterations=16, + add_summaries=True, + clip_anchors_to_image=False, + use_static_shapes=False, + resize_masks=False, + freeze_batchnorm=False, + return_raw_detections_during_predict=False, + output_final_box_features=False): + """RFCNMetaArch Constructor. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + num_classes: Number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + image_resizer_fn: A callable for image resizing. This callable always + takes a rank-3 image tensor (corresponding to a single image) and + returns a rank-3 image tensor, possibly with new spatial dimensions. + See builders/image_resizer_builder.py. + feature_extractor: A FasterRCNNFeatureExtractor object. + number_of_stages: Valid values are {1, 2}. If 1 will only construct the + Region Proposal Network (RPN) part of the model. 
+ first_stage_anchor_generator: An anchor_generator.AnchorGenerator object + (note that currently we only support + grid_anchor_generator.GridAnchorGenerator objects) + first_stage_target_assigner: Target assigner to use for first stage of + R-FCN (RPN). + first_stage_atrous_rate: A single integer indicating the atrous rate for + the single convolution op which is applied to the `rpn_features_to_crop` + tensor to obtain a tensor to be used for box prediction. Some feature + extractors optionally allow for producing feature maps computed at + denser resolutions. The atrous rate is used to compensate for the + denser feature maps by using an effectively larger receptive field. + (This should typically be set to 1). + first_stage_box_predictor_arg_scope_fn: Either a + Keras layer hyperparams object or a function to construct tf-slim + arg_scope for conv2d, separable_conv2d and fully_connected ops. Used + for the RPN box predictor. If it is a keras hyperparams object the + RPN box predictor will be a Keras model. If it is a function to + construct an arg scope it will be a tf-slim box predictor. + first_stage_box_predictor_kernel_size: Kernel size to use for the + convolution op just prior to RPN box predictions. + first_stage_box_predictor_depth: Output depth for the convolution op + just prior to RPN box predictions. + first_stage_minibatch_size: The "batch size" to use for computing the + objectness and location loss of the region proposal network. This + "batch size" refers to the number of anchors selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + first_stage_sampler: The sampler for the boxes used to calculate the RPN + loss after the first stage. + first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores` and optional `clip_window`(with + all other inputs already set) and returns a dictionary containing + tensors with keys: `detection_boxes`, `detection_scores`, + `detection_classes`, `num_detections`. This is used to perform non max + suppression on the boxes predicted by the Region Proposal Network + (RPN). + See `post_processing.batch_multiclass_non_max_suppression` for the type + and shape of these tensors. + first_stage_max_proposals: Maximum number of boxes to retain after + performing Non-Max Suppression (NMS) on the boxes predicted by the + Region Proposal Network (RPN). + first_stage_localization_loss_weight: A float + first_stage_objectness_loss_weight: A float + crop_and_resize_fn: A differentiable resampler to use for cropping RPN + proposal features. + second_stage_target_assigner: Target assigner to use for second stage of + R-FCN. If the model is configured with multiple prediction heads, this + target assigner is used to generate targets for all heads (with the + correct `unmatched_class_label`). + second_stage_rfcn_box_predictor: RFCN box predictor to use for + second stage. + second_stage_batch_size: The batch size used for computing the + classification and refined location loss of the box classifier. This + "batch size" refers to the number of proposals selected as contributing + to the loss function for any given image within the image batch and is + only called "batch_size" due to terminology from the Faster R-CNN paper. + second_stage_sampler: The sampler for the boxes used for second stage + box classifier. 
+      second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
+        callable that takes `boxes`, `scores`, optional `clip_window` and
+        optional (kwarg) `mask` inputs (with all other inputs already set)
+        and returns a dictionary containing tensors with keys:
+        `detection_boxes`, `detection_scores`, `detection_classes`,
+        `num_detections`, and (optionally) `detection_masks`. See
+        `post_processing.batch_multiclass_non_max_suppression` for the type and
+        shape of these tensors.
+      second_stage_score_conversion_fn: Callable elementwise nonlinearity
+        (that takes tensors as inputs and returns tensors). This is usually
+        used to convert logits to probabilities.
+      second_stage_localization_loss_weight: A float
+      second_stage_classification_loss_weight: A float
+      second_stage_classification_loss: A string indicating which loss function
+        to use, supports 'softmax' and 'sigmoid'.
+      hard_example_miner: A losses.HardExampleMiner object (can be None).
+      parallel_iterations: (Optional) The number of iterations allowed to run
+        in parallel for calls to tf.map_fn.
+      add_summaries: boolean (default: True) controlling whether summary ops
+        should be added to tensorflow graph.
+      clip_anchors_to_image: The generated anchors are clipped to the
+        window size without filtering the nonoverlapping anchors. This
+        generates a static number of anchors. This argument is unused.
+      use_static_shapes: If True, uses implementation of ops with static shape
+        guarantees.
+      resize_masks: Indicates whether the masks present in the groundtruth
+        should be resized in the model with `image_resizer_fn`.
+      freeze_batchnorm: Whether to freeze batch norm parameters during
+        training or not. When training with a small batch size (e.g. 1), it is
+        desirable to freeze batch norm update and use pretrained batch norm
+        params.
+      return_raw_detections_during_predict: Whether to return raw detection
+        boxes in the predict() method. These are decoded boxes that have not
+        been through postprocessing (i.e. NMS). Default False.
+      output_final_box_features: Whether to output final box features. If true,
+        the feature map is cropped based on the final box predictions and
+        returned in the dict as detection_features.
+
+    Raises:
+      ValueError: If `second_stage_batch_size` > `first_stage_max_proposals`
+      ValueError: If first_stage_anchor_generator is not of type
+        grid_anchor_generator.GridAnchorGenerator.
+    """
+    # TODO(rathodv): add_summaries and crop_and_resize_fn are currently
+    # unused. Respect that directive in the future.
+    super(RFCNMetaArch, self).__init__(
+        is_training,
+        num_classes,
+        image_resizer_fn,
+        feature_extractor,
+        number_of_stages,
+        first_stage_anchor_generator,
+        first_stage_target_assigner,
+        first_stage_atrous_rate,
+        first_stage_box_predictor_arg_scope_fn,
+        first_stage_box_predictor_kernel_size,
+        first_stage_box_predictor_depth,
+        first_stage_minibatch_size,
+        first_stage_sampler,
+        first_stage_non_max_suppression_fn,
+        first_stage_max_proposals,
+        first_stage_localization_loss_weight,
+        first_stage_objectness_loss_weight,
+        crop_and_resize_fn,
+        None,  # initial_crop_size is not used in R-FCN
+        None,  # maxpool_kernel_size is not used in R-FCN
+        None,  # maxpool_stride is not used in R-FCN
+        second_stage_target_assigner,
+        None,  # fully_connected_box_predictor is not used in R-FCN.
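+        # The remaining positional args mirror FasterRCNNMetaArch's
+        # constructor; the R-FCN box predictor for the second stage is stored
+        # separately below as self._rfcn_box_predictor.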
+ second_stage_batch_size, + second_stage_sampler, + second_stage_non_max_suppression_fn, + second_stage_score_conversion_fn, + second_stage_localization_loss_weight, + second_stage_classification_loss_weight, + second_stage_classification_loss, + 1.0, # second stage mask prediction loss weight isn't used in R-FCN. + hard_example_miner, + parallel_iterations, + add_summaries, + clip_anchors_to_image, + use_static_shapes, + resize_masks, + freeze_batchnorm=freeze_batchnorm, + return_raw_detections_during_predict=( + return_raw_detections_during_predict), + output_final_box_features=output_final_box_features) + + self._rfcn_box_predictor = second_stage_rfcn_box_predictor + + def _predict_second_stage(self, rpn_box_encodings, + rpn_objectness_predictions_with_background, + rpn_features, + anchors, + image_shape, + true_image_shapes): + """Predicts the output tensors from 2nd stage of R-FCN. + + Args: + rpn_box_encodings: 3-D float tensor of shape + [batch_size, num_valid_anchors, self._box_coder.code_size] containing + predicted boxes. + rpn_objectness_predictions_with_background: 3-D float tensor of shape + [batch_size, num_valid_anchors, 2] containing class + predictions (logits) for each of the anchors. Note that this + tensor *includes* background class predictions (at class index 0). + rpn_features: A list of single 4-D float32 tensor with shape + [batch_size, height, width, depth] representing image features from the + RPN. + anchors: 2-D float tensor of shape + [num_anchors, self._box_coder.code_size]. + image_shape: A 1D int32 tensors of size [4] containing the image shape. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + + Returns: + prediction_dict: a dictionary holding "raw" prediction tensors: + 1) refined_box_encodings: a 3-D tensor with shape + [total_num_proposals, num_classes, 4] representing predicted + (final) refined box encodings, where + total_num_proposals=batch_size*self._max_num_proposals + 2) class_predictions_with_background: a 2-D tensor with shape + [total_num_proposals, num_classes + 1] containing class + predictions (logits) for each of the anchors, where + total_num_proposals=batch_size*self._max_num_proposals. + Note that this tensor *includes* background class predictions + (at class index 0). + 3) num_proposals: An int32 tensor of shape [batch_size] representing the + number of proposals generated by the RPN. `num_proposals` allows us + to keep track of which entries are to be treated as zero paddings and + which are not since we always pad the number of proposals to be + `self.max_num_proposals` for each image. + 4) proposal_boxes: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing + decoded proposal bounding boxes (in absolute coordinates). + 5) proposal_boxes_normalized: A float32 tensor of shape + [batch_size, self.max_num_proposals, 4] representing decoded proposal + bounding boxes (in normalized coordinates). Can be used to override + the boxes proposed by the RPN, thus enabling one to extract box + classification and prediction for externally selected areas of the + image. + 6) box_classifier_features: a 4-D float32 tensor, of shape + [batch_size, feature_map_height, feature_map_width, depth], + representing the box classifier features. 
+ """ + image_shape_2d = tf.tile(tf.expand_dims(image_shape[1:], 0), + [image_shape[0], 1]) + (proposal_boxes_normalized, _, _, num_proposals, _, + _) = self._postprocess_rpn(rpn_box_encodings, + rpn_objectness_predictions_with_background, + anchors, image_shape_2d, true_image_shapes) + + rpn_features = rpn_features[0] + box_classifier_features = ( + self._extract_box_classifier_features(rpn_features)) + + if self._rfcn_box_predictor.is_keras_model: + box_predictions = self._rfcn_box_predictor( + [box_classifier_features], + proposal_boxes=proposal_boxes_normalized) + else: + box_predictions = self._rfcn_box_predictor.predict( + [box_classifier_features], + num_predictions_per_location=[1], + scope=self.second_stage_box_predictor_scope, + proposal_boxes=proposal_boxes_normalized) + refined_box_encodings = tf.squeeze( + tf.concat(box_predictions[box_predictor.BOX_ENCODINGS], axis=1), axis=1) + class_predictions_with_background = tf.squeeze( + tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1), + axis=1) + + absolute_proposal_boxes = ops.normalized_to_image_coordinates( + proposal_boxes_normalized, image_shape, + parallel_iterations=self._parallel_iterations) + + prediction_dict = { + 'refined_box_encodings': refined_box_encodings, + 'class_predictions_with_background': + class_predictions_with_background, + 'num_proposals': num_proposals, + 'proposal_boxes': absolute_proposal_boxes, + 'box_classifier_features': box_classifier_features, + 'proposal_boxes_normalized': proposal_boxes_normalized, + 'final_anchors': absolute_proposal_boxes + } + if self._return_raw_detections_during_predict: + prediction_dict.update(self._raw_detections_and_feature_map_inds( + refined_box_encodings, absolute_proposal_boxes)) + return prediction_dict + + def regularization_losses(self): + """Returns a list of regularization losses for this model. + + Returns a list of regularization losses for this model that the estimator + needs to use during training/optimization. + + Returns: + A list of regularization loss tensors. + """ + reg_losses = super(RFCNMetaArch, self).regularization_losses() + if self._rfcn_box_predictor.is_keras_model: + reg_losses.extend(self._rfcn_box_predictor.losses) + return reg_losses + + def updates(self): + """Returns a list of update operators for this model. + + Returns a list of update operators for this model that must be executed at + each training step. The estimator's train op needs to have a control + dependency on these updates. + + Returns: + A list of update operators. 
+ """ + update_ops = super(RFCNMetaArch, self).updates() + + if self._rfcn_box_predictor.is_keras_model: + update_ops.extend( + self._rfcn_box_predictor.get_updates_for(None)) + update_ops.extend( + self._rfcn_box_predictor.get_updates_for( + self._rfcn_box_predictor.inputs)) + return update_ops diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/rfcn_meta_arch.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/rfcn_meta_arch.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75332675aca274d166bca81d7c80d5d39e835355 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/rfcn_meta_arch.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/rfcn_meta_arch_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/rfcn_meta_arch_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9e279bdf499901d6682cb6a195071f22c537f98e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/rfcn_meta_arch_test.py @@ -0,0 +1,67 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.meta_architectures.rfcn_meta_arch.""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import faster_rcnn_meta_arch_test_lib +from object_detection.meta_architectures import rfcn_meta_arch + + +class RFCNMetaArchTest( + faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase): + + def _get_second_stage_box_predictor_text_proto( + self, share_box_across_classes=False): + del share_box_across_classes + box_predictor_text_proto = """ + rfcn_box_predictor { + conv_hyperparams { + op: CONV + activation: NONE + regularizer { + l2_regularizer { + weight: 0.0005 + } + } + initializer { + variance_scaling_initializer { + factor: 1.0 + uniform: true + mode: FAN_AVG + } + } + } + } + """ + return box_predictor_text_proto + + def _get_model(self, box_predictor, **common_kwargs): + return rfcn_meta_arch.RFCNMetaArch( + second_stage_rfcn_box_predictor=box_predictor, **common_kwargs) + + def _get_box_classifier_features_shape(self, + image_size, + batch_size, + max_num_proposals, + initial_crop_size, + maxpool_stride, + num_features): + return (batch_size, image_size, image_size, num_features) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..eb1fd320d7061a72fe6fa48955b421eee3b0f96e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch.py @@ -0,0 +1,1355 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SSD Meta-architecture definition. + +General tensorflow implementation of convolutional Multibox/SSD detection +models. +""" +import abc +import tensorflow.compat.v1 as tf +from tensorflow.python.util.deprecation import deprecated_args +from object_detection.core import box_list +from object_detection.core import box_list_ops +from object_detection.core import matcher +from object_detection.core import model +from object_detection.core import standard_fields as fields +from object_detection.core import target_assigner +from object_detection.utils import ops +from object_detection.utils import shape_utils +from object_detection.utils import variables_helper +from object_detection.utils import visualization_utils + + +# pylint: disable=g-import-not-at-top +try: + import tf_slim as slim +except ImportError: + # TF 2.0 doesn't ship with contrib. 
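+  # If tf_slim is not installed, `slim` simply stays undefined; it is only
+  # needed on the non-Keras (TF1) code paths below that wrap feature
+  # extraction and box prediction in slim.arg_scope.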
+ pass +# pylint: enable=g-import-not-at-top + + +class SSDFeatureExtractor(object): + """SSD Slim Feature Extractor definition.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """Constructor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + self._is_training = is_training + self._depth_multiplier = depth_multiplier + self._min_depth = min_depth + self._pad_to_multiple = pad_to_multiple + self._conv_hyperparams_fn = conv_hyperparams_fn + self._reuse_weights = reuse_weights + self._use_explicit_padding = use_explicit_padding + self._use_depthwise = use_depthwise + self._num_layers = num_layers + self._override_base_feature_extractor_hyperparams = ( + override_base_feature_extractor_hyperparams) + + @property + def is_keras_model(self): + return False + + @abc.abstractmethod + def preprocess(self, resized_inputs): + """Preprocesses images for feature extraction (minus image resizing). + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + """ + pass + + @abc.abstractmethod + def extract_features(self, preprocessed_inputs): + """Extracts features from preprocessed inputs. + + This function is responsible for extracting feature maps from preprocessed + images. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + raise NotImplementedError + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Args: + feature_extractor_scope: A scope name for the feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. 
+ """ + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + var_name = variable.op.name + if var_name.startswith(feature_extractor_scope + '/'): + var_name = var_name.replace(feature_extractor_scope + '/', '') + variables_to_restore[var_name] = variable + + return variables_to_restore + + +class SSDKerasFeatureExtractor(tf.keras.Model): + """SSD Feature Extractor definition.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False, + name=None): + """Constructor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_config`. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDKerasFeatureExtractor, self).__init__(name=name) + + self._is_training = is_training + self._depth_multiplier = depth_multiplier + self._min_depth = min_depth + self._pad_to_multiple = pad_to_multiple + self._conv_hyperparams = conv_hyperparams + self._freeze_batchnorm = freeze_batchnorm + self._inplace_batchnorm_update = inplace_batchnorm_update + self._use_explicit_padding = use_explicit_padding + self._use_depthwise = use_depthwise + self._num_layers = num_layers + self._override_base_feature_extractor_hyperparams = ( + override_base_feature_extractor_hyperparams) + + @property + def is_keras_model(self): + return True + + @abc.abstractmethod + def preprocess(self, resized_inputs): + """Preprocesses images for feature extraction (minus image resizing). + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + """ + raise NotImplementedError + + @abc.abstractmethod + def _extract_features(self, preprocessed_inputs): + """Extracts features from preprocessed inputs. 
+ + This function is responsible for extracting feature maps from preprocessed + images. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + raise NotImplementedError + + # This overrides the keras.Model `call` method with the _extract_features + # method. + def call(self, inputs, **kwargs): + return self._extract_features(inputs) + + +class SSDMetaArch(model.DetectionModel): + """SSD Meta-architecture definition.""" + + @deprecated_args(None, + 'NMS is always placed on TPU; do not use nms_on_host ' + 'as it has no effect.', 'nms_on_host') + def __init__(self, + is_training, + anchor_generator, + box_predictor, + box_coder, + feature_extractor, + encode_background_as_zeros, + image_resizer_fn, + non_max_suppression_fn, + score_conversion_fn, + classification_loss, + localization_loss, + classification_loss_weight, + localization_loss_weight, + normalize_loss_by_num_matches, + hard_example_miner, + target_assigner_instance, + add_summaries=True, + normalize_loc_loss_by_codesize=False, + freeze_batchnorm=False, + inplace_batchnorm_update=False, + add_background_class=True, + explicit_background_class=False, + random_example_sampler=None, + expected_loss_weights_fn=None, + use_confidences_as_targets=False, + implicit_example_weight=0.5, + equalization_loss_config=None, + return_raw_detections_during_predict=False, + nms_on_host=True): + """SSDMetaArch Constructor. + + TODO(rathodv,jonathanhuang): group NMS parameters + score converter into + a class and loss parameters into a class and write config protos for + postprocessing and losses. + + Args: + is_training: A boolean indicating whether the training version of the + computation graph should be constructed. + anchor_generator: an anchor_generator.AnchorGenerator object. + box_predictor: a box_predictor.BoxPredictor object. + box_coder: a box_coder.BoxCoder object. + feature_extractor: a SSDFeatureExtractor object. + encode_background_as_zeros: boolean determining whether background + targets are to be encoded as an all zeros vector or a one-hot + vector (where background is the 0th class). + image_resizer_fn: a callable for image resizing. This callable always + takes a rank-3 image tensor (corresponding to a single image) and + returns a rank-3 image tensor, possibly with new spatial dimensions and + a 1-D tensor of shape [3] indicating shape of true image within + the resized image tensor as the resized image tensor could be padded. + See builders/image_resizer_builder.py. + non_max_suppression_fn: batch_multiclass_non_max_suppression + callable that takes `boxes`, `scores` and optional `clip_window` + inputs (with all other inputs already set) and returns a dictionary + hold tensors with keys: `detection_boxes`, `detection_scores`, + `detection_classes` and `num_detections`. See `post_processing. + batch_multiclass_non_max_suppression` for the type and shape of these + tensors. + score_conversion_fn: callable elementwise nonlinearity (that takes tensors + as inputs and returns tensors). This is usually used to convert logits + to probabilities. + classification_loss: an object_detection.core.losses.Loss object. + localization_loss: a object_detection.core.losses.Loss object. 
+      classification_loss_weight: float
+      localization_loss_weight: float
+      normalize_loss_by_num_matches: boolean
+      hard_example_miner: a losses.HardExampleMiner object (can be None)
+      target_assigner_instance: target_assigner.TargetAssigner instance to use.
+      add_summaries: boolean (default: True) controlling whether summary ops
+        should be added to tensorflow graph.
+      normalize_loc_loss_by_codesize: whether to normalize localization loss
+        by code size of the box encoder.
+      freeze_batchnorm: Whether to freeze batch norm parameters during
+        training or not. When training with a small batch size (e.g. 1), it is
+        desirable to freeze batch norm update and use pretrained batch norm
+        params.
+      inplace_batchnorm_update: Whether to update batch norm moving average
+        values inplace. When this is false, the train op must add a control
+        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to
+        update batch norm statistics.
+      add_background_class: Whether to add an implicit background class to
+        one-hot encodings of groundtruth labels. Set to false if training a
+        single class model or using groundtruth labels with an explicit
+        background class.
+      explicit_background_class: Set to true if using groundtruth labels with an
+        explicit background class, as in multiclass scores.
+      random_example_sampler: a BalancedPositiveNegativeSampler object that can
+        perform random example sampling when computing loss. If None, random
+        sampling process is skipped. Note that random example sampler and hard
+        example miner can both be applied to the model. In that case, random
+        sampler will take effect first and hard example miner can only process
+        the random sampled examples.
+      expected_loss_weights_fn: If not None, used to calculate
+        loss by background/foreground weighting. Should take batch_cls_targets
+        as inputs and return foreground_weights, background_weights. See
+        expected_classification_loss_by_expected_sampling and
+        expected_classification_loss_by_reweighting_unmatched_anchors in
+        third_party/tensorflow_models/object_detection/utils/ops.py as examples.
+      use_confidences_as_targets: Whether to use the groundtruth_confidences
+        field to assign the targets.
+      implicit_example_weight: a float number that specifies the weight used
+        for the implicit negative examples.
+      equalization_loss_config: a namedtuple that specifies configs for
+        computing equalization loss.
+      return_raw_detections_during_predict: Whether to return raw detection
+        boxes in the predict() method. These are decoded boxes that have not
+        been through postprocessing (i.e. NMS). Default False.
+      nms_on_host: boolean (default: True) controlling whether NMS should be
+        carried out on the host (outside of TPU).
+    """
+    super(SSDMetaArch, self).__init__(num_classes=box_predictor.num_classes)
+    self._is_training = is_training
+    self._freeze_batchnorm = freeze_batchnorm
+    self._inplace_batchnorm_update = inplace_batchnorm_update
+
+    self._anchor_generator = anchor_generator
+    self._box_predictor = box_predictor
+
+    self._box_coder = box_coder
+    self._feature_extractor = feature_extractor
+    self._add_background_class = add_background_class
+    self._explicit_background_class = explicit_background_class
+
+    if add_background_class and explicit_background_class:
+      raise ValueError("Cannot have both 'add_background_class' and"
+                       " 'explicit_background_class' true.")
+
+    # Needed for fine-tuning from classification checkpoints whose
+    # variables do not have the feature extractor scope.
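+    # The scope recorded here is what
+    # restore_from_classification_checkpoint_fn() strips from variable names,
+    # so that variables in a plain classification checkpoint can be matched
+    # against the backbone variables of this detection graph.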
+ if self._feature_extractor.is_keras_model: + # Keras feature extractors will have a name they implicitly use to scope. + # So, all contained variables are prefixed by this name. + # To load from classification checkpoints, need to filter out this name. + self._extract_features_scope = feature_extractor.name + else: + # Slim feature extractors get an explicit naming scope + self._extract_features_scope = 'FeatureExtractor' + + if encode_background_as_zeros: + background_class = [0] + else: + background_class = [1] + + if self._add_background_class: + num_foreground_classes = self.num_classes + else: + num_foreground_classes = self.num_classes - 1 + + self._unmatched_class_label = tf.constant( + background_class + num_foreground_classes * [0], tf.float32) + + self._target_assigner = target_assigner_instance + + self._classification_loss = classification_loss + self._localization_loss = localization_loss + self._classification_loss_weight = classification_loss_weight + self._localization_loss_weight = localization_loss_weight + self._normalize_loss_by_num_matches = normalize_loss_by_num_matches + self._normalize_loc_loss_by_codesize = normalize_loc_loss_by_codesize + self._hard_example_miner = hard_example_miner + self._random_example_sampler = random_example_sampler + self._parallel_iterations = 16 + + self._image_resizer_fn = image_resizer_fn + self._non_max_suppression_fn = non_max_suppression_fn + self._score_conversion_fn = score_conversion_fn + + self._anchors = None + self._add_summaries = add_summaries + self._batched_prediction_tensor_names = [] + self._expected_loss_weights_fn = expected_loss_weights_fn + self._use_confidences_as_targets = use_confidences_as_targets + self._implicit_example_weight = implicit_example_weight + + self._equalization_loss_config = equalization_loss_config + + self._return_raw_detections_during_predict = ( + return_raw_detections_during_predict) + + @property + def feature_extractor(self): + return self._feature_extractor + + @property + def anchors(self): + if not self._anchors: + raise RuntimeError('anchors have not been constructed yet!') + if not isinstance(self._anchors, box_list.BoxList): + raise RuntimeError('anchors should be a BoxList object, but is not.') + return self._anchors + + @property + def batched_prediction_tensor_names(self): + if not self._batched_prediction_tensor_names: + raise RuntimeError('Must call predict() method to get batched prediction ' + 'tensor names.') + return self._batched_prediction_tensor_names + + def preprocess(self, inputs): + """Feature-extractor specific preprocessing. + + SSD meta architecture uses a default clip_window of [0, 0, 1, 1] during + post-processing. On calling `preprocess` method, clip_window gets updated + based on `true_image_shapes` returned by `image_resizer_fn`. + + Args: + inputs: a [batch, height_in, width_in, channels] float tensor representing + a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: a [batch, height_out, width_out, channels] float + tensor representing a batch of images. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. 
+
+    Raises:
+      ValueError: if inputs tensor does not have type tf.float32
+    """
+    with tf.name_scope('Preprocessor'):
+      normalized_inputs = self._feature_extractor.preprocess(inputs)
+      return shape_utils.resize_images_and_return_shapes(
+          normalized_inputs, self._image_resizer_fn)
+
+  def _compute_clip_window(self, preprocessed_images, true_image_shapes):
+    """Computes clip window to use during post_processing.
+
+    Computes a new clip window to use during post-processing based on
+    `resized_image_shapes` and `true_image_shapes` only if `preprocess` method
+    has been called. Otherwise returns a default clip window of [0, 0, 1, 1].
+
+    Args:
+      preprocessed_images: the [batch, height, width, channels] image
+        tensor.
+      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
+        of the form [height, width, channels] indicating the shapes
+        of true images in the resized images, as resized images can be padded
+        with zeros. Or None if the clip window should cover the full image.
+
+    Returns:
+      a 2-D float32 tensor of the form [batch_size, 4] containing the clip
+      window for each image in the batch in normalized coordinates (relative to
+      the resized dimensions) where each clip window is of the form [ymin, xmin,
+      ymax, xmax] or a default clip window of [0, 0, 1, 1].
+
+    """
+    if true_image_shapes is None:
+      return tf.constant([0, 0, 1, 1], dtype=tf.float32)
+
+    resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape(
+        preprocessed_images)
+    true_heights, true_widths, _ = tf.unstack(
+        tf.cast(true_image_shapes, dtype=tf.float32), axis=1)
+    padded_height = tf.cast(resized_inputs_shape[1], dtype=tf.float32)
+    padded_width = tf.cast(resized_inputs_shape[2], dtype=tf.float32)
+    return tf.stack(
+        [
+            tf.zeros_like(true_heights),
+            tf.zeros_like(true_widths), true_heights / padded_height,
+            true_widths / padded_width
+        ],
+        axis=1)
+
+  def predict(self, preprocessed_inputs, true_image_shapes):
+    """Predicts unpostprocessed tensors from input tensor.
+
+    This function takes an input batch of images and runs it through the
+    forward pass of the network to yield unpostprocessed predictions.
+
+    A side effect of calling the predict method is that self._anchors is
+    populated with a box_list.BoxList of anchors. These anchors must be
+    constructed before the postprocess or loss functions can be called.
+
+    Args:
+      preprocessed_inputs: a [batch, height, width, channels] image tensor.
+      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
+        of the form [height, width, channels] indicating the shapes
+        of true images in the resized images, as resized images can be padded
+        with zeros.
+
+    Returns:
+      prediction_dict: a dictionary holding "raw" prediction tensors:
+        1) preprocessed_inputs: the [batch, height, width, channels] image
+          tensor.
+        2) box_encodings: 4-D float tensor of shape [batch_size, num_anchors,
+          box_code_dimension] containing predicted boxes.
+        3) class_predictions_with_background: 3-D float tensor of shape
+          [batch_size, num_anchors, num_classes+1] containing class predictions
+          (logits) for each of the anchors. Note that this tensor *includes*
+          background class predictions (at class index 0).
+        4) feature_maps: a list of tensors where the ith tensor has shape
+          [batch, height_i, width_i, depth_i].
+        5) anchors: 2-D float tensor of shape [num_anchors, 4] containing
+          the generated anchors in normalized coordinates.
+ 6) final_anchors: 3-D float tensor of shape [batch_size, num_anchors, 4] + containing the generated anchors in normalized coordinates. + If self._return_raw_detections_during_predict is True, the dictionary + will also contain: + 7) raw_detection_boxes: a 4-D float32 tensor with shape + [batch_size, self.max_num_proposals, 4] in normalized coordinates. + 8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape + [batch_size, self.max_num_proposals]. + """ + if self._inplace_batchnorm_update: + batchnorm_updates_collections = None + else: + batchnorm_updates_collections = tf.GraphKeys.UPDATE_OPS + if self._feature_extractor.is_keras_model: + feature_maps = self._feature_extractor(preprocessed_inputs) + else: + with slim.arg_scope([slim.batch_norm], + is_training=(self._is_training and + not self._freeze_batchnorm), + updates_collections=batchnorm_updates_collections): + with tf.variable_scope(None, self._extract_features_scope, + [preprocessed_inputs]): + feature_maps = self._feature_extractor.extract_features( + preprocessed_inputs) + + feature_map_spatial_dims = self._get_feature_map_spatial_dims( + feature_maps) + image_shape = shape_utils.combined_static_and_dynamic_shape( + preprocessed_inputs) + boxlist_list = self._anchor_generator.generate( + feature_map_spatial_dims, + im_height=image_shape[1], + im_width=image_shape[2]) + self._anchors = box_list_ops.concatenate(boxlist_list) + if self._box_predictor.is_keras_model: + predictor_results_dict = self._box_predictor(feature_maps) + else: + with slim.arg_scope([slim.batch_norm], + is_training=(self._is_training and + not self._freeze_batchnorm), + updates_collections=batchnorm_updates_collections): + predictor_results_dict = self._box_predictor.predict( + feature_maps, self._anchor_generator.num_anchors_per_location()) + predictions_dict = { + 'preprocessed_inputs': + preprocessed_inputs, + 'feature_maps': + feature_maps, + 'anchors': + self._anchors.get(), + 'final_anchors': + tf.tile( + tf.expand_dims(self._anchors.get(), 0), [image_shape[0], 1, 1]) + } + for prediction_key, prediction_list in iter(predictor_results_dict.items()): + prediction = tf.concat(prediction_list, axis=1) + if (prediction_key == 'box_encodings' and prediction.shape.ndims == 4 and + prediction.shape[2] == 1): + prediction = tf.squeeze(prediction, axis=2) + predictions_dict[prediction_key] = prediction + if self._return_raw_detections_during_predict: + predictions_dict.update(self._raw_detections_and_feature_map_inds( + predictions_dict['box_encodings'], boxlist_list)) + self._batched_prediction_tensor_names = [x for x in predictions_dict + if x != 'anchors'] + return predictions_dict + + def _raw_detections_and_feature_map_inds(self, box_encodings, boxlist_list): + anchors = self._anchors.get() + raw_detection_boxes, _ = self._batch_decode(box_encodings, anchors) + batch_size, _, _ = shape_utils.combined_static_and_dynamic_shape( + raw_detection_boxes) + feature_map_indices = ( + self._anchor_generator.anchor_index_to_feature_map_index(boxlist_list)) + feature_map_indices_batched = tf.tile( + tf.expand_dims(feature_map_indices, 0), + multiples=[batch_size, 1]) + return { + fields.PredictionFields.raw_detection_boxes: raw_detection_boxes, + fields.PredictionFields.raw_detection_feature_map_indices: + feature_map_indices_batched + } + + def _get_feature_map_spatial_dims(self, feature_maps): + """Return list of spatial dimensions for each feature map in a list. 
+ + Args: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i]. + + Returns: + a list of pairs (height, width) for each feature map in feature_maps + """ + feature_map_shapes = [ + shape_utils.combined_static_and_dynamic_shape( + feature_map) for feature_map in feature_maps + ] + return [(shape[1], shape[2]) for shape in feature_map_shapes] + + def postprocess(self, prediction_dict, true_image_shapes): + """Converts prediction tensors to final detections. + + This function converts raw predictions tensors to final detection results by + slicing off the background class, decoding box predictions and applying + non max suppression and clipping to the image window. + + See base class for output format conventions. Note also that by default, + scores are to be interpreted as logits, but if a score_conversion_fn is + used, then scores are remapped (and may thus have a different + interpretation). + + Args: + prediction_dict: a dictionary holding prediction tensors with + 1) preprocessed_inputs: a [batch, height, width, channels] image + tensor. + 2) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, + box_code_dimension] containing predicted boxes. + 3) class_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, num_classes+1] containing class predictions + (logits) for each of the anchors. Note that this tensor *includes* + background class predictions. + 4) mask_predictions: (optional) a 5-D float tensor of shape + [batch_size, num_anchors, q, mask_height, mask_width]. `q` can be + either number of classes or 1 depending on whether a separate mask is + predicted per class. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. Or None, if the clip window should cover the full image. + + Returns: + detections: a dictionary containing the following fields + detection_boxes: [batch, max_detections, 4] tensor with post-processed + detection boxes. + detection_scores: [batch, max_detections] tensor with scalar scores for + post-processed detection boxes. + detection_multiclass_scores: [batch, max_detections, + num_classes_with_background] tensor with class score distribution for + post-processed detection boxes including background class if any. + detection_classes: [batch, max_detections] tensor with classes for + post-processed detection classes. + detection_keypoints: [batch, max_detections, num_keypoints, 2] (if + encoded in the prediction_dict 'box_encodings') + detection_masks: [batch_size, max_detections, mask_height, mask_width] + (optional) + num_detections: [batch] + raw_detection_boxes: [batch, total_detections, 4] tensor with decoded + detection boxes before Non-Max Suppression. + raw_detection_score: [batch, total_detections, + num_classes_with_background] tensor of multi-class scores for raw + detection boxes. + Raises: + ValueError: if prediction_dict does not contain `box_encodings` or + `class_predictions_with_background` fields. 
+ """ + if ('box_encodings' not in prediction_dict or + 'class_predictions_with_background' not in prediction_dict): + raise ValueError('prediction_dict does not contain expected entries.') + if 'anchors' not in prediction_dict: + prediction_dict['anchors'] = self.anchors.get() + with tf.name_scope('Postprocessor'): + preprocessed_images = prediction_dict['preprocessed_inputs'] + box_encodings = prediction_dict['box_encodings'] + box_encodings = tf.identity(box_encodings, 'raw_box_encodings') + class_predictions_with_background = ( + prediction_dict['class_predictions_with_background']) + detection_boxes, detection_keypoints = self._batch_decode( + box_encodings, prediction_dict['anchors']) + detection_boxes = tf.identity(detection_boxes, 'raw_box_locations') + detection_boxes = tf.expand_dims(detection_boxes, axis=2) + + detection_scores_with_background = self._score_conversion_fn( + class_predictions_with_background) + detection_scores = tf.identity(detection_scores_with_background, + 'raw_box_scores') + if self._add_background_class or self._explicit_background_class: + detection_scores = tf.slice(detection_scores, [0, 0, 1], [-1, -1, -1]) + additional_fields = None + + batch_size = ( + shape_utils.combined_static_and_dynamic_shape(preprocessed_images)[0]) + + if 'feature_maps' in prediction_dict: + feature_map_list = [] + for feature_map in prediction_dict['feature_maps']: + feature_map_list.append(tf.reshape(feature_map, [batch_size, -1])) + box_features = tf.concat(feature_map_list, 1) + box_features = tf.identity(box_features, 'raw_box_features') + additional_fields = { + 'multiclass_scores': detection_scores_with_background + } + if self._anchors is not None: + num_boxes = (self._anchors.num_boxes_static() or + self._anchors.num_boxes()) + anchor_indices = tf.range(num_boxes) + batch_anchor_indices = tf.tile( + tf.expand_dims(anchor_indices, 0), [batch_size, 1]) + # All additional fields need to be float. 
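+          # anchor_indices are integers, so they are carried through NMS as
+          # float32 here and cast back to int32 when the detection dict is
+          # assembled further down.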
+ additional_fields.update({ + 'anchor_indices': tf.cast(batch_anchor_indices, tf.float32), + }) + if detection_keypoints is not None: + detection_keypoints = tf.identity( + detection_keypoints, 'raw_keypoint_locations') + additional_fields[fields.BoxListFields.keypoints] = detection_keypoints + + (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, + nmsed_additional_fields, + num_detections) = self._non_max_suppression_fn( + detection_boxes, + detection_scores, + clip_window=self._compute_clip_window( + preprocessed_images, true_image_shapes), + additional_fields=additional_fields, + masks=prediction_dict.get('mask_predictions')) + + detection_dict = { + fields.DetectionResultFields.detection_boxes: + nmsed_boxes, + fields.DetectionResultFields.detection_scores: + nmsed_scores, + fields.DetectionResultFields.detection_classes: + nmsed_classes, + fields.DetectionResultFields.num_detections: + tf.cast(num_detections, dtype=tf.float32), + fields.DetectionResultFields.raw_detection_boxes: + tf.squeeze(detection_boxes, axis=2), + fields.DetectionResultFields.raw_detection_scores: + detection_scores_with_background + } + if (nmsed_additional_fields is not None and + fields.InputDataFields.multiclass_scores in nmsed_additional_fields): + detection_dict[ + fields.DetectionResultFields.detection_multiclass_scores] = ( + nmsed_additional_fields[ + fields.InputDataFields.multiclass_scores]) + if (nmsed_additional_fields is not None and + 'anchor_indices' in nmsed_additional_fields): + detection_dict.update({ + fields.DetectionResultFields.detection_anchor_indices: + tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32), + }) + if (nmsed_additional_fields is not None and + fields.BoxListFields.keypoints in nmsed_additional_fields): + detection_dict[fields.DetectionResultFields.detection_keypoints] = ( + nmsed_additional_fields[fields.BoxListFields.keypoints]) + if nmsed_masks is not None: + detection_dict[ + fields.DetectionResultFields.detection_masks] = nmsed_masks + return detection_dict + + def loss(self, prediction_dict, true_image_shapes, scope=None): + """Compute scalar loss tensors with respect to provided groundtruth. + + Calling this function requires that groundtruth tensors have been + provided via the provide_groundtruth function. + + Args: + prediction_dict: a dictionary holding prediction tensors with + 1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, + box_code_dimension] containing predicted boxes. + 2) class_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, num_classes+1] containing class predictions + (logits) for each of the anchors. Note that this tensor *includes* + background class predictions. + true_image_shapes: int32 tensor of shape [batch, 3] where each row is + of the form [height, width, channels] indicating the shapes + of true images in the resized images, as resized images can be padded + with zeros. + scope: Optional scope name. + + Returns: + a dictionary mapping loss keys (`localization_loss` and + `classification_loss`) to scalar tensors representing corresponding loss + values. 
+ """ + with tf.name_scope(scope, 'Loss', prediction_dict.values()): + keypoints = None + if self.groundtruth_has_field(fields.BoxListFields.keypoints): + keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints) + weights = None + if self.groundtruth_has_field(fields.BoxListFields.weights): + weights = self.groundtruth_lists(fields.BoxListFields.weights) + confidences = None + if self.groundtruth_has_field(fields.BoxListFields.confidences): + confidences = self.groundtruth_lists(fields.BoxListFields.confidences) + (batch_cls_targets, batch_cls_weights, batch_reg_targets, + batch_reg_weights, batch_match) = self._assign_targets( + self.groundtruth_lists(fields.BoxListFields.boxes), + self.groundtruth_lists(fields.BoxListFields.classes), + keypoints, weights, confidences) + match_list = [matcher.Match(match) for match in tf.unstack(batch_match)] + if self._add_summaries: + self._summarize_target_assignment( + self.groundtruth_lists(fields.BoxListFields.boxes), match_list) + + if self._random_example_sampler: + batch_cls_per_anchor_weights = tf.reduce_mean( + batch_cls_weights, axis=-1) + batch_sampled_indicator = tf.cast( + shape_utils.static_or_dynamic_map_fn( + self._minibatch_subsample_fn, + [batch_cls_targets, batch_cls_per_anchor_weights], + dtype=tf.bool, + parallel_iterations=self._parallel_iterations, + back_prop=True), dtype=tf.float32) + batch_reg_weights = tf.multiply(batch_sampled_indicator, + batch_reg_weights) + batch_cls_weights = tf.multiply( + tf.expand_dims(batch_sampled_indicator, -1), + batch_cls_weights) + + losses_mask = None + if self.groundtruth_has_field(fields.InputDataFields.is_annotated): + losses_mask = tf.stack(self.groundtruth_lists( + fields.InputDataFields.is_annotated)) + + + location_losses = self._localization_loss( + prediction_dict['box_encodings'], + batch_reg_targets, + ignore_nan_targets=True, + weights=batch_reg_weights, + losses_mask=losses_mask) + + cls_losses = self._classification_loss( + prediction_dict['class_predictions_with_background'], + batch_cls_targets, + weights=batch_cls_weights, + losses_mask=losses_mask) + + if self._expected_loss_weights_fn: + # Need to compute losses for assigned targets against the + # unmatched_class_label as well as their assigned targets. 
+ # simplest thing (but wasteful) is just to calculate all losses + # twice + batch_size, num_anchors, num_classes = batch_cls_targets.get_shape() + unmatched_targets = tf.ones([batch_size, num_anchors, 1 + ]) * self._unmatched_class_label + + unmatched_cls_losses = self._classification_loss( + prediction_dict['class_predictions_with_background'], + unmatched_targets, + weights=batch_cls_weights, + losses_mask=losses_mask) + + if cls_losses.get_shape().ndims == 3: + batch_size, num_anchors, num_classes = cls_losses.get_shape() + cls_losses = tf.reshape(cls_losses, [batch_size, -1]) + unmatched_cls_losses = tf.reshape(unmatched_cls_losses, + [batch_size, -1]) + batch_cls_targets = tf.reshape( + batch_cls_targets, [batch_size, num_anchors * num_classes, -1]) + batch_cls_targets = tf.concat( + [1 - batch_cls_targets, batch_cls_targets], axis=-1) + + location_losses = tf.tile(location_losses, [1, num_classes]) + + foreground_weights, background_weights = ( + self._expected_loss_weights_fn(batch_cls_targets)) + + cls_losses = ( + foreground_weights * cls_losses + + background_weights * unmatched_cls_losses) + + location_losses *= foreground_weights + + classification_loss = tf.reduce_sum(cls_losses) + localization_loss = tf.reduce_sum(location_losses) + elif self._hard_example_miner: + cls_losses = ops.reduce_sum_trailing_dimensions(cls_losses, ndims=2) + (localization_loss, classification_loss) = self._apply_hard_mining( + location_losses, cls_losses, prediction_dict, match_list) + if self._add_summaries: + self._hard_example_miner.summarize() + else: + cls_losses = ops.reduce_sum_trailing_dimensions(cls_losses, ndims=2) + localization_loss = tf.reduce_sum(location_losses) + classification_loss = tf.reduce_sum(cls_losses) + + # Optionally normalize by number of positive matches + normalizer = tf.constant(1.0, dtype=tf.float32) + if self._normalize_loss_by_num_matches: + normalizer = tf.maximum(tf.cast(tf.reduce_sum(batch_reg_weights), + dtype=tf.float32), + 1.0) + + localization_loss_normalizer = normalizer + if self._normalize_loc_loss_by_codesize: + localization_loss_normalizer *= self._box_coder.code_size + localization_loss = tf.multiply((self._localization_loss_weight / + localization_loss_normalizer), + localization_loss, + name='localization_loss') + classification_loss = tf.multiply((self._classification_loss_weight / + normalizer), classification_loss, + name='classification_loss') + + loss_dict = { + 'Loss/localization_loss': localization_loss, + 'Loss/classification_loss': classification_loss + } + + + return loss_dict + + def _minibatch_subsample_fn(self, inputs): + """Randomly samples anchors for one image. + + Args: + inputs: a list of 2 inputs. First one is a tensor of shape [num_anchors, + num_classes] indicating targets assigned to each anchor. Second one + is a tensor of shape [num_anchors] indicating the class weight of each + anchor. + + Returns: + batch_sampled_indicator: bool tensor of shape [num_anchors] indicating + whether the anchor should be selected for loss computation. + """ + cls_targets, cls_weights = inputs + if self._add_background_class: + # Set background_class bits to 0 so that the positives_indicator + # computation would not consider background class. 
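+      # An anchor then counts as a positive only if it was assigned at least
+      # one foreground class (its row sum below is nonzero).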
+ background_class = tf.zeros_like(tf.slice(cls_targets, [0, 0], [-1, 1])) + regular_class = tf.slice(cls_targets, [0, 1], [-1, -1]) + cls_targets = tf.concat([background_class, regular_class], 1) + positives_indicator = tf.reduce_sum(cls_targets, axis=1) + return self._random_example_sampler.subsample( + tf.cast(cls_weights, tf.bool), + batch_size=None, + labels=tf.cast(positives_indicator, tf.bool)) + + def _summarize_anchor_classification_loss(self, class_ids, cls_losses): + positive_indices = tf.where(tf.greater(class_ids, 0)) + positive_anchor_cls_loss = tf.squeeze( + tf.gather(cls_losses, positive_indices), axis=1) + visualization_utils.add_cdf_image_summary(positive_anchor_cls_loss, + 'PositiveAnchorLossCDF') + negative_indices = tf.where(tf.equal(class_ids, 0)) + negative_anchor_cls_loss = tf.squeeze( + tf.gather(cls_losses, negative_indices), axis=1) + visualization_utils.add_cdf_image_summary(negative_anchor_cls_loss, + 'NegativeAnchorLossCDF') + + def _assign_targets(self, + groundtruth_boxes_list, + groundtruth_classes_list, + groundtruth_keypoints_list=None, + groundtruth_weights_list=None, + groundtruth_confidences_list=None): + """Assign groundtruth targets. + + Adds a background class to each one-hot encoding of groundtruth classes + and uses target assigner to obtain regression and classification targets. + + Args: + groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4] + containing coordinates of the groundtruth boxes. + Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] + format and assumed to be normalized and clipped + relative to the image window with y_min <= y_max and x_min <= x_max. + groundtruth_classes_list: a list of 2-D one-hot (or k-hot) tensors of + shape [num_boxes, num_classes] containing the class targets with the 0th + index assumed to map to the first non-background class. + groundtruth_keypoints_list: (optional) a list of 3-D tensors of shape + [num_boxes, num_keypoints, 2] + groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape + [num_boxes] containing weights for groundtruth boxes. + groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape + [num_boxes, num_classes] containing class confidences for + groundtruth boxes. + + Returns: + batch_cls_targets: a tensor with shape [batch_size, num_anchors, + num_classes], + batch_cls_weights: a tensor with shape [batch_size, num_anchors], + batch_reg_targets: a tensor with shape [batch_size, num_anchors, + box_code_dimension] + batch_reg_weights: a tensor with shape [batch_size, num_anchors], + match: an int32 tensor of shape [batch_size, num_anchors], containing + result of anchor groundtruth matching. Each position in the tensor + indicates an anchor and holds the following meaning: + (1) if match[x, i] >= 0, anchor i is matched with groundtruth + match[x, i]. + (2) if match[x, i]=-1, anchor i is marked to be background . + (3) if match[x, i]=-2, anchor i is ignored since it is not background + and does not have sufficient overlap to call it a foreground. 
+ """ + groundtruth_boxlists = [ + box_list.BoxList(boxes) for boxes in groundtruth_boxes_list + ] + train_using_confidences = (self._is_training and + self._use_confidences_as_targets) + if self._add_background_class: + groundtruth_classes_with_background_list = [ + tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT') + for one_hot_encoding in groundtruth_classes_list + ] + if train_using_confidences: + groundtruth_confidences_with_background_list = [ + tf.pad(groundtruth_confidences, [[0, 0], [1, 0]], mode='CONSTANT') + for groundtruth_confidences in groundtruth_confidences_list + ] + else: + groundtruth_classes_with_background_list = groundtruth_classes_list + + if groundtruth_keypoints_list is not None: + for boxlist, keypoints in zip( + groundtruth_boxlists, groundtruth_keypoints_list): + boxlist.add_field(fields.BoxListFields.keypoints, keypoints) + if train_using_confidences: + return target_assigner.batch_assign_confidences( + self._target_assigner, + self.anchors, + groundtruth_boxlists, + groundtruth_confidences_with_background_list, + groundtruth_weights_list, + self._unmatched_class_label, + self._add_background_class, + self._implicit_example_weight) + else: + return target_assigner.batch_assign_targets( + self._target_assigner, + self.anchors, + groundtruth_boxlists, + groundtruth_classes_with_background_list, + self._unmatched_class_label, + groundtruth_weights_list) + + def _summarize_target_assignment(self, groundtruth_boxes_list, match_list): + """Creates tensorflow summaries for the input boxes and anchors. + + This function creates four summaries corresponding to the average + number (over images in a batch) of (1) groundtruth boxes, (2) anchors + marked as positive, (3) anchors marked as negative, and (4) anchors marked + as ignored. + + Args: + groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4] + containing corners of the groundtruth boxes. + match_list: a list of matcher.Match objects encoding the match between + anchors and groundtruth boxes for each image of the batch, + with rows of the Match objects corresponding to groundtruth boxes + and columns corresponding to anchors. + """ + # TODO(rathodv): Add a test for these summaries. 
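As an illustrative aside (not part of the patch): the match-value convention documented for _assign_targets above is exactly what the summaries defined next count per image. A toy match vector (values assumed) makes the bookkeeping concrete.

import numpy as np

match = np.array([3, 0, -1, -1, -2, 7])           # one entry per anchor
num_matched_columns = int((match >= 0).sum())     # 3 anchors matched to groundtruth
num_unmatched_columns = int((match == -1).sum())  # 2 anchors marked as background
num_ignored_columns = int((match == -2).sum())    # 1 anchor ignored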
+ try: + # TODO(kaftan): Integrate these summaries into the v2 style loops + with tf.compat.v2.init_scope(): + if tf.compat.v2.executing_eagerly(): + return + except AttributeError: + pass + + avg_num_gt_boxes = tf.reduce_mean( + tf.cast( + tf.stack([tf.shape(x)[0] for x in groundtruth_boxes_list]), + dtype=tf.float32)) + avg_num_matched_gt_boxes = tf.reduce_mean( + tf.cast( + tf.stack([match.num_matched_rows() for match in match_list]), + dtype=tf.float32)) + avg_pos_anchors = tf.reduce_mean( + tf.cast( + tf.stack([match.num_matched_columns() for match in match_list]), + dtype=tf.float32)) + avg_neg_anchors = tf.reduce_mean( + tf.cast( + tf.stack([match.num_unmatched_columns() for match in match_list]), + dtype=tf.float32)) + avg_ignored_anchors = tf.reduce_mean( + tf.cast( + tf.stack([match.num_ignored_columns() for match in match_list]), + dtype=tf.float32)) + + tf.summary.scalar('AvgNumGroundtruthBoxesPerImage', + avg_num_gt_boxes, + family='TargetAssignment') + tf.summary.scalar('AvgNumGroundtruthBoxesMatchedPerImage', + avg_num_matched_gt_boxes, + family='TargetAssignment') + tf.summary.scalar('AvgNumPositiveAnchorsPerImage', + avg_pos_anchors, + family='TargetAssignment') + tf.summary.scalar('AvgNumNegativeAnchorsPerImage', + avg_neg_anchors, + family='TargetAssignment') + tf.summary.scalar('AvgNumIgnoredAnchorsPerImage', + avg_ignored_anchors, + family='TargetAssignment') + + def _apply_hard_mining(self, location_losses, cls_losses, prediction_dict, + match_list): + """Applies hard mining to anchorwise losses. + + Args: + location_losses: Float tensor of shape [batch_size, num_anchors] + representing anchorwise location losses. + cls_losses: Float tensor of shape [batch_size, num_anchors] + representing anchorwise classification losses. + prediction_dict: p a dictionary holding prediction tensors with + 1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, + box_code_dimension] containing predicted boxes. + 2) class_predictions_with_background: 3-D float tensor of shape + [batch_size, num_anchors, num_classes+1] containing class predictions + (logits) for each of the anchors. Note that this tensor *includes* + background class predictions. + 3) anchors: (optional) 2-D float tensor of shape [num_anchors, 4]. + match_list: a list of matcher.Match objects encoding the match between + anchors and groundtruth boxes for each image of the batch, + with rows of the Match objects corresponding to groundtruth boxes + and columns corresponding to anchors. + + Returns: + mined_location_loss: a float scalar with sum of localization losses from + selected hard examples. + mined_cls_loss: a float scalar with sum of classification losses from + selected hard examples. 
+ """ + class_predictions = prediction_dict['class_predictions_with_background'] + if self._add_background_class: + class_predictions = tf.slice(class_predictions, [0, 0, 1], [-1, -1, -1]) + + if 'anchors' not in prediction_dict: + prediction_dict['anchors'] = self.anchors.get() + decoded_boxes, _ = self._batch_decode(prediction_dict['box_encodings'], + prediction_dict['anchors']) + decoded_box_tensors_list = tf.unstack(decoded_boxes) + class_prediction_list = tf.unstack(class_predictions) + decoded_boxlist_list = [] + for box_location, box_score in zip(decoded_box_tensors_list, + class_prediction_list): + decoded_boxlist = box_list.BoxList(box_location) + decoded_boxlist.add_field('scores', box_score) + decoded_boxlist_list.append(decoded_boxlist) + return self._hard_example_miner( + location_losses=location_losses, + cls_losses=cls_losses, + decoded_boxlist_list=decoded_boxlist_list, + match_list=match_list) + + def _batch_decode(self, box_encodings, anchors): + """Decodes a batch of box encodings with respect to the anchors. + + Args: + box_encodings: A float32 tensor of shape + [batch_size, num_anchors, box_code_size] containing box encodings. + anchors: A tensor of shape [num_anchors, 4]. + + Returns: + decoded_boxes: A float32 tensor of shape + [batch_size, num_anchors, 4] containing the decoded boxes. + decoded_keypoints: A float32 tensor of shape + [batch_size, num_anchors, num_keypoints, 2] containing the decoded + keypoints if present in the input `box_encodings`, None otherwise. + """ + combined_shape = shape_utils.combined_static_and_dynamic_shape( + box_encodings) + batch_size = combined_shape[0] + tiled_anchor_boxes = tf.tile(tf.expand_dims(anchors, 0), [batch_size, 1, 1]) + tiled_anchors_boxlist = box_list.BoxList( + tf.reshape(tiled_anchor_boxes, [-1, 4])) + decoded_boxes = self._box_coder.decode( + tf.reshape(box_encodings, [-1, self._box_coder.code_size]), + tiled_anchors_boxlist) + decoded_keypoints = None + if decoded_boxes.has_field(fields.BoxListFields.keypoints): + decoded_keypoints = decoded_boxes.get_field( + fields.BoxListFields.keypoints) + num_keypoints = decoded_keypoints.get_shape()[1] + decoded_keypoints = tf.reshape( + decoded_keypoints, + tf.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) + decoded_boxes = tf.reshape(decoded_boxes.get(), tf.stack( + [combined_shape[0], combined_shape[1], 4])) + return decoded_boxes, decoded_keypoints + + def regularization_losses(self): + """Returns a list of regularization losses for this model. + + Returns a list of regularization losses for this model that the estimator + needs to use during training/optimization. + + Returns: + A list of regularization loss tensors. + """ + losses = [] + slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + # Copy the slim losses to avoid modifying the collection + if slim_losses: + losses.extend(slim_losses) + if self._box_predictor.is_keras_model: + losses.extend(self._box_predictor.losses) + if self._feature_extractor.is_keras_model: + losses.extend(self._feature_extractor.losses) + return losses + + def restore_map(self, + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=False): + """Returns a map of variables to load from a foreign checkpoint. + + See parent class for details. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. 
+ Valid values: `detection`, `classification`. Default 'detection'. + load_all_detection_checkpoint_vars: whether to load all variables (when + `fine_tune_checkpoint_type` is `detection`). If False, only variables + within the feature extractor scope are included. Default False. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + Raises: + ValueError: if fine_tune_checkpoint_type is neither `classification` + nor `detection`. + """ + if fine_tune_checkpoint_type == 'classification': + return self._feature_extractor.restore_from_classification_checkpoint_fn( + self._extract_features_scope) + + elif fine_tune_checkpoint_type == 'detection': + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + var_name = variable.op.name + if load_all_detection_checkpoint_vars: + variables_to_restore[var_name] = variable + else: + if var_name.startswith(self._extract_features_scope): + variables_to_restore[var_name] = variable + return variables_to_restore + + else: + raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( + fine_tune_checkpoint_type)) + + def restore_from_objects(self, fine_tune_checkpoint_type='detection'): + """Returns a map of Trackable objects to load from a foreign checkpoint. + + Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module + or Checkpoint). This enables the model to initialize based on weights from + another task. For example, the feature extractor variables from a + classification model can be used to bootstrap training of an object + detector. When loading from an object detection model, the checkpoint model + should have the same parameters as this detection model with exception of + the num_classes parameter. + + Note that this function is intended to be used to restore Keras-based + models when running Tensorflow 2, whereas restore_map (above) is intended + to be used to restore Slim-based models when running Tensorflow 1.x. + + Args: + fine_tune_checkpoint_type: whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`. Default 'detection'. + + Returns: + A dict mapping keys to Trackable objects (tf.Module or Checkpoint). + """ + if fine_tune_checkpoint_type == 'classification': + return { + 'feature_extractor': + self._feature_extractor.classification_backbone + } + elif fine_tune_checkpoint_type == 'detection': + fake_model = tf.train.Checkpoint( + _feature_extractor=self._feature_extractor) + return {'model': fake_model} + else: + raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format( + fine_tune_checkpoint_type)) + + def updates(self): + """Returns a list of update operators for this model. + + Returns a list of update operators for this model that must be executed at + each training step. The estimator's train op needs to have a control + dependency on these updates. + + Returns: + A list of update operators. 
+ """ + update_ops = [] + slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + # Copy the slim ops to avoid modifying the collection + if slim_update_ops: + update_ops.extend(slim_update_ops) + if self._box_predictor.is_keras_model: + update_ops.extend(self._box_predictor.get_updates_for(None)) + update_ops.extend(self._box_predictor.get_updates_for( + self._box_predictor.inputs)) + if self._feature_extractor.is_keras_model: + update_ops.extend(self._feature_extractor.get_updates_for(None)) + update_ops.extend(self._feature_extractor.get_updates_for( + self._feature_extractor.inputs)) + return update_ops diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93c83c652b18c457cc073085aa0d36cdd71393e3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch_test.py new file mode 100644 index 0000000000000000000000000000000000000000..585eb1778f72deae1aeee45bfbf1d18fa3af1212 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch_test.py @@ -0,0 +1,680 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.meta_architectures.ssd_meta_arch.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized + +import numpy as np +import six +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.meta_architectures import ssd_meta_arch_test_lib +from object_detection.protos import model_pb2 +from object_detection.utils import test_utils + +# pylint: disable=g-import-not-at-top +try: + import tf_slim as slim +except ImportError: + # TF 2.0 doesn't ship with contrib. 
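As an illustrative aside (not part of the patch): regularization_losses() and updates() above are meant to be consumed by the training loop. A minimal TF1-style sketch of that contract follows; the helper name and its arguments are assumed.

import tensorflow.compat.v1 as tf

def build_train_op(model, total_loss, optimizer):
  """Hypothetical helper showing how a trainer would use the two methods."""
  reg_losses = model.regularization_losses()
  if reg_losses:
    total_loss = total_loss + tf.add_n(reg_losses)
  # The train op must carry a control dependency on the model's update ops
  # (e.g. batch-norm moving-average updates), as the docstring above requires.
  with tf.control_dependencies(model.updates()):
    return optimizer.minimize(total_loss)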
+ pass +# pylint: enable=g-import-not-at-top + +keras = tf.keras.layers + + +class SsdMetaArchTest(ssd_meta_arch_test_lib.SSDMetaArchTestBase, + parameterized.TestCase): + + def _create_model( + self, + apply_hard_mining=True, + normalize_loc_loss_by_codesize=False, + add_background_class=True, + random_example_sampling=False, + expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE, + min_num_negative_samples=1, + desired_negative_sampling_ratio=3, + predict_mask=False, + use_static_shapes=False, + nms_max_size_per_class=5, + calibration_mapping_value=None, + return_raw_detections_during_predict=False): + return super(SsdMetaArchTest, self)._create_model( + model_fn=ssd_meta_arch.SSDMetaArch, + apply_hard_mining=apply_hard_mining, + normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, + add_background_class=add_background_class, + random_example_sampling=random_example_sampling, + expected_loss_weights=expected_loss_weights, + min_num_negative_samples=min_num_negative_samples, + desired_negative_sampling_ratio=desired_negative_sampling_ratio, + predict_mask=predict_mask, + use_static_shapes=use_static_shapes, + nms_max_size_per_class=nms_max_size_per_class, + calibration_mapping_value=calibration_mapping_value, + return_raw_detections_during_predict=( + return_raw_detections_during_predict)) + + def test_preprocess_preserves_shapes_with_dynamic_input_image(self): + width = tf.random.uniform([], minval=5, maxval=10, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, 5, width, 3]) + image = tf.random.uniform(shape) + model, _, _, _ = self._create_model() + preprocessed_inputs, _ = model.preprocess(image) + self.assertTrue( + preprocessed_inputs.shape.is_compatible_with([None, 5, None, 3])) + + def test_preprocess_preserves_shape_with_static_input_image(self): + image = tf.random.uniform([2, 3, 3, 3]) + model, _, _, _ = self._create_model() + preprocessed_inputs, _ = model.preprocess(image) + self.assertTrue(preprocessed_inputs.shape.is_compatible_with([2, 3, 3, 3])) + + def test_predict_result_shapes_on_image_with_dynamic_shape(self): + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, code_size = self._create_model() + + def graph_fn(): + size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, size, size, 3]) + image = tf.random.uniform(shape) + prediction_dict = model.predict(image, true_image_shapes=None) + self.assertIn('box_encodings', prediction_dict) + self.assertIn('class_predictions_with_background', prediction_dict) + self.assertIn('feature_maps', prediction_dict) + self.assertIn('anchors', prediction_dict) + self.assertIn('final_anchors', prediction_dict) + return (prediction_dict['box_encodings'], + prediction_dict['final_anchors'], + prediction_dict['class_predictions_with_background'], + tf.constant(num_anchors), batch) + (box_encodings_out, final_anchors, class_predictions_with_background, + num_anchors, batch_size) = self.execute_cpu(graph_fn, [], graph=g) + self.assertAllEqual(box_encodings_out.shape, + (batch_size, num_anchors, code_size)) + self.assertAllEqual(final_anchors.shape, + (batch_size, num_anchors, code_size)) + self.assertAllEqual( + class_predictions_with_background.shape, + (batch_size, num_anchors, num_classes + 1)) + + def test_predict_result_shapes_on_image_with_static_shape(self): + + with test_utils.GraphContextOrNone() as g: + model, num_classes, 
num_anchors, code_size = self._create_model() + + def graph_fn(input_image): + predictions = model.predict(input_image, true_image_shapes=None) + return (predictions['box_encodings'], + predictions['class_predictions_with_background'], + predictions['final_anchors']) + batch_size = 3 + image_size = 2 + channels = 3 + input_image = np.random.rand(batch_size, image_size, image_size, + channels).astype(np.float32) + expected_box_encodings_shape = (batch_size, num_anchors, code_size) + expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1) + final_anchors_shape = (batch_size, num_anchors, 4) + (box_encodings, class_predictions, final_anchors) = self.execute( + graph_fn, [input_image], graph=g) + self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape) + self.assertAllEqual(class_predictions.shape, + expected_class_predictions_shape) + self.assertAllEqual(final_anchors.shape, final_anchors_shape) + + def test_predict_with_raw_output_fields(self): + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, code_size = self._create_model( + return_raw_detections_during_predict=True) + + def graph_fn(input_image): + predictions = model.predict(input_image, true_image_shapes=None) + return (predictions['box_encodings'], + predictions['class_predictions_with_background'], + predictions['final_anchors'], + predictions['raw_detection_boxes'], + predictions['raw_detection_feature_map_indices']) + batch_size = 3 + image_size = 2 + channels = 3 + input_image = np.random.rand(batch_size, image_size, image_size, + channels).astype(np.float32) + expected_box_encodings_shape = (batch_size, num_anchors, code_size) + expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1) + final_anchors_shape = (batch_size, num_anchors, 4) + expected_raw_detection_boxes_shape = (batch_size, num_anchors, 4) + (box_encodings, class_predictions, final_anchors, raw_detection_boxes, + raw_detection_feature_map_indices) = self.execute( + graph_fn, [input_image], graph=g) + self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape) + self.assertAllEqual(class_predictions.shape, + expected_class_predictions_shape) + self.assertAllEqual(final_anchors.shape, final_anchors_shape) + self.assertAllEqual(raw_detection_boxes.shape, + expected_raw_detection_boxes_shape) + self.assertAllEqual(raw_detection_feature_map_indices, + np.zeros((batch_size, num_anchors))) + + def test_raw_detection_boxes_agree_predict_postprocess(self): + with test_utils.GraphContextOrNone() as g: + model, _, _, _ = self._create_model( + return_raw_detections_during_predict=True) + def graph_fn(): + size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, size, size, 3]) + image = tf.random.uniform(shape) + preprocessed_inputs, true_image_shapes = model.preprocess( + image) + prediction_dict = model.predict(preprocessed_inputs, + true_image_shapes) + raw_detection_boxes_predict = prediction_dict['raw_detection_boxes'] + detections = model.postprocess(prediction_dict, true_image_shapes) + raw_detection_boxes_postprocess = detections['raw_detection_boxes'] + return raw_detection_boxes_predict, raw_detection_boxes_postprocess + (raw_detection_boxes_predict_out, + raw_detection_boxes_postprocess_out) = self.execute_cpu(graph_fn, [], + graph=g) + self.assertAllEqual(raw_detection_boxes_predict_out, + raw_detection_boxes_postprocess_out) + + def 
test_postprocess_results_are_correct(self): + + with test_utils.GraphContextOrNone() as g: + model, _, _, _ = self._create_model() + + def graph_fn(): + size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, size, size, 3]) + image = tf.random.uniform(shape) + preprocessed_inputs, true_image_shapes = model.preprocess( + image) + prediction_dict = model.predict(preprocessed_inputs, + true_image_shapes) + detections = model.postprocess(prediction_dict, true_image_shapes) + return [ + batch, detections['detection_boxes'], detections['detection_scores'], + detections['detection_classes'], + detections['detection_multiclass_scores'], + detections['num_detections'], detections['raw_detection_boxes'], + detections['raw_detection_scores'], + detections['detection_anchor_indices'] + ] + + expected_boxes = [ + [ + [0, 0, .5, .5], + [0, .5, .5, 1], + [.5, 0, 1, .5], + [0, 0, 0, 0], # pruned prediction + [0, 0, 0, 0] + ], # padding + [ + [0, 0, .5, .5], + [0, .5, .5, 1], + [.5, 0, 1, .5], + [0, 0, 0, 0], # pruned prediction + [0, 0, 0, 0] + ] + ] # padding + expected_scores = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] + expected_multiclass_scores = [[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]] + + expected_classes = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] + expected_num_detections = np.array([3, 3]) + + expected_raw_detection_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]], + [[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.], + [0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]]] + expected_raw_detection_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0], [0, 0]]] + expected_detection_anchor_indices = [[0, 1, 2], [0, 1, 2]] + (batch, detection_boxes, detection_scores, detection_classes, + detection_multiclass_scores, num_detections, raw_detection_boxes, + raw_detection_scores, detection_anchor_indices) = self.execute_cpu( + graph_fn, [], graph=g) + for image_idx in range(batch): + self.assertTrue( + test_utils.first_rows_close_as_set( + detection_boxes[image_idx].tolist(), expected_boxes[image_idx])) + self.assertSameElements(detection_anchor_indices[image_idx], + expected_detection_anchor_indices[image_idx]) + self.assertAllClose(detection_scores, expected_scores) + self.assertAllClose(detection_classes, expected_classes) + self.assertAllClose(detection_multiclass_scores, expected_multiclass_scores) + self.assertAllClose(num_detections, expected_num_detections) + self.assertAllEqual(raw_detection_boxes, expected_raw_detection_boxes) + self.assertAllEqual(raw_detection_scores, + expected_raw_detection_scores) + + def test_postprocess_results_are_correct_static(self): + with test_utils.GraphContextOrNone() as g: + model, _, _, _ = self._create_model(use_static_shapes=True, + nms_max_size_per_class=4) + + def graph_fn(input_image): + preprocessed_inputs, true_image_shapes = model.preprocess(input_image) + prediction_dict = model.predict(preprocessed_inputs, + true_image_shapes) + detections = model.postprocess(prediction_dict, true_image_shapes) + return (detections['detection_boxes'], detections['detection_scores'], + detections['detection_classes'], detections['num_detections'], + detections['detection_multiclass_scores']) + + expected_boxes = [ + [ + [0, 0, .5, .5], + [0, .5, .5, 1], + [.5, 0, 1, .5], + [0, 0, 0, 0] + ], # padding + [ + [0, 0, .5, .5], + [0, .5, .5, 1], + [.5, 0, 1, .5], + [0, 0, 0, 0] + ] + ] # padding + 
expected_scores = [[0, 0, 0, 0], [0, 0, 0, 0]] + expected_multiclass_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0], [0, 0]]] + expected_classes = [[0, 0, 0, 0], [0, 0, 0, 0]] + expected_num_detections = np.array([3, 3]) + batch_size = 2 + image_size = 2 + channels = 3 + input_image = np.random.rand(batch_size, image_size, image_size, + channels).astype(np.float32) + (detection_boxes, detection_scores, detection_classes, + num_detections, detection_multiclass_scores) = self.execute(graph_fn, + [input_image], + graph=g) + for image_idx in range(batch_size): + self.assertTrue(test_utils.first_rows_close_as_set( + detection_boxes[image_idx][ + 0:expected_num_detections[image_idx]].tolist(), + expected_boxes[image_idx][0:expected_num_detections[image_idx]])) + self.assertAllClose( + detection_scores[image_idx][0:expected_num_detections[image_idx]], + expected_scores[image_idx][0:expected_num_detections[image_idx]]) + self.assertAllClose( + detection_multiclass_scores[image_idx] + [0:expected_num_detections[image_idx]], + expected_multiclass_scores[image_idx] + [0:expected_num_detections[image_idx]]) + self.assertAllClose( + detection_classes[image_idx][0:expected_num_detections[image_idx]], + expected_classes[image_idx][0:expected_num_detections[image_idx]]) + self.assertAllClose(num_detections, + expected_num_detections) + + def test_postprocess_results_are_correct_with_calibration(self): + with test_utils.GraphContextOrNone() as g: + model, _, _, _ = self._create_model(calibration_mapping_value=0.5) + + def graph_fn(): + size = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + batch = tf.random.uniform([], minval=2, maxval=3, dtype=tf.int32) + shape = tf.stack([batch, size, size, 3]) + image = tf.random.uniform(shape) + preprocessed_inputs, true_image_shapes = model.preprocess( + image) + prediction_dict = model.predict(preprocessed_inputs, + true_image_shapes) + detections = model.postprocess(prediction_dict, true_image_shapes) + return detections['detection_scores'], detections['raw_detection_scores'] + # Calibration mapping value below is set to map all scores to 0.5, except + # for the last two detections in each batch (see expected number of + # detections below. 
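As an illustrative aside (not part of the patch): the calibration used in this test is a two-point function approximation that sends every score to the same value, which plain interpolation reproduces. The 0.5 target mirrors calibration_mapping_value=0.5 above.

import numpy as np

def calibrate(scores, xs=(0.0, 1.0), ys=(0.5, 0.5)):
  # Simplified stand-in for the function-approximation score converter.
  return np.interp(scores, xs, ys)

print(calibrate(np.array([0.05, 0.4, 0.99])))  # -> [0.5 0.5 0.5]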
+ expected_scores = [[0.5, 0.5, 0.5, 0., 0.], [0.5, 0.5, 0.5, 0., 0.]] + expected_raw_detection_scores = [ + [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]], + [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]] + ] + detection_scores, raw_detection_scores = self.execute_cpu(graph_fn, [], + graph=g) + self.assertAllClose(detection_scores, expected_scores) + self.assertAllEqual(raw_detection_scores, expected_raw_detection_scores) + + def test_loss_results_are_correct(self): + + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, _ = self._create_model( + apply_hard_mining=False) + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + prediction_dict = model.predict(preprocessed_tensor, + true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (self._get_value_for_matching_key(loss_dict, + 'Loss/localization_loss'), + self._get_value_for_matching_key(loss_dict, + 'Loss/classification_loss')) + batch_size = 2 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + (localization_loss, classification_loss) = self.execute( + graph_fn, [ + preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2 + ], + graph=g) + + expected_localization_loss = 0.0 + expected_classification_loss = (batch_size * num_anchors + * (num_classes+1) * np.log(2.0)) + + self.assertAllClose(localization_loss, expected_localization_loss) + self.assertAllClose(classification_loss, expected_classification_loss) + + def test_loss_results_are_correct_with_normalize_by_codesize_true(self): + with test_utils.GraphContextOrNone() as g: + model, _, _, _ = self._create_model( + apply_hard_mining=False, normalize_loc_loss_by_codesize=True) + + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + prediction_dict = model.predict(preprocessed_tensor, + true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (self._get_value_for_matching_key(loss_dict, + 'Loss/localization_loss'),) + + batch_size = 2 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, 1, 1]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, 1, 1]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + expected_localization_loss = 0.5 / 4 + localization_loss = self.execute(graph_fn, [preprocessed_input, + groundtruth_boxes1, + groundtruth_boxes2, + groundtruth_classes1, + groundtruth_classes2], graph=g) + self.assertAllClose(localization_loss, expected_localization_loss) + + def 
test_loss_results_are_correct_with_hard_example_mining(self): + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, _ = self._create_model() + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + prediction_dict = model.predict(preprocessed_tensor, + true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (self._get_value_for_matching_key(loss_dict, + 'Loss/localization_loss'), + self._get_value_for_matching_key(loss_dict, + 'Loss/classification_loss')) + + batch_size = 2 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + expected_localization_loss = 0.0 + expected_classification_loss = (batch_size * num_anchors + * (num_classes+1) * np.log(2.0)) + (localization_loss, classification_loss) = self.execute_cpu( + graph_fn, [ + preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2 + ], graph=g) + self.assertAllClose(localization_loss, expected_localization_loss) + self.assertAllClose(classification_loss, expected_classification_loss) + + def test_loss_results_are_correct_without_add_background_class(self): + + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, _ = self._create_model( + apply_hard_mining=False, add_background_class=False) + + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + prediction_dict = model.predict( + preprocessed_tensor, true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (loss_dict['Loss/localization_loss'], + loss_dict['Loss/classification_loss']) + + batch_size = 2 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + expected_localization_loss = 0.0 + expected_classification_loss = ( + batch_size * num_anchors * num_classes * np.log(2.0)) + (localization_loss, classification_loss) = self.execute( + graph_fn, [ + preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2 + ], graph=g) + + self.assertAllClose(localization_loss, expected_localization_loss) + self.assertAllClose(classification_loss, expected_classification_loss) + + + def test_loss_results_are_correct_with_losses_mask(self): + with test_utils.GraphContextOrNone() as g: + model, num_classes, num_anchors, _ = self._create_model( + apply_hard_mining=False) + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + 
groundtruth_boxes3, groundtruth_classes1, groundtruth_classes2, + groundtruth_classes3): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2, + groundtruth_boxes3] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2, + groundtruth_classes3] + is_annotated_list = [tf.constant(True), tf.constant(True), + tf.constant(False)] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list, + is_annotated_list=is_annotated_list) + prediction_dict = model.predict(preprocessed_tensor, + true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (self._get_value_for_matching_key(loss_dict, + 'Loss/localization_loss'), + self._get_value_for_matching_key(loss_dict, + 'Loss/classification_loss')) + + batch_size = 3 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes3 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + groundtruth_classes3 = np.array([[1]], dtype=np.float32) + expected_localization_loss = 0.0 + # Note that we are subtracting 1 from batch_size, since the final image is + # not annotated. + expected_classification_loss = ((batch_size - 1) * num_anchors + * (num_classes+1) * np.log(2.0)) + (localization_loss, + classification_loss) = self.execute(graph_fn, [preprocessed_input, + groundtruth_boxes1, + groundtruth_boxes2, + groundtruth_boxes3, + groundtruth_classes1, + groundtruth_classes2, + groundtruth_classes3], + graph=g) + self.assertAllClose(localization_loss, expected_localization_loss) + self.assertAllClose(classification_loss, expected_classification_loss) + + def test_restore_map_for_detection_ckpt(self): + # TODO(rathodv): Support TF2.X + if self.is_tf2(): return + model, _, _, _ = self._create_model() + model.predict(tf.constant(np.array([[[[0, 0], [1, 1]], [[1, 0], [0, 1]]]], + dtype=np.float32)), + true_image_shapes=None) + init_op = tf.global_variables_initializer() + saver = tf.train.Saver() + save_path = self.get_temp_dir() + with self.session() as sess: + sess.run(init_op) + saved_model_path = saver.save(sess, save_path) + var_map = model.restore_map( + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=False) + self.assertIsInstance(var_map, dict) + saver = tf.train.Saver(var_map) + saver.restore(sess, saved_model_path) + for var in sess.run(tf.report_uninitialized_variables()): + self.assertNotIn('FeatureExtractor', var) + + def test_restore_map_for_classification_ckpt(self): + # TODO(rathodv): Support TF2.X + if self.is_tf2(): return + # Define mock tensorflow classification graph and save variables. + test_graph_classification = tf.Graph() + with test_graph_classification.as_default(): + image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3]) + + with tf.variable_scope('mock_model'): + net = slim.conv2d(image, num_outputs=32, kernel_size=1, scope='layer1') + slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2') + + init_op = tf.global_variables_initializer() + saver = tf.train.Saver() + save_path = self.get_temp_dir() + with self.session(graph=test_graph_classification) as sess: + sess.run(init_op) + saved_model_path = saver.save(sess, save_path) + + # Create tensorflow detection graph and load variables from + # classification checkpoint. 
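As an illustrative aside (not part of the patch), before the detection graph is built below: the restore tests here hinge on the variable filtering rule in restore_map, which reduces to a name-prefix check. The variable names used in this sketch are assumed.

def detection_restore_vars(var_names, feature_extractor_scope='FeatureExtractor',
                           load_all_detection_checkpoint_vars=False):
  # Simplified restatement of the filtering done for the 'detection' case.
  return [name for name in var_names
          if load_all_detection_checkpoint_vars
          or name.startswith(feature_extractor_scope)]

names = ['FeatureExtractor/layer1/weights', 'BoxPredictor/conv/bias',
         'another_variable']
assert detection_restore_vars(names) == ['FeatureExtractor/layer1/weights']
assert 'another_variable' in detection_restore_vars(
    names, load_all_detection_checkpoint_vars=True)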
+ test_graph_detection = tf.Graph() + with test_graph_detection.as_default(): + model, _, _, _ = self._create_model() + inputs_shape = [2, 2, 2, 3] + inputs = tf.cast(tf.random_uniform( + inputs_shape, minval=0, maxval=255, dtype=tf.int32), dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + model.postprocess(prediction_dict, true_image_shapes) + another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable + var_map = model.restore_map(fine_tune_checkpoint_type='classification') + self.assertNotIn('another_variable', var_map) + self.assertIsInstance(var_map, dict) + saver = tf.train.Saver(var_map) + with self.session(graph=test_graph_detection) as sess: + saver.restore(sess, saved_model_path) + for var in sess.run(tf.report_uninitialized_variables()): + self.assertNotIn(six.ensure_binary('FeatureExtractor'), var) + + def test_load_all_det_checkpoint_vars(self): + # TODO(rathodv): Support TF2.X + if self.is_tf2(): return + test_graph_detection = tf.Graph() + with test_graph_detection.as_default(): + model, _, _, _ = self._create_model() + inputs_shape = [2, 2, 2, 3] + inputs = tf.cast( + tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32), + dtype=tf.float32) + preprocessed_inputs, true_image_shapes = model.preprocess(inputs) + prediction_dict = model.predict(preprocessed_inputs, true_image_shapes) + model.postprocess(prediction_dict, true_image_shapes) + another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable + var_map = model.restore_map( + fine_tune_checkpoint_type='detection', + load_all_detection_checkpoint_vars=True) + self.assertIsInstance(var_map, dict) + self.assertIn('another_variable', var_map) + + def test_loss_results_are_correct_with_random_example_sampling(self): + with test_utils.GraphContextOrNone() as g: + model, num_classes, _, _ = self._create_model( + random_example_sampling=True) + + def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2): + groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2] + groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2] + model.provide_groundtruth(groundtruth_boxes_list, + groundtruth_classes_list) + prediction_dict = model.predict( + preprocessed_tensor, true_image_shapes=None) + loss_dict = model.loss(prediction_dict, true_image_shapes=None) + return (self._get_value_for_matching_key(loss_dict, + 'Loss/localization_loss'), + self._get_value_for_matching_key(loss_dict, + 'Loss/classification_loss')) + + batch_size = 2 + preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32) + groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32) + groundtruth_classes1 = np.array([[1]], dtype=np.float32) + groundtruth_classes2 = np.array([[1]], dtype=np.float32) + expected_localization_loss = 0.0 + # Among 4 anchors (1 positive, 3 negative) in this test, only 2 anchors are + # selected (1 positive, 1 negative) since random sampler will adjust number + # of negative examples to make sure positive example fraction in the batch + # is 0.5. 
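As an illustrative aside (not part of the patch): with one positive and one sampled negative anchor per image, the expected value computed next is just the sigmoid cross-entropy of a zero logit, summed over the sampled anchors and the (num_classes + 1) class outputs.

import numpy as np

batch_size, sampled_anchors_per_image, num_classes = 2, 2, 1
per_logit_loss = np.log(2.0)  # sigmoid cross-entropy of a zero logit
expected = batch_size * sampled_anchors_per_image * (num_classes + 1) * per_logit_loss
print(round(expected, 4))  # ~2.7726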
+ expected_classification_loss = ( + batch_size * 2 * (num_classes + 1) * np.log(2.0)) + (localization_loss, classification_loss) = self.execute_cpu( + graph_fn, [ + preprocessed_input, groundtruth_boxes1, groundtruth_boxes2, + groundtruth_classes1, groundtruth_classes2 + ], graph=g) + self.assertAllClose(localization_loss, expected_localization_loss) + self.assertAllClose(classification_loss, expected_classification_loss) + + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch_test_lib.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch_test_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..0991388b31ac6a5974c9297e50b9630b3966e489 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/meta_architectures/ssd_meta_arch_test_lib.py @@ -0,0 +1,259 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper functions for SSD models meta architecture tests.""" + +import functools +import tensorflow.compat.v1 as tf +from google.protobuf import text_format + +from object_detection.builders import post_processing_builder +from object_detection.core import anchor_generator +from object_detection.core import balanced_positive_negative_sampler as sampler +from object_detection.core import box_list +from object_detection.core import losses +from object_detection.core import post_processing +from object_detection.core import region_similarity_calculator as sim_calc +from object_detection.core import target_assigner +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.protos import calibration_pb2 +from object_detection.protos import model_pb2 +from object_detection.utils import ops +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + +# pylint: disable=g-import-not-at-top +try: + import tf_slim as slim +except ImportError: + # TF 2.0 doesn't ship with contrib. 
+ pass +# pylint: enable=g-import-not-at-top + +keras = tf.keras.layers + + +class FakeSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """Fake ssd feature extracture for ssd meta arch tests.""" + + def __init__(self): + super(FakeSSDFeatureExtractor, self).__init__( + is_training=True, + depth_multiplier=0, + min_depth=0, + pad_to_multiple=1, + conv_hyperparams_fn=None) + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def extract_features(self, preprocessed_inputs): + with tf.variable_scope('mock_model'): + features = slim.conv2d( + inputs=preprocessed_inputs, + num_outputs=32, + kernel_size=1, + scope='layer1') + return [features] + + +class FakeSSDKerasFeatureExtractor(ssd_meta_arch.SSDKerasFeatureExtractor): + """Fake keras based ssd feature extracture for ssd meta arch tests.""" + + def __init__(self): + with tf.name_scope('mock_model'): + super(FakeSSDKerasFeatureExtractor, self).__init__( + is_training=True, + depth_multiplier=0, + min_depth=0, + pad_to_multiple=1, + conv_hyperparams=None, + freeze_batchnorm=False, + inplace_batchnorm_update=False, + ) + + self._conv = keras.Conv2D(filters=32, kernel_size=1, name='layer1') + + def preprocess(self, resized_inputs): + return tf.identity(resized_inputs) + + def _extract_features(self, preprocessed_inputs, **kwargs): + with tf.name_scope('mock_model'): + return [self._conv(preprocessed_inputs)] + + +class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator): + """A simple 2x2 anchor grid on the unit square used for test only.""" + + def name_scope(self): + return 'MockAnchorGenerator' + + def num_anchors_per_location(self): + return [1] + + def _generate(self, feature_map_shape_list, im_height, im_width): + return [ + box_list.BoxList( + tf.constant( + [ + [0, 0, .5, .5], + [0, .5, .5, 1], + [.5, 0, 1, .5], + [1., 1., 1.5, 1.5] # Anchor that is outside clip_window. 
+ ], + tf.float32)) + ] + + def num_anchors(self): + return 4 + + +class SSDMetaArchTestBase(test_case.TestCase): + """Base class to test SSD based meta architectures.""" + + def _create_model( + self, + model_fn=ssd_meta_arch.SSDMetaArch, + apply_hard_mining=True, + normalize_loc_loss_by_codesize=False, + add_background_class=True, + random_example_sampling=False, + expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE, + min_num_negative_samples=1, + desired_negative_sampling_ratio=3, + predict_mask=False, + use_static_shapes=False, + nms_max_size_per_class=5, + calibration_mapping_value=None, + return_raw_detections_during_predict=False): + is_training = False + num_classes = 1 + mock_anchor_generator = MockAnchorGenerator2x2() + use_keras = tf_version.is_tf2() + if use_keras: + mock_box_predictor = test_utils.MockKerasBoxPredictor( + is_training, num_classes, add_background_class=add_background_class) + else: + mock_box_predictor = test_utils.MockBoxPredictor( + is_training, num_classes, add_background_class=add_background_class) + mock_box_coder = test_utils.MockBoxCoder() + if use_keras: + fake_feature_extractor = FakeSSDKerasFeatureExtractor() + else: + fake_feature_extractor = FakeSSDFeatureExtractor() + mock_matcher = test_utils.MockMatcher() + region_similarity_calculator = sim_calc.IouSimilarity() + encode_background_as_zeros = False + + def image_resizer_fn(image): + return [tf.identity(image), tf.shape(image)] + + classification_loss = losses.WeightedSigmoidClassificationLoss() + localization_loss = losses.WeightedSmoothL1LocalizationLoss() + non_max_suppression_fn = functools.partial( + post_processing.batch_multiclass_non_max_suppression, + score_thresh=-20.0, + iou_thresh=1.0, + max_size_per_class=nms_max_size_per_class, + max_total_size=nms_max_size_per_class, + use_static_shapes=use_static_shapes) + score_conversion_fn = tf.identity + calibration_config = calibration_pb2.CalibrationConfig() + if calibration_mapping_value: + calibration_text_proto = """ + function_approximation { + x_y_pairs { + x_y_pair { + x: 0.0 + y: %f + } + x_y_pair { + x: 1.0 + y: %f + }}}""" % (calibration_mapping_value, calibration_mapping_value) + text_format.Merge(calibration_text_proto, calibration_config) + score_conversion_fn = ( + post_processing_builder._build_calibrated_score_converter( # pylint: disable=protected-access + tf.identity, calibration_config)) + classification_loss_weight = 1.0 + localization_loss_weight = 1.0 + negative_class_weight = 1.0 + normalize_loss_by_num_matches = False + + hard_example_miner = None + if apply_hard_mining: + # This hard example miner is expected to be a no-op. 
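As an illustrative aside (not part of the patch): hard example mining keeps only the highest-loss anchors, so the configuration built next, with no cap on the number of hard examples, leaves every anchor selected and acts as a no-op. A toy ranking miner with assumed losses illustrates the idea.

import numpy as np

def mine_hard_examples(anchorwise_losses, num_hard_examples=None):
  # Toy miner: rank anchors by loss and optionally keep only the top-k.
  order = np.argsort(-np.asarray(anchorwise_losses))
  return order if num_hard_examples is None else order[:num_hard_examples]

losses_per_anchor = [0.1, 2.3, 0.7, 1.5]         # assumed values
print(mine_hard_examples(losses_per_anchor, 2))  # -> [1 3]
print(mine_hard_examples(losses_per_anchor))     # no cap: all four anchors kept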
+ hard_example_miner = losses.HardExampleMiner( + num_hard_examples=None, iou_threshold=1.0) + + random_example_sampler = None + if random_example_sampling: + random_example_sampler = sampler.BalancedPositiveNegativeSampler( + positive_fraction=0.5) + + target_assigner_instance = target_assigner.TargetAssigner( + region_similarity_calculator, + mock_matcher, + mock_box_coder, + negative_class_weight=negative_class_weight) + + model_config = model_pb2.DetectionModel() + if expected_loss_weights == model_config.ssd.loss.NONE: + expected_loss_weights_fn = None + else: + raise ValueError('Not a valid value for expected_loss_weights.') + + code_size = 4 + + kwargs = {} + if predict_mask: + kwargs.update({ + 'mask_prediction_fn': test_utils.MockMaskHead(num_classes=1).predict, + }) + + model = model_fn( + is_training=is_training, + anchor_generator=mock_anchor_generator, + box_predictor=mock_box_predictor, + box_coder=mock_box_coder, + feature_extractor=fake_feature_extractor, + encode_background_as_zeros=encode_background_as_zeros, + image_resizer_fn=image_resizer_fn, + non_max_suppression_fn=non_max_suppression_fn, + score_conversion_fn=score_conversion_fn, + classification_loss=classification_loss, + localization_loss=localization_loss, + classification_loss_weight=classification_loss_weight, + localization_loss_weight=localization_loss_weight, + normalize_loss_by_num_matches=normalize_loss_by_num_matches, + hard_example_miner=hard_example_miner, + target_assigner_instance=target_assigner_instance, + add_summaries=False, + normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, + freeze_batchnorm=False, + inplace_batchnorm_update=False, + add_background_class=add_background_class, + random_example_sampler=random_example_sampler, + expected_loss_weights_fn=expected_loss_weights_fn, + return_raw_detections_during_predict=( + return_raw_detections_during_predict), + **kwargs) + return model, num_classes, mock_anchor_generator.num_anchors(), code_size + + def _get_value_for_matching_key(self, dictionary, suffix): + for key in dictionary.keys(): + if key.endswith(suffix): + return dictionary[key] + raise ValueError('key not found {}'.format(suffix)) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b7929c5c2e9f59edc385a980c5e64fb07ae7509 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/coco_evaluation.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/coco_evaluation.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1e9264616af25dbdce7514ab1b8dff1c8276be8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/coco_evaluation.cpython-36.pyc differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/coco_tools.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/coco_tools.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..521ce0d15c81714b42375972383ef11b4b2c5fc3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/coco_tools.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/lvis_evaluation.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/lvis_evaluation.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de8c80e917e2bd7ec5a7230f0e64545e311ef59e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/__pycache__/lvis_evaluation.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_evaluation.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..e3fc4b05639b94d7425a8811ef92e1878c13f4f8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_evaluation.py @@ -0,0 +1,228 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Class for evaluating object detections with calibration metrics.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.box_coders import mean_stddev_box_coder +from object_detection.core import box_list +from object_detection.core import region_similarity_calculator +from object_detection.core import standard_fields +from object_detection.core import target_assigner +from object_detection.matchers import argmax_matcher +from object_detection.metrics import calibration_metrics +from object_detection.utils import object_detection_evaluation + + +# TODO(zbeaver): Implement metrics per category. +class CalibrationDetectionEvaluator( + object_detection_evaluation.DetectionEvaluator): + """Class to evaluate calibration detection metrics.""" + + def __init__(self, + categories, + iou_threshold=0.5): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + iou_threshold: Threshold above which to consider a box as matched during + evaluation. 
+ """ + super(CalibrationDetectionEvaluator, self).__init__(categories) + + # Constructing target_assigner to match detections to groundtruth. + similarity_calc = region_similarity_calculator.IouSimilarity() + matcher = argmax_matcher.ArgMaxMatcher( + matched_threshold=iou_threshold, unmatched_threshold=iou_threshold) + box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) + self._target_assigner = target_assigner.TargetAssigner( + similarity_calc, matcher, box_coder) + + def match_single_image_info(self, image_info): + """Match detections to groundtruth for a single image. + + Detections are matched to available groundtruth in the image based on the + IOU threshold from the constructor. The classes of the detections and + groundtruth matches are then compared. Detections that do not have IOU above + the required threshold or have different classes from their match are + considered negative matches. All inputs in `image_info` originate or are + inferred from the eval_dict passed to class method + `get_estimator_eval_metric_ops`. + + Args: + image_info: a tuple or list containing the following (in order): + - gt_boxes: tf.float32 tensor of groundtruth boxes. + - gt_classes: tf.int64 tensor of groundtruth classes associated with + groundtruth boxes. + - num_gt_box: scalar indicating the number of groundtruth boxes per + image. + - det_boxes: tf.float32 tensor of detection boxes. + - det_classes: tf.int64 tensor of detection classes associated with + detection boxes. + - num_det_box: scalar indicating the number of detection boxes per + image. + Returns: + is_class_matched: tf.int64 tensor identical in shape to det_boxes, + indicating whether detection boxes matched with and had the same + class as groundtruth annotations. + """ + (gt_boxes, gt_classes, num_gt_box, det_boxes, det_classes, + num_det_box) = image_info + detection_boxes = det_boxes[:num_det_box] + detection_classes = det_classes[:num_det_box] + groundtruth_boxes = gt_boxes[:num_gt_box] + groundtruth_classes = gt_classes[:num_gt_box] + det_boxlist = box_list.BoxList(detection_boxes) + gt_boxlist = box_list.BoxList(groundtruth_boxes) + + # Target assigner requires classes in one-hot format. An additional + # dimension is required since gt_classes are 1-indexed; the zero index is + # provided to all non-matches. + one_hot_depth = tf.cast(tf.add(tf.reduce_max(groundtruth_classes), 1), + dtype=tf.int32) + gt_classes_one_hot = tf.one_hot( + groundtruth_classes, one_hot_depth, dtype=tf.float32) + one_hot_cls_targets, _, _, _, _ = self._target_assigner.assign( + det_boxlist, + gt_boxlist, + gt_classes_one_hot, + unmatched_class_label=tf.zeros(shape=one_hot_depth, dtype=tf.float32)) + # Transform from one-hot back to indexes. + cls_targets = tf.argmax(one_hot_cls_targets, axis=1) + is_class_matched = tf.cast( + tf.equal(tf.cast(cls_targets, tf.int64), detection_classes), + dtype=tf.int64) + return is_class_matched + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + This function can take in groundtruth and detections for a batch of images, + or for a single image. For the latter case, the batch dimension for input + tensors need not be present. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). 
If multi-image + evaluation, `eval_dict` should contain the fields + 'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to + properly unpad the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all + update ops must be run together and similarly all value ops must be run + together to guarantee correct behaviour. + """ + # Unpack items from the evaluation dictionary. + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + detection_boxes = eval_dict[detection_fields.detection_boxes] + detection_scores = eval_dict[detection_fields.detection_scores] + detection_classes = eval_dict[detection_fields.detection_classes] + num_gt_boxes_per_image = eval_dict.get( + 'num_groundtruth_boxes_per_image', None) + num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None) + is_annotated_batched = eval_dict.get('is_annotated', None) + + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. + image_id = tf.expand_dims(image_id, 0) + groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + detection_boxes = tf.expand_dims(detection_boxes, 0) + detection_scores = tf.expand_dims(detection_scores, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2] + else: + num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0) + + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.shape(detection_boxes)[1:2] + else: + num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0) + + if is_annotated_batched is None: + is_annotated_batched = tf.constant([True]) + else: + is_annotated_batched = tf.expand_dims(is_annotated_batched, 0) + else: + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.tile( + tf.shape(groundtruth_boxes)[1:2], + multiples=tf.shape(groundtruth_boxes)[0:1]) + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.tile( + tf.shape(detection_boxes)[1:2], + multiples=tf.shape(detection_boxes)[0:1]) + if is_annotated_batched is None: + is_annotated_batched = tf.ones_like(image_id, dtype=tf.bool) + + # Filter images based on is_annotated_batched and match detections. + image_info = [tf.boolean_mask(tensor, is_annotated_batched) for tensor in + [groundtruth_boxes, groundtruth_classes, + num_gt_boxes_per_image, detection_boxes, detection_classes, + num_det_boxes_per_image]] + is_class_matched = tf.map_fn( + self.match_single_image_info, image_info, dtype=tf.int64) + y_true = tf.squeeze(is_class_matched) + y_pred = tf.squeeze(tf.boolean_mask(detection_scores, is_annotated_batched)) + ece, update_op = calibration_metrics.expected_calibration_error( + y_true, y_pred) + return {'CalibrationError/ExpectedCalibrationError': (ece, update_op)} + + def add_single_ground_truth_image_info(self, image_id, groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary of groundtruth numpy arrays required + for evaluations. 
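A rough sketch of driving the returned (value_op, update_op) pair in a TF1 session, assuming an `evaluator` instance and an already populated `eval_dict` exist; the TF1 test further down follows the same pattern:

import tensorflow.compat.v1 as tf

metric_ops = evaluator.get_estimator_eval_metric_ops(eval_dict)
ece_op, update_op = metric_ops['CalibrationError/ExpectedCalibrationError']
with tf.Session() as sess:
    # Metric variables (bin counts/sums) live in the METRIC_VARIABLES collection.
    metric_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
    sess.run(tf.variables_initializer(var_list=metric_vars))
    sess.run(update_op)       # accumulate bin statistics for this batch
    print(sess.run(ece_op))   # expected calibration error accumulated so far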
+ """ + raise NotImplementedError + + def add_single_detected_image_info(self, image_id, detections_dict): + """Adds detections for a single image to be used for evaluation. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary of detection numpy arrays required for + evaluation. + """ + raise NotImplementedError + + def evaluate(self): + """Evaluates detections and returns a dictionary of metrics.""" + raise NotImplementedError + + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + raise NotImplementedError diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_evaluation_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_evaluation_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0f3d6eb319f0819937c04e030c9e1937bf09db10 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_evaluation_tf1_test.py @@ -0,0 +1,203 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for tensorflow_models.object_detection.metrics.calibration_evaluation.""" # pylint: disable=line-too-long + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import tensorflow.compat.v1 as tf +from object_detection.core import standard_fields +from object_detection.metrics import calibration_evaluation +from object_detection.utils import tf_version + + +def _get_categories_list(): + return [{ + 'id': 1, + 'name': 'person' + }, { + 'id': 2, + 'name': 'dog' + }, { + 'id': 3, + 'name': 'cat' + }] + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class CalibrationDetectionEvaluationTest(tf.test.TestCase): + + def _get_ece(self, ece_op, update_op): + """Return scalar expected calibration error.""" + with self.test_session() as sess: + metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES) + sess.run(tf.variables_initializer(var_list=metrics_vars)) + _ = sess.run(update_op) + return sess.run(ece_op) + + def testGetECEWithMatchingGroundtruthAndDetections(self): + """Tests that ECE is calculated correctly when box matches exist.""" + calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator( + _get_categories_list(), iou_threshold=0.5) + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + # All gt and detection boxes match. 
+ base_eval_dict = { + input_data_fields.key: + tf.constant(['image_1', 'image_2', 'image_3']), + input_data_fields.groundtruth_boxes: + tf.constant([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]], + dtype=tf.float32), + detection_fields.detection_boxes: + tf.constant([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]], + dtype=tf.float32), + input_data_fields.groundtruth_classes: + tf.constant([[1], [2], [3]], dtype=tf.int64), + # Note that, in the zero ECE case, the detection class for image_2 + # should NOT match groundtruth, since the detection score is zero. + detection_fields.detection_scores: + tf.constant([[1.0], [0.0], [1.0]], dtype=tf.float32) + } + + # Zero ECE (perfectly calibrated). + zero_ece_eval_dict = base_eval_dict.copy() + zero_ece_eval_dict[detection_fields.detection_classes] = tf.constant( + [[1], [1], [3]], dtype=tf.int64) + zero_ece_op, zero_ece_update_op = ( + calibration_evaluator.get_estimator_eval_metric_ops(zero_ece_eval_dict) + ['CalibrationError/ExpectedCalibrationError']) + zero_ece = self._get_ece(zero_ece_op, zero_ece_update_op) + self.assertAlmostEqual(zero_ece, 0.0) + + # ECE of 1 (poorest calibration). + one_ece_eval_dict = base_eval_dict.copy() + one_ece_eval_dict[detection_fields.detection_classes] = tf.constant( + [[3], [2], [1]], dtype=tf.int64) + one_ece_op, one_ece_update_op = ( + calibration_evaluator.get_estimator_eval_metric_ops(one_ece_eval_dict) + ['CalibrationError/ExpectedCalibrationError']) + one_ece = self._get_ece(one_ece_op, one_ece_update_op) + self.assertAlmostEqual(one_ece, 1.0) + + def testGetECEWithUnmatchedGroundtruthAndDetections(self): + """Tests that ECE is correctly calculated when boxes are unmatched.""" + calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator( + _get_categories_list(), iou_threshold=0.5) + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + # No gt and detection boxes match. + eval_dict = { + input_data_fields.key: + tf.constant(['image_1', 'image_2', 'image_3']), + input_data_fields.groundtruth_boxes: + tf.constant([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]], + dtype=tf.float32), + detection_fields.detection_boxes: + tf.constant([[[50., 50., 100., 100.]], + [[25., 25., 50., 50.]], + [[100., 100., 200., 200.]]], + dtype=tf.float32), + input_data_fields.groundtruth_classes: + tf.constant([[1], [2], [3]], dtype=tf.int64), + detection_fields.detection_classes: + tf.constant([[1], [1], [3]], dtype=tf.int64), + # Detection scores of zero when boxes are unmatched = ECE of zero. + detection_fields.detection_scores: + tf.constant([[0.0], [0.0], [0.0]], dtype=tf.float32) + } + + ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops( + eval_dict)['CalibrationError/ExpectedCalibrationError'] + ece = self._get_ece(ece_op, update_op) + self.assertAlmostEqual(ece, 0.0) + + def testGetECEWithBatchedDetections(self): + """Tests that ECE is correct with multiple detections per image.""" + calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator( + _get_categories_list(), iou_threshold=0.5) + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + # Note that image_2 has mismatched classes and detection scores but should + # still produce ECE of 0 because detection scores are also 0. 
+ eval_dict = { + input_data_fields.key: + tf.constant(['image_1', 'image_2', 'image_3']), + input_data_fields.groundtruth_boxes: + tf.constant([[[100., 100., 200., 200.], [50., 50., 100., 100.]], + [[50., 50., 100., 100.], [100., 100., 200., 200.]], + [[25., 25., 50., 50.], [100., 100., 200., 200.]]], + dtype=tf.float32), + detection_fields.detection_boxes: + tf.constant([[[100., 100., 200., 200.], [50., 50., 100., 100.]], + [[50., 50., 100., 100.], [25., 25., 50., 50.]], + [[25., 25., 50., 50.], [100., 100., 200., 200.]]], + dtype=tf.float32), + input_data_fields.groundtruth_classes: + tf.constant([[1, 2], [2, 3], [3, 1]], dtype=tf.int64), + detection_fields.detection_classes: + tf.constant([[1, 2], [1, 1], [3, 1]], dtype=tf.int64), + detection_fields.detection_scores: + tf.constant([[1.0, 1.0], [0.0, 0.0], [1.0, 1.0]], dtype=tf.float32) + } + + ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops( + eval_dict)['CalibrationError/ExpectedCalibrationError'] + ece = self._get_ece(ece_op, update_op) + self.assertAlmostEqual(ece, 0.0) + + def testGetECEWhenImagesFilteredByIsAnnotated(self): + """Tests that ECE is correct when detections filtered by is_annotated.""" + calibration_evaluator = calibration_evaluation.CalibrationDetectionEvaluator( + _get_categories_list(), iou_threshold=0.5) + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + # ECE will be 0 only if the third image is filtered by is_annotated. + eval_dict = { + input_data_fields.key: + tf.constant(['image_1', 'image_2', 'image_3']), + input_data_fields.groundtruth_boxes: + tf.constant([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]], + dtype=tf.float32), + detection_fields.detection_boxes: + tf.constant([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]], + dtype=tf.float32), + input_data_fields.groundtruth_classes: + tf.constant([[1], [2], [1]], dtype=tf.int64), + detection_fields.detection_classes: + tf.constant([[1], [1], [3]], dtype=tf.int64), + detection_fields.detection_scores: + tf.constant([[1.0], [0.0], [1.0]], dtype=tf.float32), + 'is_annotated': tf.constant([True, True, False], dtype=tf.bool) + } + + ece_op, update_op = calibration_evaluator.get_estimator_eval_metric_ops( + eval_dict)['CalibrationError/ExpectedCalibrationError'] + ece = self._get_ece(ece_op, update_op) + self.assertAlmostEqual(ece, 0.0) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_metrics.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..611c81c3381604923af7831fb0ab030d56617ebb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_metrics.py @@ -0,0 +1,118 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Object detection calibration metrics. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf +from tensorflow.python.ops import metrics_impl + + +def _safe_div(numerator, denominator): + """Divides two tensors element-wise, returning 0 if the denominator is <= 0. + + Args: + numerator: A real `Tensor`. + denominator: A real `Tensor`, with dtype matching `numerator`. + + Returns: + 0 if `denominator` <= 0, else `numerator` / `denominator` + """ + t = tf.truediv(numerator, denominator) + zero = tf.zeros_like(t, dtype=denominator.dtype) + condition = tf.greater(denominator, zero) + zero = tf.cast(zero, t.dtype) + return tf.where(condition, t, zero) + + +def _ece_from_bins(bin_counts, bin_true_sum, bin_preds_sum, name): + """Calculates Expected Calibration Error from accumulated statistics.""" + bin_accuracies = _safe_div(bin_true_sum, bin_counts) + bin_confidences = _safe_div(bin_preds_sum, bin_counts) + abs_bin_errors = tf.abs(bin_accuracies - bin_confidences) + bin_weights = _safe_div(bin_counts, tf.reduce_sum(bin_counts)) + return tf.reduce_sum(abs_bin_errors * bin_weights, name=name) + + +def expected_calibration_error(y_true, y_pred, nbins=20): + """Calculates Expected Calibration Error (ECE). + + ECE is a scalar summary statistic of calibration error. It is the + sample-weighted average of the difference between the predicted and true + probabilities of a positive detection across uniformly-spaced model + confidences [0, 1]. See referenced paper for a thorough explanation. + + Reference: + Guo, et. al, "On Calibration of Modern Neural Networks" + Page 2, Expected Calibration Error (ECE). + https://arxiv.org/pdf/1706.04599.pdf + + This function creates three local variables, `bin_counts`, `bin_true_sum`, and + `bin_preds_sum` that are used to compute ECE. For estimation of the metric + over a stream of data, the function creates an `update_op` operation that + updates these variables and returns the ECE. + + Args: + y_true: 1-D tf.int64 Tensor of binarized ground truth, corresponding to each + prediction in y_pred. + y_pred: 1-D tf.float32 tensor of model confidence scores in range + [0.0, 1.0]. + nbins: int specifying the number of uniformly-spaced bins into which y_pred + will be bucketed. + + Returns: + value_op: A value metric op that returns ece. + update_op: An operation that increments the `bin_counts`, `bin_true_sum`, + and `bin_preds_sum` variables appropriately and whose value matches `ece`. + + Raises: + InvalidArgumentError: if y_pred is not in [0.0, 1.0]. 
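The ECE definition above reduces to a short binned computation: bucket confidences into uniform bins over [0, 1], then take the count-weighted absolute gap between each bin's accuracy and its mean confidence. An illustrative NumPy sketch of that computation (empty bins carry zero weight):

import numpy as np

def expected_calibration_error_np(y_true, y_pred, nbins=20):
    y_true = np.asarray(y_true, dtype=np.float64)
    y_pred = np.asarray(y_pred, dtype=np.float64)
    # Bucket each confidence into one of `nbins` uniform bins over [0, 1];
    # a confidence of exactly 1.0 falls into the last bin.
    bin_ids = np.minimum((y_pred * nbins).astype(int), nbins - 1)
    ece = 0.0
    for b in range(nbins):
        in_bin = bin_ids == b
        if not in_bin.any():
            continue  # empty bins contribute nothing
        accuracy = y_true[in_bin].mean()
        confidence = y_pred[in_bin].mean()
        ece += in_bin.mean() * abs(accuracy - confidence)
    return ece

# With the values used in the tests below:
# expected_calibration_error_np([0, 0, 1, 0, 1], [0., 0.2, 0.4, 0.5, 1.0], nbins=2)
# -> |0.2 - 1/3| * 3/5 + |0.75 - 0.5| * 2/5 = 0.08 + 0.1 = 0.18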
+ """ + bin_counts = metrics_impl.metric_variable( + [nbins], tf.float32, name='bin_counts') + bin_true_sum = metrics_impl.metric_variable( + [nbins], tf.float32, name='true_sum') + bin_preds_sum = metrics_impl.metric_variable( + [nbins], tf.float32, name='preds_sum') + + with tf.control_dependencies([ + tf.assert_greater_equal(y_pred, 0.0), + tf.assert_less_equal(y_pred, 1.0), + ]): + bin_ids = tf.histogram_fixed_width_bins(y_pred, [0.0, 1.0], nbins=nbins) + + with tf.control_dependencies([bin_ids]): + update_bin_counts_op = tf.assign_add( + bin_counts, tf.cast(tf.bincount(bin_ids, minlength=nbins), + dtype=tf.float32)) + update_bin_true_sum_op = tf.assign_add( + bin_true_sum, + tf.cast(tf.bincount(bin_ids, weights=y_true, minlength=nbins), + dtype=tf.float32)) + update_bin_preds_sum_op = tf.assign_add( + bin_preds_sum, + tf.cast(tf.bincount(bin_ids, weights=y_pred, minlength=nbins), + dtype=tf.float32)) + + ece_update_op = _ece_from_bins( + update_bin_counts_op, + update_bin_true_sum_op, + update_bin_preds_sum_op, + name='update_op') + ece = _ece_from_bins(bin_counts, bin_true_sum, bin_preds_sum, name='value') + return ece, ece_update_op diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_metrics_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_metrics_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9c1adbca20dfae80e97927d462c9cc18de6ff823 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/calibration_metrics_tf1_test.py @@ -0,0 +1,112 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for calibration_metrics.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import numpy as np +import tensorflow.compat.v1 as tf +from object_detection.metrics import calibration_metrics +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class CalibrationLibTest(tf.test.TestCase): + + @staticmethod + def _get_calibration_placeholders(): + """Returns TF placeholders for y_true and y_pred.""" + return (tf.placeholder(tf.int64, shape=(None)), + tf.placeholder(tf.float32, shape=(None))) + + def test_expected_calibration_error_all_bins_filled(self): + """Test expected calibration error when all bins contain predictions.""" + y_true, y_pred = self._get_calibration_placeholders() + expected_ece_op, update_op = calibration_metrics.expected_calibration_error( + y_true, y_pred, nbins=2) + with self.test_session() as sess: + metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES) + sess.run(tf.variables_initializer(var_list=metrics_vars)) + # Bin calibration errors (|confidence - accuracy| * bin_weight): + # - [0,0.5): |0.2 - 0.333| * (3/5) = 0.08 + # - [0.5, 1]: |0.75 - 0.5| * (2/5) = 0.1 + sess.run( + update_op, + feed_dict={ + y_pred: np.array([0., 0.2, 0.4, 0.5, 1.0]), + y_true: np.array([0, 0, 1, 0, 1]) + }) + actual_ece = 0.08 + 0.1 + expected_ece = sess.run(expected_ece_op) + self.assertAlmostEqual(actual_ece, expected_ece) + + def test_expected_calibration_error_all_bins_not_filled(self): + """Test expected calibration error when no predictions for one bin.""" + y_true, y_pred = self._get_calibration_placeholders() + expected_ece_op, update_op = calibration_metrics.expected_calibration_error( + y_true, y_pred, nbins=2) + with self.test_session() as sess: + metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES) + sess.run(tf.variables_initializer(var_list=metrics_vars)) + # Bin calibration errors (|confidence - accuracy| * bin_weight): + # - [0,0.5): |0.2 - 0.333| * (3/5) = 0.08 + # - [0.5, 1]: |0.75 - 0.5| * (2/5) = 0.1 + sess.run( + update_op, + feed_dict={ + y_pred: np.array([0., 0.2, 0.4]), + y_true: np.array([0, 0, 1]) + }) + actual_ece = np.abs(0.2 - (1 / 3.)) + expected_ece = sess.run(expected_ece_op) + self.assertAlmostEqual(actual_ece, expected_ece) + + def test_expected_calibration_error_with_multiple_data_streams(self): + """Test expected calibration error when multiple data batches provided.""" + y_true, y_pred = self._get_calibration_placeholders() + expected_ece_op, update_op = calibration_metrics.expected_calibration_error( + y_true, y_pred, nbins=2) + with self.test_session() as sess: + metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES) + sess.run(tf.variables_initializer(var_list=metrics_vars)) + # Identical data to test_expected_calibration_error_all_bins_filled, + # except split over three batches. 
+ sess.run( + update_op, + feed_dict={ + y_pred: np.array([0., 0.2]), + y_true: np.array([0, 0]) + }) + sess.run( + update_op, + feed_dict={ + y_pred: np.array([0.4, 0.5]), + y_true: np.array([1, 0]) + }) + sess.run( + update_op, feed_dict={ + y_pred: np.array([1.0]), + y_true: np.array([1]) + }) + actual_ece = 0.08 + 0.1 + expected_ece = sess.run(expected_ece_op) + self.assertAlmostEqual(actual_ece, expected_ece) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_evaluation.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..9c5e3056eb5bbd42510d9b41cfcd5df3cbc00268 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_evaluation.py @@ -0,0 +1,1875 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Class for evaluating object detections with COCO metrics.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields +from object_detection.metrics import coco_tools +from object_detection.utils import json_utils +from object_detection.utils import np_mask_ops +from object_detection.utils import object_detection_evaluation + + +class CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator): + """Class to evaluate COCO detection metrics.""" + + def __init__(self, + categories, + include_metrics_per_category=False, + all_metrics_per_category=False, + skip_predictions_for_unlabeled_class=False, + super_categories=None): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + include_metrics_per_category: If True, include metrics for each category. + all_metrics_per_category: Whether to include all the summary metrics for + each category in per_category_ap. Be careful with setting it to true if + you have more than handful of categories, because it will pollute + your mldash. + skip_predictions_for_unlabeled_class: Skip predictions that do not match + with the labeled classes for the image. + super_categories: None or a python dict mapping super-category names + (strings) to lists of categories (corresponding to category names + in the label_map). Metrics are aggregated along these super-categories + and added to the `per_category_ap` and are associated with the name + `PerformanceBySuperCategory/`. 
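A sketch of the typical offline (numpy) workflow for the evaluator documented above, using the add_single_*/evaluate() methods defined below; image ids, boxes, classes and scores are illustrative, with boxes in the documented [ymin, xmin, ymax, xmax] absolute-coordinate format:

import numpy as np
from object_detection.metrics import coco_evaluation

coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
    [{'id': 1, 'name': 'person'},
     {'id': 2, 'name': 'dog'},
     {'id': 3, 'name': 'cat'}],
    include_metrics_per_category=True,
    super_categories={'animal': ['dog', 'cat']})

coco_evaluator.add_single_ground_truth_image_info(
    image_id='image_1',
    groundtruth_dict={
        'groundtruth_boxes': np.array([[100., 100., 200., 200.]], dtype=np.float32),
        'groundtruth_classes': np.array([1]),
    })
coco_evaluator.add_single_detected_image_info(
    image_id='image_1',
    detections_dict={
        'detection_boxes': np.array([[100., 100., 200., 200.]], dtype=np.float32),
        'detection_scores': np.array([0.9], dtype=np.float32),
        'detection_classes': np.array([1]),
    })
metrics = coco_evaluator.evaluate()  # e.g. metrics['DetectionBoxes_Precision/mAP']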
+ """ + super(CocoDetectionEvaluator, self).__init__(categories) + # _image_ids is a dictionary that maps unique image ids to Booleans which + # indicate whether a corresponding detection has been added. + self._image_ids = {} + self._groundtruth_list = [] + self._detection_boxes_list = [] + self._category_id_set = set([cat['id'] for cat in self._categories]) + self._annotation_id = 1 + self._metrics = None + self._include_metrics_per_category = include_metrics_per_category + self._all_metrics_per_category = all_metrics_per_category + self._skip_predictions_for_unlabeled_class = skip_predictions_for_unlabeled_class + self._groundtruth_labeled_classes = {} + self._super_categories = super_categories + + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + self._image_ids.clear() + self._groundtruth_list = [] + self._detection_boxes_list = [] + + def add_single_ground_truth_image_info(self, + image_id, + groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + If the image has already been added, a warning is logged, and groundtruth is + ignored. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + InputDataFields.groundtruth_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + InputDataFields.groundtruth_classes: integer numpy array of shape + [num_boxes] containing 1-indexed groundtruth classes for the boxes. + InputDataFields.groundtruth_is_crowd (optional): integer numpy array of + shape [num_boxes] containing iscrowd flag for groundtruth boxes. + InputDataFields.groundtruth_area (optional): float numpy array of + shape [num_boxes] containing the area (in the original absolute + coordinates) of the annotated object. + InputDataFields.groundtruth_keypoints (optional): float numpy array of + keypoints with shape [num_boxes, num_keypoints, 2]. + InputDataFields.groundtruth_keypoint_visibilities (optional): integer + numpy array of keypoint visibilities with shape [num_gt_boxes, + num_keypoints]. Integer is treated as an enum with 0=not labeled, + 1=labeled but not visible and 2=labeled and visible. + InputDataFields.groundtruth_labeled_classes (optional): a dictionary of + image_id to groundtruth_labeled_class, where groundtruth_labeled_class + is a 1-indexed integer numpy array indicating which classes have been + annotated over the image. + """ + if image_id in self._image_ids: + tf.logging.warning('Ignoring ground truth with image id %s since it was ' + 'previously added', image_id) + return + + # Drop optional fields if empty tensor. 
+ groundtruth_is_crowd = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_is_crowd) + groundtruth_area = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_area) + groundtruth_keypoints = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_keypoints) + groundtruth_keypoint_visibilities = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_keypoint_visibilities) + if groundtruth_is_crowd is not None and not groundtruth_is_crowd.shape[0]: + groundtruth_is_crowd = None + if groundtruth_area is not None and not groundtruth_area.shape[0]: + groundtruth_area = None + if groundtruth_keypoints is not None and not groundtruth_keypoints.shape[0]: + groundtruth_keypoints = None + if groundtruth_keypoint_visibilities is not None and not groundtruth_keypoint_visibilities.shape[ + 0]: + groundtruth_keypoint_visibilities = None + + self._groundtruth_list.extend( + coco_tools.ExportSingleImageGroundtruthToCoco( + image_id=image_id, + next_annotation_id=self._annotation_id, + category_id_set=self._category_id_set, + groundtruth_boxes=groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_boxes], + groundtruth_classes=groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_classes], + groundtruth_is_crowd=groundtruth_is_crowd, + groundtruth_area=groundtruth_area, + groundtruth_keypoints=groundtruth_keypoints, + groundtruth_keypoint_visibilities=groundtruth_keypoint_visibilities) + ) + + self._annotation_id += groundtruth_dict[standard_fields.InputDataFields. + groundtruth_boxes].shape[0] + self._groundtruth_labeled_classes[image_id] = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_labeled_classes) + # Boolean to indicate whether a detection has been added for this image. + self._image_ids[image_id] = False + + def add_single_detected_image_info(self, + image_id, + detections_dict): + """Adds detections for a single image to be used for evaluation. + + If a detection has already been added for this image id, a warning is + logged, and the detection is skipped. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary containing - + DetectionResultFields.detection_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` detection boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + DetectionResultFields.detection_scores: float32 numpy array of shape + [num_boxes] containing detection scores for the boxes. + DetectionResultFields.detection_classes: integer numpy array of shape + [num_boxes] containing 1-indexed detection classes for the boxes. + DetectionResultFields.detection_keypoints (optional): float numpy array + of keypoints with shape [num_boxes, num_keypoints, 2]. + Raises: + ValueError: If groundtruth for the image_id is not available. + """ + if image_id not in self._image_ids: + raise ValueError('Missing groundtruth for image id: {}'.format(image_id)) + + if self._image_ids[image_id]: + tf.logging.warning('Ignoring detection with image id %s since it was ' + 'previously added', image_id) + return + + # Drop optional fields if empty tensor. 
+ detection_keypoints = detections_dict.get( + standard_fields.DetectionResultFields.detection_keypoints) + if detection_keypoints is not None and not detection_keypoints.shape[0]: + detection_keypoints = None + + if self._skip_predictions_for_unlabeled_class: + det_classes = detections_dict[ + standard_fields.DetectionResultFields.detection_classes] + num_det_boxes = det_classes.shape[0] + keep_box_ids = [] + for box_id in range(num_det_boxes): + if det_classes[box_id] in self._groundtruth_labeled_classes[image_id]: + keep_box_ids.append(box_id) + self._detection_boxes_list.extend( + coco_tools.ExportSingleImageDetectionBoxesToCoco( + image_id=image_id, + category_id_set=self._category_id_set, + detection_boxes=detections_dict[ + standard_fields.DetectionResultFields.detection_boxes] + [keep_box_ids], + detection_scores=detections_dict[ + standard_fields.DetectionResultFields.detection_scores] + [keep_box_ids], + detection_classes=detections_dict[ + standard_fields.DetectionResultFields.detection_classes] + [keep_box_ids], + detection_keypoints=detection_keypoints)) + else: + self._detection_boxes_list.extend( + coco_tools.ExportSingleImageDetectionBoxesToCoco( + image_id=image_id, + category_id_set=self._category_id_set, + detection_boxes=detections_dict[ + standard_fields.DetectionResultFields.detection_boxes], + detection_scores=detections_dict[ + standard_fields.DetectionResultFields.detection_scores], + detection_classes=detections_dict[ + standard_fields.DetectionResultFields.detection_classes], + detection_keypoints=detection_keypoints)) + self._image_ids[image_id] = True + + def dump_detections_to_json_file(self, json_output_path): + """Saves the detections into json_output_path in the format used by MS COCO. + + Args: + json_output_path: String containing the output file's path. It can be also + None. In that case nothing will be written to the output file. + """ + if json_output_path and json_output_path is not None: + with tf.gfile.GFile(json_output_path, 'w') as fid: + tf.logging.info('Dumping detections to output json file.') + json_utils.Dump( + obj=self._detection_boxes_list, fid=fid, float_digits=4, indent=2) + + def evaluate(self): + """Evaluates the detection boxes and returns a dictionary of coco metrics. + + Returns: + A dictionary holding - + + 1. summary_metrics: + 'DetectionBoxes_Precision/mAP': mean average precision over classes + averaged over IOU thresholds ranging from .5 to .95 with .05 + increments. + 'DetectionBoxes_Precision/mAP@.50IOU': mean average precision at 50% IOU + 'DetectionBoxes_Precision/mAP@.75IOU': mean average precision at 75% IOU + 'DetectionBoxes_Precision/mAP (small)': mean average precision for small + objects (area < 32^2 pixels). + 'DetectionBoxes_Precision/mAP (medium)': mean average precision for + medium sized objects (32^2 pixels < area < 96^2 pixels). + 'DetectionBoxes_Precision/mAP (large)': mean average precision for large + objects (96^2 pixels < area < 10000^2 pixels). + 'DetectionBoxes_Recall/AR@1': average recall with 1 detection. + 'DetectionBoxes_Recall/AR@10': average recall with 10 detections. + 'DetectionBoxes_Recall/AR@100': average recall with 100 detections. + 'DetectionBoxes_Recall/AR@100 (small)': average recall for small objects + with 100. + 'DetectionBoxes_Recall/AR@100 (medium)': average recall for medium objects + with 100. + 'DetectionBoxes_Recall/AR@100 (large)': average recall for large objects + with 100 detections. + + 2. 
per_category_ap: if include_metrics_per_category is True, category + specific results with keys of the form: + 'Precision mAP ByCategory/category' (without the supercategory part if + no supercategories exist). For backward compatibility + 'PerformanceByCategory' is included in the output regardless of + all_metrics_per_category. + If super_categories are provided, then this will additionally include + metrics aggregated along the super_categories with keys of the form: + `PerformanceBySuperCategory/` + """ + tf.logging.info('Performing evaluation on %d images.', len(self._image_ids)) + groundtruth_dict = { + 'annotations': self._groundtruth_list, + 'images': [{'id': image_id} for image_id in self._image_ids], + 'categories': self._categories + } + coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations( + self._detection_boxes_list) + box_evaluator = coco_tools.COCOEvalWrapper( + coco_wrapped_groundtruth, coco_wrapped_detections, agnostic_mode=False) + box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics( + include_metrics_per_category=self._include_metrics_per_category, + all_metrics_per_category=self._all_metrics_per_category, + super_categories=self._super_categories) + box_metrics.update(box_per_category_ap) + box_metrics = {'DetectionBoxes_'+ key: value + for key, value in iter(box_metrics.items())} + return box_metrics + + def add_eval_dict(self, eval_dict): + """Observes an evaluation result dict for a single example. + + When executing eagerly, once all observations have been observed by this + method you can use `.evaluate()` to get the final metrics. + + When using `tf.estimator.Estimator` for evaluation this function is used by + `get_estimator_eval_metric_ops()` to construct the metric update op. + + Args: + eval_dict: A dictionary that holds tensors for evaluating an object + detection model, returned from + eval_util.result_dict_for_single_example(). + + Returns: + None when executing eagerly, or an update_op that can be used to update + the eval metrics in `tf.estimator.EstimatorSpec`. + """ + + def update_op(image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, groundtruth_is_crowd_batched, + groundtruth_labeled_classes_batched, num_gt_boxes_per_image, + detection_boxes_batched, detection_scores_batched, + detection_classes_batched, num_det_boxes_per_image, + is_annotated_batched): + """Update operation for adding batch of images to Coco evaluator.""" + for (image_id, gt_box, gt_class, gt_is_crowd, gt_labeled_classes, + num_gt_box, det_box, det_score, det_class, + num_det_box, is_annotated) in zip( + image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, groundtruth_is_crowd_batched, + groundtruth_labeled_classes_batched, num_gt_boxes_per_image, + detection_boxes_batched, detection_scores_batched, + detection_classes_batched, num_det_boxes_per_image, + is_annotated_batched): + if is_annotated: + self.add_single_ground_truth_image_info( + image_id, { + 'groundtruth_boxes': gt_box[:num_gt_box], + 'groundtruth_classes': gt_class[:num_gt_box], + 'groundtruth_is_crowd': gt_is_crowd[:num_gt_box], + 'groundtruth_labeled_classes': gt_labeled_classes + }) + self.add_single_detected_image_info( + image_id, + {'detection_boxes': det_box[:num_det_box], + 'detection_scores': det_score[:num_det_box], + 'detection_classes': det_class[:num_det_box]}) + + # Unpack items from the evaluation dictionary. 
+ input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + groundtruth_is_crowd = eval_dict.get( + input_data_fields.groundtruth_is_crowd, None) + groundtruth_labeled_classes = eval_dict.get( + input_data_fields.groundtruth_labeled_classes, None) + detection_boxes = eval_dict[detection_fields.detection_boxes] + detection_scores = eval_dict[detection_fields.detection_scores] + detection_classes = eval_dict[detection_fields.detection_classes] + num_gt_boxes_per_image = eval_dict.get( + 'num_groundtruth_boxes_per_image', None) + num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None) + is_annotated = eval_dict.get('is_annotated', None) + + if groundtruth_is_crowd is None: + groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool) + + # If groundtruth_labeled_classes is not provided, make it equal to the + # detection_classes. This assumes that all predictions will be kept to + # compute eval metrics. + if groundtruth_labeled_classes is None: + groundtruth_labeled_classes = detection_classes + + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. + image_id = tf.expand_dims(image_id, 0) + groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0) + groundtruth_labeled_classes = tf.expand_dims(groundtruth_labeled_classes, + 0) + detection_boxes = tf.expand_dims(detection_boxes, 0) + detection_scores = tf.expand_dims(detection_scores, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2] + else: + num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0) + + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.shape(detection_boxes)[1:2] + else: + num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0) + + if is_annotated is None: + is_annotated = tf.constant([True]) + else: + is_annotated = tf.expand_dims(is_annotated, 0) + else: + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.tile( + tf.shape(groundtruth_boxes)[1:2], + multiples=tf.shape(groundtruth_boxes)[0:1]) + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.tile( + tf.shape(detection_boxes)[1:2], + multiples=tf.shape(detection_boxes)[0:1]) + if is_annotated is None: + is_annotated = tf.ones_like(image_id, dtype=tf.bool) + + return tf.py_func(update_op, [ + image_id, groundtruth_boxes, groundtruth_classes, groundtruth_is_crowd, + groundtruth_labeled_classes, num_gt_boxes_per_image, detection_boxes, + detection_scores, detection_classes, num_det_boxes_per_image, + is_annotated + ], []) + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + This function can take in groundtruth and detections for a batch of images, + or for a single image. For the latter case, the batch dimension for input + tensors need not be present. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. 
For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). If multi-image + evaluation, `eval_dict` should contain the fields + 'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to + properly unpad the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all + update ops must be run together and similarly all value ops must be run + together to guarantee correct behaviour. + """ + update_op = self.add_eval_dict(eval_dict) + metric_names = ['DetectionBoxes_Precision/mAP', + 'DetectionBoxes_Precision/mAP@.50IOU', + 'DetectionBoxes_Precision/mAP@.75IOU', + 'DetectionBoxes_Precision/mAP (large)', + 'DetectionBoxes_Precision/mAP (medium)', + 'DetectionBoxes_Precision/mAP (small)', + 'DetectionBoxes_Recall/AR@1', + 'DetectionBoxes_Recall/AR@10', + 'DetectionBoxes_Recall/AR@100', + 'DetectionBoxes_Recall/AR@100 (large)', + 'DetectionBoxes_Recall/AR@100 (medium)', + 'DetectionBoxes_Recall/AR@100 (small)'] + if self._include_metrics_per_category: + for category_dict in self._categories: + metric_names.append('DetectionBoxes_PerformanceByCategory/mAP/' + + category_dict['name']) + + def first_value_func(): + self._metrics = self.evaluate() + self.clear() + return np.float32(self._metrics[metric_names[0]]) + + def value_func_factory(metric_name): + def value_func(): + return np.float32(self._metrics[metric_name]) + return value_func + + # Ensure that the metrics are only evaluated once. + first_value_op = tf.py_func(first_value_func, [], tf.float32) + eval_metric_ops = {metric_names[0]: (first_value_op, update_op)} + with tf.control_dependencies([first_value_op]): + for metric_name in metric_names[1:]: + eval_metric_ops[metric_name] = (tf.py_func( + value_func_factory(metric_name), [], np.float32), update_op) + return eval_metric_ops + + +def convert_masks_to_binary(masks): + """Converts masks to 0 or 1 and uint8 type.""" + return (masks > 0).astype(np.uint8) + + +class CocoKeypointEvaluator(CocoDetectionEvaluator): + """Class to evaluate COCO keypoint metrics.""" + + def __init__(self, + category_id, + category_keypoints, + class_text, + oks_sigmas=None): + """Constructor. + + Args: + category_id: An integer id uniquely identifying this category. + category_keypoints: A list specifying keypoint mappings, with items: + 'id': (required) an integer id identifying the keypoint. + 'name': (required) a string representing the keypoint name. + class_text: A string representing the category name for which keypoint + metrics are to be computed. + oks_sigmas: A dict of keypoint name to standard deviation values for OKS + metrics. If not provided, default value of 0.05 will be used. + """ + self._category_id = category_id + self._category_name = class_text + self._keypoint_ids = sorted( + [keypoint['id'] for keypoint in category_keypoints]) + kpt_id_to_name = {kpt['id']: kpt['name'] for kpt in category_keypoints} + if oks_sigmas: + self._oks_sigmas = np.array([ + oks_sigmas[kpt_id_to_name[idx]] for idx in self._keypoint_ids + ]) + else: + # Default all per-keypoint sigmas to 0. + self._oks_sigmas = np.full((len(self._keypoint_ids)), 0.05) + tf.logging.warning('No default keypoint OKS sigmas provided. 
Will use ' + '0.05') + tf.logging.info('Using the following keypoint OKS sigmas: {}'.format( + self._oks_sigmas)) + self._metrics = None + super(CocoKeypointEvaluator, self).__init__([{ + 'id': self._category_id, + 'name': class_text + }]) + + def add_single_ground_truth_image_info(self, image_id, groundtruth_dict): + """Adds groundtruth for a single image with keypoints. + + If the image has already been added, a warning is logged, and groundtruth + is ignored. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + InputDataFields.groundtruth_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + InputDataFields.groundtruth_classes: integer numpy array of shape + [num_boxes] containing 1-indexed groundtruth classes for the boxes. + InputDataFields.groundtruth_is_crowd (optional): integer numpy array of + shape [num_boxes] containing iscrowd flag for groundtruth boxes. + InputDataFields.groundtruth_area (optional): float numpy array of + shape [num_boxes] containing the area (in the original absolute + coordinates) of the annotated object. + InputDataFields.groundtruth_keypoints: float numpy array of + keypoints with shape [num_boxes, num_keypoints, 2]. + InputDataFields.groundtruth_keypoint_visibilities (optional): integer + numpy array of keypoint visibilities with shape [num_gt_boxes, + num_keypoints]. Integer is treated as an enum with 0=not labels, + 1=labeled but not visible and 2=labeled and visible. + """ + + # Keep only the groundtruth for our category and its keypoints. + groundtruth_classes = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_classes] + groundtruth_boxes = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_boxes] + groundtruth_keypoints = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_keypoints] + class_indices = [ + idx for idx, gt_class_id in enumerate(groundtruth_classes) + if gt_class_id == self._category_id + ] + filtered_groundtruth_classes = np.take( + groundtruth_classes, class_indices, axis=0) + filtered_groundtruth_boxes = np.take( + groundtruth_boxes, class_indices, axis=0) + filtered_groundtruth_keypoints = np.take( + groundtruth_keypoints, class_indices, axis=0) + filtered_groundtruth_keypoints = np.take( + filtered_groundtruth_keypoints, self._keypoint_ids, axis=1) + + filtered_groundtruth_dict = {} + filtered_groundtruth_dict[ + standard_fields.InputDataFields + .groundtruth_classes] = filtered_groundtruth_classes + filtered_groundtruth_dict[standard_fields.InputDataFields + .groundtruth_boxes] = filtered_groundtruth_boxes + filtered_groundtruth_dict[ + standard_fields.InputDataFields + .groundtruth_keypoints] = filtered_groundtruth_keypoints + + if (standard_fields.InputDataFields.groundtruth_is_crowd in + groundtruth_dict.keys()): + groundtruth_is_crowd = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_is_crowd] + filtered_groundtruth_is_crowd = np.take(groundtruth_is_crowd, + class_indices, 0) + filtered_groundtruth_dict[ + standard_fields.InputDataFields + .groundtruth_is_crowd] = filtered_groundtruth_is_crowd + if (standard_fields.InputDataFields.groundtruth_area in + groundtruth_dict.keys()): + groundtruth_area = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_area] + filtered_groundtruth_area = np.take(groundtruth_area, class_indices, 0) + filtered_groundtruth_dict[ + 
standard_fields.InputDataFields + .groundtruth_area] = filtered_groundtruth_area + if (standard_fields.InputDataFields.groundtruth_keypoint_visibilities in + groundtruth_dict.keys()): + groundtruth_keypoint_visibilities = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_keypoint_visibilities] + filtered_groundtruth_keypoint_visibilities = np.take( + groundtruth_keypoint_visibilities, class_indices, axis=0) + filtered_groundtruth_keypoint_visibilities = np.take( + filtered_groundtruth_keypoint_visibilities, + self._keypoint_ids, + axis=1) + filtered_groundtruth_dict[ + standard_fields.InputDataFields. + groundtruth_keypoint_visibilities] = filtered_groundtruth_keypoint_visibilities + + super(CocoKeypointEvaluator, + self).add_single_ground_truth_image_info(image_id, + filtered_groundtruth_dict) + + def add_single_detected_image_info(self, image_id, detections_dict): + """Adds detections for a single image and the specific category for which keypoints are evaluated. + + If a detection has already been added for this image id, a warning is + logged, and the detection is skipped. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary containing - + DetectionResultFields.detection_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` detection boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + DetectionResultFields.detection_scores: float32 numpy array of shape + [num_boxes] containing detection scores for the boxes. + DetectionResultFields.detection_classes: integer numpy array of shape + [num_boxes] containing 1-indexed detection classes for the boxes. + DetectionResultFields.detection_keypoints: float numpy array of + keypoints with shape [num_boxes, num_keypoints, 2]. + + Raises: + ValueError: If groundtruth for the image_id is not available. + """ + + # Keep only the detections for our category and its keypoints. + detection_classes = detections_dict[ + standard_fields.DetectionResultFields.detection_classes] + detection_boxes = detections_dict[ + standard_fields.DetectionResultFields.detection_boxes] + detection_scores = detections_dict[ + standard_fields.DetectionResultFields.detection_scores] + detection_keypoints = detections_dict[ + standard_fields.DetectionResultFields.detection_keypoints] + class_indices = [ + idx for idx, class_id in enumerate(detection_classes) + if class_id == self._category_id + ] + filtered_detection_classes = np.take( + detection_classes, class_indices, axis=0) + filtered_detection_boxes = np.take(detection_boxes, class_indices, axis=0) + filtered_detection_scores = np.take(detection_scores, class_indices, axis=0) + filtered_detection_keypoints = np.take( + detection_keypoints, class_indices, axis=0) + filtered_detection_keypoints = np.take( + filtered_detection_keypoints, self._keypoint_ids, axis=1) + + filtered_detections_dict = {} + filtered_detections_dict[standard_fields.DetectionResultFields + .detection_classes] = filtered_detection_classes + filtered_detections_dict[standard_fields.DetectionResultFields + .detection_boxes] = filtered_detection_boxes + filtered_detections_dict[standard_fields.DetectionResultFields + .detection_scores] = filtered_detection_scores + filtered_detections_dict[standard_fields.DetectionResultFields. 
+ detection_keypoints] = filtered_detection_keypoints + + super(CocoKeypointEvaluator, + self).add_single_detected_image_info(image_id, + filtered_detections_dict) + + def evaluate(self): + """Evaluates the keypoints and returns a dictionary of coco metrics. + + Returns: + A dictionary holding - + + 1. summary_metrics: + 'Keypoints_Precision/mAP': mean average precision over classes + averaged over OKS thresholds ranging from .5 to .95 with .05 + increments. + 'Keypoints_Precision/mAP@.50IOU': mean average precision at 50% OKS + 'Keypoints_Precision/mAP@.75IOU': mean average precision at 75% OKS + 'Keypoints_Precision/mAP (medium)': mean average precision for medium + sized objects (32^2 pixels < area < 96^2 pixels). + 'Keypoints_Precision/mAP (large)': mean average precision for large + objects (96^2 pixels < area < 10000^2 pixels). + 'Keypoints_Recall/AR@1': average recall with 1 detection. + 'Keypoints_Recall/AR@10': average recall with 10 detections. + 'Keypoints_Recall/AR@100': average recall with 100 detections. + 'Keypoints_Recall/AR@100 (medium)': average recall for medium objects with + 100. + 'Keypoints_Recall/AR@100 (large)': average recall for large objects with + 100 detections. + """ + tf.logging.info('Performing evaluation on %d images.', len(self._image_ids)) + groundtruth_dict = { + 'annotations': self._groundtruth_list, + 'images': [{'id': image_id} for image_id in self._image_ids], + 'categories': self._categories + } + coco_wrapped_groundtruth = coco_tools.COCOWrapper( + groundtruth_dict, detection_type='bbox') + coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations( + self._detection_boxes_list) + keypoint_evaluator = coco_tools.COCOEvalWrapper( + coco_wrapped_groundtruth, + coco_wrapped_detections, + agnostic_mode=False, + iou_type='keypoints', + oks_sigmas=self._oks_sigmas) + keypoint_metrics, _ = keypoint_evaluator.ComputeMetrics( + include_metrics_per_category=False, all_metrics_per_category=False) + keypoint_metrics = { + 'Keypoints_' + key: value + for key, value in iter(keypoint_metrics.items()) + } + return keypoint_metrics + + def add_eval_dict(self, eval_dict): + """Observes an evaluation result dict for a single example. + + When executing eagerly, once all observations have been observed by this + method you can use `.evaluate()` to get the final metrics. + + When using `tf.estimator.Estimator` for evaluation this function is used by + `get_estimator_eval_metric_ops()` to construct the metric update op. + + Args: + eval_dict: A dictionary that holds tensors for evaluating an object + detection model, returned from + eval_util.result_dict_for_single_example(). + + Returns: + None when executing eagerly, or an update_op that can be used to update + the eval metrics in `tf.estimator.EstimatorSpec`. 
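For context, constructing this keypoint evaluator takes the category id, the keypoint id-to-name mapping, the category name, and optional per-keypoint OKS sigmas keyed by keypoint name, as described in the constructor above. A minimal sketch with illustrative keypoint names and sigma values:

from object_detection.metrics import coco_evaluation

# Keypoint ids/names and OKS sigmas below are illustrative.
category_keypoints = [{'id': 0, 'name': 'nose'},
                      {'id': 1, 'name': 'left_eye'}]
kp_evaluator = coco_evaluation.CocoKeypointEvaluator(
    category_id=1,
    category_keypoints=category_keypoints,
    class_text='person',
    oks_sigmas={'nose': 0.026, 'left_eye': 0.025})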
+ """ + def update_op( + image_id_batched, + groundtruth_boxes_batched, + groundtruth_classes_batched, + groundtruth_is_crowd_batched, + groundtruth_area_batched, + groundtruth_keypoints_batched, + groundtruth_keypoint_visibilities_batched, + num_gt_boxes_per_image, + detection_boxes_batched, + detection_scores_batched, + detection_classes_batched, + detection_keypoints_batched, + num_det_boxes_per_image, + is_annotated_batched): + """Update operation for adding batch of images to Coco evaluator.""" + + for (image_id, gt_box, gt_class, gt_is_crowd, gt_area, gt_keyp, + gt_keyp_vis, num_gt_box, det_box, det_score, det_class, det_keyp, + num_det_box, is_annotated) in zip( + image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, groundtruth_is_crowd_batched, + groundtruth_area_batched, groundtruth_keypoints_batched, + groundtruth_keypoint_visibilities_batched, + num_gt_boxes_per_image, detection_boxes_batched, + detection_scores_batched, detection_classes_batched, + detection_keypoints_batched, num_det_boxes_per_image, + is_annotated_batched): + if is_annotated: + self.add_single_ground_truth_image_info( + image_id, { + 'groundtruth_boxes': gt_box[:num_gt_box], + 'groundtruth_classes': gt_class[:num_gt_box], + 'groundtruth_is_crowd': gt_is_crowd[:num_gt_box], + 'groundtruth_area': gt_area[:num_gt_box], + 'groundtruth_keypoints': gt_keyp[:num_gt_box], + 'groundtruth_keypoint_visibilities': gt_keyp_vis[:num_gt_box] + }) + self.add_single_detected_image_info( + image_id, { + 'detection_boxes': det_box[:num_det_box], + 'detection_scores': det_score[:num_det_box], + 'detection_classes': det_class[:num_det_box], + 'detection_keypoints': det_keyp[:num_det_box], + }) + + # Unpack items from the evaluation dictionary. + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + groundtruth_is_crowd = eval_dict.get(input_data_fields.groundtruth_is_crowd, + None) + groundtruth_area = eval_dict.get(input_data_fields.groundtruth_area, None) + groundtruth_keypoints = eval_dict[input_data_fields.groundtruth_keypoints] + groundtruth_keypoint_visibilities = eval_dict.get( + input_data_fields.groundtruth_keypoint_visibilities, None) + detection_boxes = eval_dict[detection_fields.detection_boxes] + detection_scores = eval_dict[detection_fields.detection_scores] + detection_classes = eval_dict[detection_fields.detection_classes] + detection_keypoints = eval_dict[detection_fields.detection_keypoints] + num_gt_boxes_per_image = eval_dict.get( + 'num_groundtruth_boxes_per_image', None) + num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None) + is_annotated = eval_dict.get('is_annotated', None) + + if groundtruth_is_crowd is None: + groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool) + + if groundtruth_area is None: + groundtruth_area = tf.zeros_like(groundtruth_classes, dtype=tf.float32) + + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. 
+ image_id = tf.expand_dims(image_id, 0) + groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0) + groundtruth_area = tf.expand_dims(groundtruth_area, 0) + groundtruth_keypoints = tf.expand_dims(groundtruth_keypoints, 0) + detection_boxes = tf.expand_dims(detection_boxes, 0) + detection_scores = tf.expand_dims(detection_scores, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + detection_keypoints = tf.expand_dims(detection_keypoints, 0) + + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2] + else: + num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0) + + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.shape(detection_boxes)[1:2] + else: + num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0) + + if is_annotated is None: + is_annotated = tf.constant([True]) + else: + is_annotated = tf.expand_dims(is_annotated, 0) + + if groundtruth_keypoint_visibilities is None: + groundtruth_keypoint_visibilities = tf.fill([ + tf.shape(groundtruth_boxes)[1], + tf.shape(groundtruth_keypoints)[2] + ], tf.constant(2, dtype=tf.int32)) + groundtruth_keypoint_visibilities = tf.expand_dims( + groundtruth_keypoint_visibilities, 0) + else: + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.tile( + tf.shape(groundtruth_boxes)[1:2], + multiples=tf.shape(groundtruth_boxes)[0:1]) + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.tile( + tf.shape(detection_boxes)[1:2], + multiples=tf.shape(detection_boxes)[0:1]) + if is_annotated is None: + is_annotated = tf.ones_like(image_id, dtype=tf.bool) + if groundtruth_keypoint_visibilities is None: + groundtruth_keypoint_visibilities = tf.fill([ + tf.shape(groundtruth_keypoints)[1], + tf.shape(groundtruth_keypoints)[2] + ], tf.constant(2, dtype=tf.int32)) + groundtruth_keypoint_visibilities = tf.tile( + tf.expand_dims(groundtruth_keypoint_visibilities, 0), + multiples=[tf.shape(groundtruth_keypoints)[0], 1, 1]) + + return tf.py_func(update_op, [ + image_id, groundtruth_boxes, groundtruth_classes, groundtruth_is_crowd, + groundtruth_area, groundtruth_keypoints, + groundtruth_keypoint_visibilities, num_gt_boxes_per_image, + detection_boxes, detection_scores, detection_classes, + detection_keypoints, num_det_boxes_per_image, is_annotated + ], []) + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + This function can take in groundtruth and detections for a batch of images, + or for a single image. For the latter case, the batch dimension for input + tensors need not be present. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). If multi-image + evaluation, `eval_dict` should contain the fields + 'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to + properly unpad the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all + update ops must be run together and similarly all value ops must be run + together to guarantee correct behaviour. 
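+
+      A minimal sketch of the TF1 wiring (the evaluator construction and
+      `eval_dict` are assumed to follow the pattern used for
+      CocoDetectionEvaluator in coco_evaluation_test.py; 'person' is a
+      placeholder category name):
+
+        eval_metric_ops = evaluator.get_estimator_eval_metric_ops(eval_dict)
+        # Every entry shares the same update op: run it once per batch, then
+        # fetch all value ops together.
+        _, update_op = eval_metric_ops[
+            'Keypoints_Precision/mAP ByCategory/person']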
+ """ + update_op = self.add_eval_dict(eval_dict) + category = self._category_name + metric_names = [ + 'Keypoints_Precision/mAP ByCategory/{}'.format(category), + 'Keypoints_Precision/mAP@.50IOU ByCategory/{}'.format(category), + 'Keypoints_Precision/mAP@.75IOU ByCategory/{}'.format(category), + 'Keypoints_Precision/mAP (large) ByCategory/{}'.format(category), + 'Keypoints_Precision/mAP (medium) ByCategory/{}'.format(category), + 'Keypoints_Recall/AR@1 ByCategory/{}'.format(category), + 'Keypoints_Recall/AR@10 ByCategory/{}'.format(category), + 'Keypoints_Recall/AR@100 ByCategory/{}'.format(category), + 'Keypoints_Recall/AR@100 (large) ByCategory/{}'.format(category), + 'Keypoints_Recall/AR@100 (medium) ByCategory/{}'.format(category) + ] + + def first_value_func(): + self._metrics = self.evaluate() + self.clear() + return np.float32(self._metrics[metric_names[0]]) + + def value_func_factory(metric_name): + def value_func(): + return np.float32(self._metrics[metric_name]) + return value_func + + # Ensure that the metrics are only evaluated once. + first_value_op = tf.py_func(first_value_func, [], tf.float32) + eval_metric_ops = {metric_names[0]: (first_value_op, update_op)} + with tf.control_dependencies([first_value_op]): + for metric_name in metric_names[1:]: + eval_metric_ops[metric_name] = (tf.py_func( + value_func_factory(metric_name), [], np.float32), update_op) + return eval_metric_ops + + +class CocoMaskEvaluator(object_detection_evaluation.DetectionEvaluator): + """Class to evaluate COCO detection metrics.""" + + def __init__(self, categories, + include_metrics_per_category=False, + super_categories=None): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + include_metrics_per_category: If True, include metrics for each category. + super_categories: None or a python dict mapping super-category names + (strings) to lists of categories (corresponding to category names + in the label_map). Metrics are aggregated along these super-categories + and added to the `per_category_ap` and are associated with the name + `PerformanceBySuperCategory/`. + """ + super(CocoMaskEvaluator, self).__init__(categories) + self._image_id_to_mask_shape_map = {} + self._image_ids_with_detections = set([]) + self._groundtruth_list = [] + self._detection_masks_list = [] + self._category_id_set = set([cat['id'] for cat in self._categories]) + self._annotation_id = 1 + self._include_metrics_per_category = include_metrics_per_category + self._super_categories = super_categories + + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + self._image_id_to_mask_shape_map.clear() + self._image_ids_with_detections.clear() + self._groundtruth_list = [] + self._detection_masks_list = [] + + def add_single_ground_truth_image_info(self, + image_id, + groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + If the image has already been added, a warning is logged, and groundtruth is + ignored. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + InputDataFields.groundtruth_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. 
+ InputDataFields.groundtruth_classes: integer numpy array of shape + [num_boxes] containing 1-indexed groundtruth classes for the boxes. + InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape + [num_boxes, image_height, image_width] containing groundtruth masks + corresponding to the boxes. The elements of the array must be in + {0, 1}. + InputDataFields.groundtruth_is_crowd (optional): integer numpy array of + shape [num_boxes] containing iscrowd flag for groundtruth boxes. + InputDataFields.groundtruth_area (optional): float numpy array of + shape [num_boxes] containing the area (in the original absolute + coordinates) of the annotated object. + """ + if image_id in self._image_id_to_mask_shape_map: + tf.logging.warning('Ignoring ground truth with image id %s since it was ' + 'previously added', image_id) + return + + # Drop optional fields if empty tensor. + groundtruth_is_crowd = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_is_crowd) + groundtruth_area = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_area) + if groundtruth_is_crowd is not None and not groundtruth_is_crowd.shape[0]: + groundtruth_is_crowd = None + if groundtruth_area is not None and not groundtruth_area.shape[0]: + groundtruth_area = None + + groundtruth_instance_masks = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_instance_masks] + groundtruth_instance_masks = convert_masks_to_binary( + groundtruth_instance_masks) + self._groundtruth_list.extend( + coco_tools. + ExportSingleImageGroundtruthToCoco( + image_id=image_id, + next_annotation_id=self._annotation_id, + category_id_set=self._category_id_set, + groundtruth_boxes=groundtruth_dict[standard_fields.InputDataFields. + groundtruth_boxes], + groundtruth_classes=groundtruth_dict[standard_fields. + InputDataFields. + groundtruth_classes], + groundtruth_masks=groundtruth_instance_masks, + groundtruth_is_crowd=groundtruth_is_crowd, + groundtruth_area=groundtruth_area)) + self._annotation_id += groundtruth_dict[standard_fields.InputDataFields. + groundtruth_boxes].shape[0] + self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_instance_masks].shape + + def add_single_detected_image_info(self, + image_id, + detections_dict): + """Adds detections for a single image to be used for evaluation. + + If a detection has already been added for this image id, a warning is + logged, and the detection is skipped. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary containing - + DetectionResultFields.detection_scores: float32 numpy array of shape + [num_boxes] containing detection scores for the boxes. + DetectionResultFields.detection_classes: integer numpy array of shape + [num_boxes] containing 1-indexed detection classes for the boxes. + DetectionResultFields.detection_masks: optional uint8 numpy array of + shape [num_boxes, image_height, image_width] containing instance + masks corresponding to the boxes. The elements of the array must be + in {0, 1}. + + Raises: + ValueError: If groundtruth for the image_id is not available or if + spatial shapes of groundtruth_instance_masks and detection_masks are + incompatible. 
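+
+    Example (sketch only; the shapes and values are made up for illustration,
+    and `evaluator` stands for a constructed CocoMaskEvaluator whose
+    groundtruth for 'image1' was already added with masks of the same spatial
+    shape):
+
+      evaluator.add_single_detected_image_info(
+          image_id='image1',
+          detections_dict={
+              standard_fields.DetectionResultFields.detection_scores:
+                  np.array([.8]),
+              standard_fields.DetectionResultFields.detection_classes:
+                  np.array([1]),
+              standard_fields.DetectionResultFields.detection_masks:
+                  np.ones([1, 4, 4], dtype=np.uint8)
+          })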
+ """ + if image_id not in self._image_id_to_mask_shape_map: + raise ValueError('Missing groundtruth for image id: {}'.format(image_id)) + + if image_id in self._image_ids_with_detections: + tf.logging.warning('Ignoring detection with image id %s since it was ' + 'previously added', image_id) + return + + groundtruth_masks_shape = self._image_id_to_mask_shape_map[image_id] + detection_masks = detections_dict[standard_fields.DetectionResultFields. + detection_masks] + if groundtruth_masks_shape[1:] != detection_masks.shape[1:]: + raise ValueError('Spatial shape of groundtruth masks and detection masks ' + 'are incompatible: {} vs {}'.format( + groundtruth_masks_shape, + detection_masks.shape)) + detection_masks = convert_masks_to_binary(detection_masks) + self._detection_masks_list.extend( + coco_tools.ExportSingleImageDetectionMasksToCoco( + image_id=image_id, + category_id_set=self._category_id_set, + detection_masks=detection_masks, + detection_scores=detections_dict[standard_fields. + DetectionResultFields. + detection_scores], + detection_classes=detections_dict[standard_fields. + DetectionResultFields. + detection_classes])) + self._image_ids_with_detections.update([image_id]) + + def dump_detections_to_json_file(self, json_output_path): + """Saves the detections into json_output_path in the format used by MS COCO. + + Args: + json_output_path: String containing the output file's path. It can be also + None. In that case nothing will be written to the output file. + """ + if json_output_path and json_output_path is not None: + tf.logging.info('Dumping detections to output json file.') + with tf.gfile.GFile(json_output_path, 'w') as fid: + json_utils.Dump( + obj=self._detection_masks_list, fid=fid, float_digits=4, indent=2) + + def evaluate(self): + """Evaluates the detection masks and returns a dictionary of coco metrics. + + Returns: + A dictionary holding - + + 1. summary_metrics: + 'DetectionMasks_Precision/mAP': mean average precision over classes + averaged over IOU thresholds ranging from .5 to .95 with .05 increments. + 'DetectionMasks_Precision/mAP@.50IOU': mean average precision at 50% IOU. + 'DetectionMasks_Precision/mAP@.75IOU': mean average precision at 75% IOU. + 'DetectionMasks_Precision/mAP (small)': mean average precision for small + objects (area < 32^2 pixels). + 'DetectionMasks_Precision/mAP (medium)': mean average precision for medium + sized objects (32^2 pixels < area < 96^2 pixels). + 'DetectionMasks_Precision/mAP (large)': mean average precision for large + objects (96^2 pixels < area < 10000^2 pixels). + 'DetectionMasks_Recall/AR@1': average recall with 1 detection. + 'DetectionMasks_Recall/AR@10': average recall with 10 detections. + 'DetectionMasks_Recall/AR@100': average recall with 100 detections. + 'DetectionMasks_Recall/AR@100 (small)': average recall for small objects + with 100 detections. + 'DetectionMasks_Recall/AR@100 (medium)': average recall for medium objects + with 100 detections. + 'DetectionMasks_Recall/AR@100 (large)': average recall for large objects + with 100 detections. + + 2. per_category_ap: if include_metrics_per_category is True, category + specific results with keys of the form: + 'Precision mAP ByCategory/category' (without the supercategory part if + no supercategories exist). For backward compatibility + 'PerformanceByCategory' is included in the output regardless of + all_metrics_per_category. 
+ If super_categories are provided, then this will additionally include + metrics aggregated along the super_categories with keys of the form: + `PerformanceBySuperCategory/` + """ + groundtruth_dict = { + 'annotations': self._groundtruth_list, + 'images': [{'id': image_id, 'height': shape[1], 'width': shape[2]} + for image_id, shape in self._image_id_to_mask_shape_map. + items()], + 'categories': self._categories + } + coco_wrapped_groundtruth = coco_tools.COCOWrapper( + groundtruth_dict, detection_type='segmentation') + coco_wrapped_detection_masks = coco_wrapped_groundtruth.LoadAnnotations( + self._detection_masks_list) + mask_evaluator = coco_tools.COCOEvalWrapper( + coco_wrapped_groundtruth, coco_wrapped_detection_masks, + agnostic_mode=False, iou_type='segm') + mask_metrics, mask_per_category_ap = mask_evaluator.ComputeMetrics( + include_metrics_per_category=self._include_metrics_per_category, + super_categories=self._super_categories) + mask_metrics.update(mask_per_category_ap) + mask_metrics = {'DetectionMasks_'+ key: value + for key, value in mask_metrics.items()} + return mask_metrics + + def add_eval_dict(self, eval_dict): + """Observes an evaluation result dict for a single example. + + When executing eagerly, once all observations have been observed by this + method you can use `.evaluate()` to get the final metrics. + + When using `tf.estimator.Estimator` for evaluation this function is used by + `get_estimator_eval_metric_ops()` to construct the metric update op. + + Args: + eval_dict: A dictionary that holds tensors for evaluating an object + detection model, returned from + eval_util.result_dict_for_single_example(). + + Returns: + None when executing eagerly, or an update_op that can be used to update + the eval metrics in `tf.estimator.EstimatorSpec`. + """ + def update_op(image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, + groundtruth_instance_masks_batched, + groundtruth_is_crowd_batched, num_gt_boxes_per_image, + detection_scores_batched, detection_classes_batched, + detection_masks_batched, num_det_boxes_per_image, + original_image_spatial_shape): + """Update op for metrics.""" + + for (image_id, groundtruth_boxes, groundtruth_classes, + groundtruth_instance_masks, groundtruth_is_crowd, num_gt_box, + detection_scores, detection_classes, + detection_masks, num_det_box, original_image_shape) in zip( + image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, groundtruth_instance_masks_batched, + groundtruth_is_crowd_batched, num_gt_boxes_per_image, + detection_scores_batched, detection_classes_batched, + detection_masks_batched, num_det_boxes_per_image, + original_image_spatial_shape): + self.add_single_ground_truth_image_info( + image_id, { + 'groundtruth_boxes': + groundtruth_boxes[:num_gt_box], + 'groundtruth_classes': + groundtruth_classes[:num_gt_box], + 'groundtruth_instance_masks': + groundtruth_instance_masks[:num_gt_box][ + :original_image_shape[0], :original_image_shape[1]], + 'groundtruth_is_crowd': + groundtruth_is_crowd[:num_gt_box] + }) + self.add_single_detected_image_info( + image_id, { + 'detection_scores': detection_scores[:num_det_box], + 'detection_classes': detection_classes[:num_det_box], + 'detection_masks': detection_masks[:num_det_box][ + :original_image_shape[0], :original_image_shape[1]] + }) + + # Unpack items from the evaluation dictionary. 
+ input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + original_image_spatial_shape = eval_dict[ + input_data_fields.original_image_spatial_shape] + groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + groundtruth_instance_masks = eval_dict[ + input_data_fields.groundtruth_instance_masks] + groundtruth_is_crowd = eval_dict.get( + input_data_fields.groundtruth_is_crowd, None) + num_gt_boxes_per_image = eval_dict.get( + input_data_fields.num_groundtruth_boxes, None) + detection_scores = eval_dict[detection_fields.detection_scores] + detection_classes = eval_dict[detection_fields.detection_classes] + detection_masks = eval_dict[detection_fields.detection_masks] + num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections, + None) + + if groundtruth_is_crowd is None: + groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool) + + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. + image_id = tf.expand_dims(image_id, 0) + groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0) + groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0) + detection_scores = tf.expand_dims(detection_scores, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + detection_masks = tf.expand_dims(detection_masks, 0) + + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2] + else: + num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0) + + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.shape(detection_scores)[1:2] + else: + num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0) + else: + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.tile( + tf.shape(groundtruth_boxes)[1:2], + multiples=tf.shape(groundtruth_boxes)[0:1]) + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.tile( + tf.shape(detection_scores)[1:2], + multiples=tf.shape(detection_scores)[0:1]) + + return tf.py_func(update_op, [ + image_id, groundtruth_boxes, groundtruth_classes, + groundtruth_instance_masks, groundtruth_is_crowd, + num_gt_boxes_per_image, detection_scores, detection_classes, + detection_masks, num_det_boxes_per_image, original_image_spatial_shape + ], []) + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). If multi-image + evaluation, `eval_dict` should contain the fields + 'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to + properly unpad the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all + update ops must be run together and similarly all value ops must be run + together to guarantee correct behaviour. 
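+
+      A minimal sketch of how the returned dict is typically consumed (the
+      `total_loss` tensor, `evaluator` and `eval_dict` names are placeholders,
+      not part of this module):
+
+        spec = tf.estimator.EstimatorSpec(
+            mode=tf.estimator.ModeKeys.EVAL,
+            loss=total_loss,
+            eval_metric_ops=evaluator.get_estimator_eval_metric_ops(eval_dict))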
+ """ + update_op = self.add_eval_dict(eval_dict) + metric_names = ['DetectionMasks_Precision/mAP', + 'DetectionMasks_Precision/mAP@.50IOU', + 'DetectionMasks_Precision/mAP@.75IOU', + 'DetectionMasks_Precision/mAP (small)', + 'DetectionMasks_Precision/mAP (medium)', + 'DetectionMasks_Precision/mAP (large)', + 'DetectionMasks_Recall/AR@1', + 'DetectionMasks_Recall/AR@10', + 'DetectionMasks_Recall/AR@100', + 'DetectionMasks_Recall/AR@100 (small)', + 'DetectionMasks_Recall/AR@100 (medium)', + 'DetectionMasks_Recall/AR@100 (large)'] + if self._include_metrics_per_category: + for category_dict in self._categories: + metric_names.append('DetectionMasks_PerformanceByCategory/mAP/' + + category_dict['name']) + + def first_value_func(): + self._metrics = self.evaluate() + self.clear() + return np.float32(self._metrics[metric_names[0]]) + + def value_func_factory(metric_name): + def value_func(): + return np.float32(self._metrics[metric_name]) + return value_func + + # Ensure that the metrics are only evaluated once. + first_value_op = tf.py_func(first_value_func, [], tf.float32) + eval_metric_ops = {metric_names[0]: (first_value_op, update_op)} + with tf.control_dependencies([first_value_op]): + for metric_name in metric_names[1:]: + eval_metric_ops[metric_name] = (tf.py_func( + value_func_factory(metric_name), [], np.float32), update_op) + return eval_metric_ops + + +class CocoPanopticSegmentationEvaluator( + object_detection_evaluation.DetectionEvaluator): + """Class to evaluate PQ (panoptic quality) metric on COCO dataset. + + More details about this metric: https://arxiv.org/pdf/1801.00868.pdf. + """ + + def __init__(self, + categories, + include_metrics_per_category=False, + iou_threshold=0.5, + ioa_threshold=0.5): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + include_metrics_per_category: If True, include metrics for each category. + iou_threshold: intersection-over-union threshold for mask matching (with + normal groundtruths). + ioa_threshold: intersection-over-area threshold for mask matching with + "is_crowd" groundtruths. + """ + super(CocoPanopticSegmentationEvaluator, self).__init__(categories) + self._groundtruth_masks = {} + self._groundtruth_class_labels = {} + self._groundtruth_is_crowd = {} + self._predicted_masks = {} + self._predicted_class_labels = {} + self._include_metrics_per_category = include_metrics_per_category + self._iou_threshold = iou_threshold + self._ioa_threshold = ioa_threshold + + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + self._groundtruth_masks.clear() + self._groundtruth_class_labels.clear() + self._groundtruth_is_crowd.clear() + self._predicted_masks.clear() + self._predicted_class_labels.clear() + + def add_single_ground_truth_image_info(self, image_id, groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + If the image has already been added, a warning is logged, and groundtruth is + ignored. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + InputDataFields.groundtruth_classes: integer numpy array of shape + [num_masks] containing 1-indexed groundtruth classes for the mask. + InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape + [num_masks, image_height, image_width] containing groundtruth masks. 
+ The elements of the array must be in {0, 1}. + InputDataFields.groundtruth_is_crowd (optional): integer numpy array of + shape [num_boxes] containing iscrowd flag for groundtruth boxes. + """ + + if image_id in self._groundtruth_masks: + tf.logging.warning( + 'Ignoring groundtruth with image %s, since it has already been ' + 'added to the ground truth database.', image_id) + return + + self._groundtruth_masks[image_id] = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_instance_masks] + self._groundtruth_class_labels[image_id] = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_classes] + groundtruth_is_crowd = groundtruth_dict.get( + standard_fields.InputDataFields.groundtruth_is_crowd) + # Drop groundtruth_is_crowd if empty tensor. + if groundtruth_is_crowd is not None and not groundtruth_is_crowd.size > 0: + groundtruth_is_crowd = None + if groundtruth_is_crowd is not None: + self._groundtruth_is_crowd[image_id] = groundtruth_is_crowd + + def add_single_detected_image_info(self, image_id, detections_dict): + """Adds detections for a single image to be used for evaluation. + + If a detection has already been added for this image id, a warning is + logged, and the detection is skipped. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary containing - + DetectionResultFields.detection_classes: integer numpy array of shape + [num_masks] containing 1-indexed detection classes for the masks. + DetectionResultFields.detection_masks: optional uint8 numpy array of + shape [num_masks, image_height, image_width] containing instance + masks. The elements of the array must be in {0, 1}. + + Raises: + ValueError: If results and groundtruth shape don't match. + """ + + if image_id not in self._groundtruth_masks: + raise ValueError('Missing groundtruth for image id: {}'.format(image_id)) + + detection_masks = detections_dict[ + standard_fields.DetectionResultFields.detection_masks] + self._predicted_masks[image_id] = detection_masks + self._predicted_class_labels[image_id] = detections_dict[ + standard_fields.DetectionResultFields.detection_classes] + groundtruth_mask_shape = self._groundtruth_masks[image_id].shape + if groundtruth_mask_shape[1:] != detection_masks.shape[1:]: + raise ValueError("The shape of results doesn't match groundtruth.") + + def evaluate(self): + """Evaluates the detection masks and returns a dictionary of coco metrics. + + Returns: + A dictionary holding - + + 1. summary_metric: + 'PanopticQuality@%.2fIOU': mean panoptic quality averaged over classes at + the required IOU. + 'SegmentationQuality@%.2fIOU': mean segmentation quality averaged over + classes at the required IOU. + 'RecognitionQuality@%.2fIOU': mean recognition quality averaged over + classes at the required IOU. + 'NumValidClasses': number of valid classes. A valid class should have at + least one normal (is_crowd=0) groundtruth mask or one predicted mask. + 'NumTotalClasses': number of total classes. + + 2. per_category_pq: if include_metrics_per_category is True, category + specific results with keys of the form: + 'PanopticQuality@%.2fIOU_ByCategory/category'. + """ + # Evaluate and accumulate the iou/tp/fp/fn. + sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn = self._evaluate_all_masks() + # Compute PQ metric for each category and average over all classes. 
+ mask_metrics = self._compute_panoptic_metrics(sum_tp_iou, sum_num_tp, + sum_num_fp, sum_num_fn) + return mask_metrics + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). If multi-image + evaluation, `eval_dict` should contain the fields + 'num_gt_masks_per_image' and 'num_det_masks_per_image' to properly unpad + the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all + update ops must be run together and similarly all value ops must be run + together to guarantee correct behaviour. + """ + + def update_op(image_id_batched, groundtruth_classes_batched, + groundtruth_instance_masks_batched, + groundtruth_is_crowd_batched, num_gt_masks_per_image, + detection_classes_batched, detection_masks_batched, + num_det_masks_per_image): + """Update op for metrics.""" + for (image_id, groundtruth_classes, groundtruth_instance_masks, + groundtruth_is_crowd, num_gt_mask, detection_classes, + detection_masks, num_det_mask) in zip( + image_id_batched, groundtruth_classes_batched, + groundtruth_instance_masks_batched, groundtruth_is_crowd_batched, + num_gt_masks_per_image, detection_classes_batched, + detection_masks_batched, num_det_masks_per_image): + + self.add_single_ground_truth_image_info( + image_id, { + 'groundtruth_classes': + groundtruth_classes[:num_gt_mask], + 'groundtruth_instance_masks': + groundtruth_instance_masks[:num_gt_mask], + 'groundtruth_is_crowd': + groundtruth_is_crowd[:num_gt_mask] + }) + self.add_single_detected_image_info( + image_id, { + 'detection_classes': detection_classes[:num_det_mask], + 'detection_masks': detection_masks[:num_det_mask] + }) + + # Unpack items from the evaluation dictionary. + (image_id, groundtruth_classes, groundtruth_instance_masks, + groundtruth_is_crowd, num_gt_masks_per_image, detection_classes, + detection_masks, num_det_masks_per_image + ) = self._unpack_evaluation_dictionary_items(eval_dict) + + update_op = tf.py_func(update_op, [ + image_id, groundtruth_classes, groundtruth_instance_masks, + groundtruth_is_crowd, num_gt_masks_per_image, detection_classes, + detection_masks, num_det_masks_per_image + ], []) + + metric_names = [ + 'PanopticQuality@%.2fIOU' % self._iou_threshold, + 'SegmentationQuality@%.2fIOU' % self._iou_threshold, + 'RecognitionQuality@%.2fIOU' % self._iou_threshold + ] + if self._include_metrics_per_category: + for category_dict in self._categories: + metric_names.append('PanopticQuality@%.2fIOU_ByCategory/%s' % + (self._iou_threshold, category_dict['name'])) + + def first_value_func(): + self._metrics = self.evaluate() + self.clear() + return np.float32(self._metrics[metric_names[0]]) + + def value_func_factory(metric_name): + + def value_func(): + return np.float32(self._metrics[metric_name]) + + return value_func + + # Ensure that the metrics are only evaluated once. 
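+    # Note on the pattern below: first_value_func runs the full evaluation
+    # once, caches the result in self._metrics and clears the accumulated
+    # groundtruth/detections; the remaining value ops only read from that
+    # cache, which is why they are created under a control dependency on
+    # first_value_op.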
+ first_value_op = tf.py_func(first_value_func, [], tf.float32) + eval_metric_ops = {metric_names[0]: (first_value_op, update_op)} + with tf.control_dependencies([first_value_op]): + for metric_name in metric_names[1:]: + eval_metric_ops[metric_name] = (tf.py_func( + value_func_factory(metric_name), [], np.float32), update_op) + return eval_metric_ops + + def _evaluate_all_masks(self): + """Evaluate all masks and compute sum iou/TP/FP/FN.""" + + sum_num_tp = {category['id']: 0 for category in self._categories} + sum_num_fp = sum_num_tp.copy() + sum_num_fn = sum_num_tp.copy() + sum_tp_iou = sum_num_tp.copy() + + for image_id in self._groundtruth_class_labels: + # Separate normal and is_crowd groundtruth + crowd_gt_indices = self._groundtruth_is_crowd.get(image_id) + (normal_gt_masks, normal_gt_classes, crowd_gt_masks, + crowd_gt_classes) = self._separate_normal_and_crowd_labels( + crowd_gt_indices, self._groundtruth_masks[image_id], + self._groundtruth_class_labels[image_id]) + + # Mask matching to normal GT. + predicted_masks = self._predicted_masks[image_id] + predicted_class_labels = self._predicted_class_labels[image_id] + (overlaps, pred_matched, + gt_matched) = self._match_predictions_to_groundtruths( + predicted_masks, + predicted_class_labels, + normal_gt_masks, + normal_gt_classes, + self._iou_threshold, + is_crowd=False, + with_replacement=False) + + # Accumulate true positives. + for (class_id, is_matched, overlap) in zip(predicted_class_labels, + pred_matched, overlaps): + if is_matched: + sum_num_tp[class_id] += 1 + sum_tp_iou[class_id] += overlap + + # Accumulate false negatives. + for (class_id, is_matched) in zip(normal_gt_classes, gt_matched): + if not is_matched: + sum_num_fn[class_id] += 1 + + # Match remaining predictions to crowd gt. + remained_pred_indices = np.logical_not(pred_matched) + remained_pred_masks = predicted_masks[remained_pred_indices, :, :] + remained_pred_classes = predicted_class_labels[remained_pred_indices] + _, pred_matched, _ = self._match_predictions_to_groundtruths( + remained_pred_masks, + remained_pred_classes, + crowd_gt_masks, + crowd_gt_classes, + self._ioa_threshold, + is_crowd=True, + with_replacement=True) + + # Accumulate false positives + for (class_id, is_matched) in zip(remained_pred_classes, pred_matched): + if not is_matched: + sum_num_fp[class_id] += 1 + return sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn + + def _compute_panoptic_metrics(self, sum_tp_iou, sum_num_tp, sum_num_fp, + sum_num_fn): + """Compute PQ metric for each category and average over all classes. + + Args: + sum_tp_iou: dict, summed true positive intersection-over-union (IoU) for + each class, keyed by class_id. + sum_num_tp: the total number of true positives for each class, keyed by + class_id. + sum_num_fp: the total number of false positives for each class, keyed by + class_id. + sum_num_fn: the total number of false negatives for each class, keyed by + class_id. + + Returns: + mask_metrics: a dictionary containing averaged metrics over all classes, + and per-category metrics if required. 
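+
+    Worked example (illustrative numbers only): for a class with
+    sum_tp_iou=1.5, num_tp=2, num_fp=1 and num_fn=1, the denominator used by
+    _compute_panoptic_metrics_single_class is 2 + 0.5*1 + 0.5*1 = 3, so
+    segmentation quality = 1.5 / 2 = 0.75, recognition quality = 2 / 3 and
+    panoptic quality = 0.75 * (2 / 3) = 0.5. Classes with a zero denominator
+    return None and are excluded from the averages via num_valid_classes.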
+ """ + mask_metrics = {} + sum_pq = 0 + sum_sq = 0 + sum_rq = 0 + num_valid_classes = 0 + for category in self._categories: + class_id = category['id'] + (panoptic_quality, segmentation_quality, + recognition_quality) = self._compute_panoptic_metrics_single_class( + sum_tp_iou[class_id], sum_num_tp[class_id], sum_num_fp[class_id], + sum_num_fn[class_id]) + if panoptic_quality is not None: + sum_pq += panoptic_quality + sum_sq += segmentation_quality + sum_rq += recognition_quality + num_valid_classes += 1 + if self._include_metrics_per_category: + mask_metrics['PanopticQuality@%.2fIOU_ByCategory/%s' % + (self._iou_threshold, + category['name'])] = panoptic_quality + mask_metrics['PanopticQuality@%.2fIOU' % + self._iou_threshold] = sum_pq / num_valid_classes + mask_metrics['SegmentationQuality@%.2fIOU' % + self._iou_threshold] = sum_sq / num_valid_classes + mask_metrics['RecognitionQuality@%.2fIOU' % + self._iou_threshold] = sum_rq / num_valid_classes + mask_metrics['NumValidClasses'] = num_valid_classes + mask_metrics['NumTotalClasses'] = len(self._categories) + return mask_metrics + + def _compute_panoptic_metrics_single_class(self, sum_tp_iou, num_tp, num_fp, + num_fn): + """Compute panoptic metrics: panoptic/segmentation/recognition quality. + + More computation details in https://arxiv.org/pdf/1801.00868.pdf. + Args: + sum_tp_iou: summed true positive intersection-over-union (IoU) for a + specific class. + num_tp: the total number of true positives for a specific class. + num_fp: the total number of false positives for a specific class. + num_fn: the total number of false negatives for a specific class. + + Returns: + panoptic_quality: sum_tp_iou / (num_tp + 0.5*num_fp + 0.5*num_fn). + segmentation_quality: sum_tp_iou / num_tp. + recognition_quality: num_tp / (num_tp + 0.5*num_fp + 0.5*num_fn). + """ + denominator = num_tp + 0.5 * num_fp + 0.5 * num_fn + # Calculate metric only if there is at least one GT or one prediction. + if denominator > 0: + recognition_quality = num_tp / denominator + if num_tp > 0: + segmentation_quality = sum_tp_iou / num_tp + else: + # If there is no TP for this category. + segmentation_quality = 0 + panoptic_quality = segmentation_quality * recognition_quality + return panoptic_quality, segmentation_quality, recognition_quality + else: + return None, None, None + + def _separate_normal_and_crowd_labels(self, crowd_gt_indices, + groundtruth_masks, groundtruth_classes): + """Separate normal and crowd groundtruth class_labels and masks. + + Args: + crowd_gt_indices: None or array of shape [num_groundtruths]. If None, all + groundtruths are treated as normal ones. + groundtruth_masks: array of shape [num_groundtruths, height, width]. + groundtruth_classes: array of shape [num_groundtruths]. + + Returns: + normal_gt_masks: array of shape [num_normal_groundtruths, height, width]. + normal_gt_classes: array of shape [num_normal_groundtruths]. + crowd_gt_masks: array of shape [num_crowd_groundtruths, height, width]. + crowd_gt_classes: array of shape [num_crowd_groundtruths]. + Raises: + ValueError: if the shape of groundtruth classes doesn't match groundtruth + masks or if the shape of crowd_gt_indices. 
+ """ + if groundtruth_masks.shape[0] != groundtruth_classes.shape[0]: + raise ValueError( + "The number of masks doesn't match the number of labels.") + if crowd_gt_indices is None: + # All gts are treated as normal + crowd_gt_indices = np.zeros(groundtruth_masks.shape, dtype=np.bool) + else: + if groundtruth_masks.shape[0] != crowd_gt_indices.shape[0]: + raise ValueError( + "The number of masks doesn't match the number of is_crowd labels.") + crowd_gt_indices = crowd_gt_indices.astype(np.bool) + normal_gt_indices = np.logical_not(crowd_gt_indices) + if normal_gt_indices.size: + normal_gt_masks = groundtruth_masks[normal_gt_indices, :, :] + normal_gt_classes = groundtruth_classes[normal_gt_indices] + crowd_gt_masks = groundtruth_masks[crowd_gt_indices, :, :] + crowd_gt_classes = groundtruth_classes[crowd_gt_indices] + else: + # No groundtruths available, groundtruth_masks.shape = (0, h, w) + normal_gt_masks = groundtruth_masks + normal_gt_classes = groundtruth_classes + crowd_gt_masks = groundtruth_masks + crowd_gt_classes = groundtruth_classes + return normal_gt_masks, normal_gt_classes, crowd_gt_masks, crowd_gt_classes + + def _match_predictions_to_groundtruths(self, + predicted_masks, + predicted_classes, + groundtruth_masks, + groundtruth_classes, + matching_threshold, + is_crowd=False, + with_replacement=False): + """Match the predicted masks to groundtruths. + + Args: + predicted_masks: array of shape [num_predictions, height, width]. + predicted_classes: array of shape [num_predictions]. + groundtruth_masks: array of shape [num_groundtruths, height, width]. + groundtruth_classes: array of shape [num_groundtruths]. + matching_threshold: if the overlap between a prediction and a groundtruth + is larger than this threshold, the prediction is true positive. + is_crowd: whether the groundtruths are crowd annotation or not. If True, + use intersection over area (IoA) as the overlapping metric; otherwise + use intersection over union (IoU). + with_replacement: whether a groundtruth can be matched to multiple + predictions. By default, for normal groundtruths, only 1-1 matching is + allowed for normal groundtruths; for crowd groundtruths, 1-to-many must + be allowed. + + Returns: + best_overlaps: array of shape [num_predictions]. Values representing the + IoU + or IoA with best matched groundtruth. + pred_matched: array of shape [num_predictions]. Boolean value representing + whether the ith prediction is matched to a groundtruth. + gt_matched: array of shape [num_groundtruth]. Boolean value representing + whether the ith groundtruth is matched to a prediction. + Raises: + ValueError: if the shape of groundtruth/predicted masks doesn't match + groundtruth/predicted classes. 
+ """ + if groundtruth_masks.shape[0] != groundtruth_classes.shape[0]: + raise ValueError( + "The number of GT masks doesn't match the number of labels.") + if predicted_masks.shape[0] != predicted_classes.shape[0]: + raise ValueError( + "The number of predicted masks doesn't match the number of labels.") + gt_matched = np.zeros(groundtruth_classes.shape, dtype=np.bool) + pred_matched = np.zeros(predicted_classes.shape, dtype=np.bool) + best_overlaps = np.zeros(predicted_classes.shape) + for pid in range(predicted_classes.shape[0]): + best_overlap = 0 + matched_gt_id = -1 + for gid in range(groundtruth_classes.shape[0]): + if predicted_classes[pid] == groundtruth_classes[gid]: + if (not with_replacement) and gt_matched[gid]: + continue + if not is_crowd: + overlap = np_mask_ops.iou(predicted_masks[pid:pid + 1], + groundtruth_masks[gid:gid + 1])[0, 0] + else: + overlap = np_mask_ops.ioa(groundtruth_masks[gid:gid + 1], + predicted_masks[pid:pid + 1])[0, 0] + if overlap >= matching_threshold and overlap > best_overlap: + matched_gt_id = gid + best_overlap = overlap + if matched_gt_id >= 0: + gt_matched[matched_gt_id] = True + pred_matched[pid] = True + best_overlaps[pid] = best_overlap + return best_overlaps, pred_matched, gt_matched + + def _unpack_evaluation_dictionary_items(self, eval_dict): + """Unpack items from the evaluation dictionary.""" + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + groundtruth_instance_masks = eval_dict[ + input_data_fields.groundtruth_instance_masks] + groundtruth_is_crowd = eval_dict.get(input_data_fields.groundtruth_is_crowd, + None) + num_gt_masks_per_image = eval_dict.get( + input_data_fields.num_groundtruth_boxes, None) + detection_classes = eval_dict[detection_fields.detection_classes] + detection_masks = eval_dict[detection_fields.detection_masks] + num_det_masks_per_image = eval_dict.get(detection_fields.num_detections, + None) + if groundtruth_is_crowd is None: + groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool) + + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. 
+ image_id = tf.expand_dims(image_id, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0) + groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + detection_masks = tf.expand_dims(detection_masks, 0) + + if num_gt_masks_per_image is None: + num_gt_masks_per_image = tf.shape(groundtruth_classes)[1:2] + else: + num_gt_masks_per_image = tf.expand_dims(num_gt_masks_per_image, 0) + + if num_det_masks_per_image is None: + num_det_masks_per_image = tf.shape(detection_classes)[1:2] + else: + num_det_masks_per_image = tf.expand_dims(num_det_masks_per_image, 0) + else: + if num_gt_masks_per_image is None: + num_gt_masks_per_image = tf.tile( + tf.shape(groundtruth_classes)[1:2], + multiples=tf.shape(groundtruth_classes)[0:1]) + if num_det_masks_per_image is None: + num_det_masks_per_image = tf.tile( + tf.shape(detection_classes)[1:2], + multiples=tf.shape(detection_classes)[0:1]) + return (image_id, groundtruth_classes, groundtruth_instance_masks, + groundtruth_is_crowd, num_gt_masks_per_image, detection_classes, + detection_masks, num_det_masks_per_image) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_evaluation_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_evaluation_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4d6dc2c1b562db294dead2798eb2f7de23963a7e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_evaluation_test.py @@ -0,0 +1,2106 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for tensorflow_models.object_detection.metrics.coco_evaluation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import numpy as np +import tensorflow.compat.v1 as tf +from object_detection.core import standard_fields +from object_detection.metrics import coco_evaluation +from object_detection.utils import tf_version + + +def _get_categories_list(): + return [{ + 'id': 1, + 'name': 'person' + }, { + 'id': 2, + 'name': 'dog' + }, { + 'id': 3, + 'name': 'cat' + }] + + +def _get_category_keypoints_dict(): + return { + 'person': [{ + 'id': 0, + 'name': 'left_eye' + }, { + 'id': 3, + 'name': 'right_eye' + }], + 'dog': [{ + 'id': 1, + 'name': 'tail_start' + }, { + 'id': 2, + 'name': 'mouth' + }] + } + + +class CocoDetectionEvaluationTest(tf.test.TestCase): + + def testGetOneMAPWithMatchingGroundtruthAndDetections(self): + """Tests that mAP is calculated correctly on GT and Detections.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image2', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image2', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image3', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[25., 25., 50., 50.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image3', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[25., 25., 50., 50.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self): + """Tests computing mAP with is_crowd GT boxes skipped.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]), + 
standard_fields.InputDataFields.groundtruth_classes: + np.array([1, 2]), + standard_fields.InputDataFields.groundtruth_is_crowd: + np.array([0, 1]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsEmptyCrowd(self): + """Tests computing mAP with empty is_crowd array passed in.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_is_crowd: + np.array([]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + + def testRejectionOnDuplicateGroundtruth(self): + """Tests that groundtruth cannot be added more than once for an image.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + # Add groundtruth + image_key1 = 'img1' + groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], + dtype=float) + groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int) + coco_evaluator.add_single_ground_truth_image_info(image_key1, { + standard_fields.InputDataFields.groundtruth_boxes: + groundtruth_boxes1, + standard_fields.InputDataFields.groundtruth_classes: + groundtruth_class_labels1 + }) + groundtruth_lists_len = len(coco_evaluator._groundtruth_list) + + # Add groundtruth with the same image id. 
+ coco_evaluator.add_single_ground_truth_image_info(image_key1, { + standard_fields.InputDataFields.groundtruth_boxes: + groundtruth_boxes1, + standard_fields.InputDataFields.groundtruth_classes: + groundtruth_class_labels1 + }) + self.assertEqual(groundtruth_lists_len, + len(coco_evaluator._groundtruth_list)) + + def testRejectionOnDuplicateDetections(self): + """Tests that detections cannot be added more than once for an image.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + # Add groundtruth + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[99., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + detections_lists_len = len(coco_evaluator._detection_boxes_list) + coco_evaluator.add_single_detected_image_info( + image_id='image1', # Note that this image id was previously added. + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + self.assertEqual(detections_lists_len, + len(coco_evaluator._detection_boxes_list)) + + def testExceptionRaisedWithMissingGroundtruth(self): + """Tests that exception is raised for detection with missing groundtruth.""" + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + with self.assertRaises(ValueError): + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + + +@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X') +class CocoEvaluationPyFuncTest(tf.test.TestCase): + + def _MatchingGroundtruthAndDetections(self, coco_evaluator): + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + sess.run(update_op, + feed_dict={ + image_id: 'image1', + 
groundtruth_boxes: np.array([[100., 100., 200., 200.]]), + groundtruth_classes: np.array([1]), + detection_boxes: np.array([[100., 100., 200., 200.]]), + detection_scores: np.array([.8]), + detection_classes: np.array([1]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image2', + groundtruth_boxes: np.array([[50., 50., 100., 100.]]), + groundtruth_classes: np.array([3]), + detection_boxes: np.array([[50., 50., 100., 100.]]), + detection_scores: np.array([.7]), + detection_classes: np.array([3]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image3', + groundtruth_boxes: np.array([[25., 25., 50., 50.]]), + groundtruth_classes: np.array([2]), + detection_boxes: np.array([[25., 25., 50., 50.]]), + detection_scores: np.array([.9]), + detection_classes: np.array([2]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + def testGetOneMAPWithMatchingGroundtruthAndDetections(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + self._MatchingGroundtruthAndDetections(coco_evaluator) + + # Configured to skip unmatched detector predictions with + # groundtruth_labeled_classes, but reverts to fully-labeled eval since there + # are no groundtruth_labeled_classes set. + def testGetMAPWithSkipUnmatchedPredictionsIgnoreGrountruthLabeledClasses( + self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list(), skip_predictions_for_unlabeled_class=True) + self._MatchingGroundtruthAndDetections(coco_evaluator) + + # Test skipping unmatched detector predictions with + # groundtruth_labeled_classes. 
+ def testGetMAPWithSkipUnmatchedPredictions(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list(), skip_predictions_for_unlabeled_class=True) + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_labeled_classes = tf.placeholder(tf.float32, shape=(None)) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: + image_id, + input_data_fields.groundtruth_boxes: + groundtruth_boxes, + input_data_fields.groundtruth_classes: + groundtruth_classes, + input_data_fields.groundtruth_labeled_classes: + groundtruth_labeled_classes, + detection_fields.detection_boxes: + detection_boxes, + detection_fields.detection_scores: + detection_scores, + detection_fields.detection_classes: + detection_classes + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + groundtruth_classes: + np.array([1]), + # Only class 1 is exhaustively labeled for image1. + groundtruth_labeled_classes: + np.array([1]), + detection_boxes: + np.array([[100., 100., 200., 200.], [100., 100., 200., + 200.]]), + detection_scores: + np.array([.8, .95]), + detection_classes: + np.array([1, 2]) + }) + sess.run( + update_op, + feed_dict={ + image_id: 'image2', + groundtruth_boxes: np.array([[50., 50., 100., 100.]]), + groundtruth_classes: np.array([3]), + groundtruth_labeled_classes: np.array([3]), + detection_boxes: np.array([[50., 50., 100., 100.]]), + detection_scores: np.array([.7]), + detection_classes: np.array([3]) + }) + sess.run( + update_op, + feed_dict={ + image_id: 'image3', + groundtruth_boxes: np.array([[25., 25., 50., 50.]]), + groundtruth_classes: np.array([2]), + groundtruth_labeled_classes: np.array([2]), + detection_boxes: np.array([[25., 25., 50., 50.]]), + detection_scores: np.array([.9]), + detection_classes: np.array([2]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + 
self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsIsAnnotated(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + is_annotated = tf.placeholder(tf.bool, shape=()) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + 'is_annotated': is_annotated, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + sess.run(update_op, + feed_dict={ + image_id: 'image1', + groundtruth_boxes: np.array([[100., 100., 200., 200.]]), + groundtruth_classes: np.array([1]), + is_annotated: True, + detection_boxes: np.array([[100., 100., 200., 200.]]), + detection_scores: np.array([.8]), + detection_classes: np.array([1]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image2', + groundtruth_boxes: np.array([[50., 50., 100., 100.]]), + groundtruth_classes: np.array([3]), + is_annotated: True, + detection_boxes: np.array([[50., 50., 100., 100.]]), + detection_scores: np.array([.7]), + detection_classes: np.array([3]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image3', + groundtruth_boxes: np.array([[25., 25., 50., 50.]]), + groundtruth_classes: np.array([2]), + is_annotated: True, + detection_boxes: np.array([[25., 25., 50., 50.]]), + detection_scores: np.array([.9]), + detection_classes: np.array([2]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image4', + groundtruth_boxes: np.zeros((0, 4)), + groundtruth_classes: np.zeros((0)), + is_annotated: False, # Note that this image isn't annotated. 
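+ # Detections fed for this unannotated image should be ignored, so the + # metrics asserted below all remain 1.0.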
+ detection_boxes: np.array([[25., 25., 50., 50.], + [25., 25., 70., 50.], + [25., 25., 80., 50.], + [25., 25., 90., 50.]]), + detection_scores: np.array([0.6, 0.7, 0.8, 0.9]), + detection_classes: np.array([1, 2, 2, 3]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsPadded(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.], [-1, -1, -1, -1]]), + groundtruth_classes: + np.array([1, -1]), + detection_boxes: + np.array([[100., 100., 200., 200.], [0., 0., 0., 0.]]), + detection_scores: + np.array([.8, 0.]), + detection_classes: + np.array([1, -1]) + }) + sess.run( + update_op, + feed_dict={ + image_id: + 'image2', + groundtruth_boxes: + np.array([[50., 50., 100., 100.], [-1, -1, -1, -1]]), + groundtruth_classes: + np.array([3, -1]), + detection_boxes: + np.array([[50., 50., 100., 100.], [0., 0., 0., 0.]]), + detection_scores: + np.array([.7, 0.]), + detection_classes: + np.array([3, -1]) + }) + sess.run( + update_op, + feed_dict={ + image_id: + 'image3', + groundtruth_boxes: + np.array([[25., 25., 50., 50.], [10., 10., 15., 15.]]), + groundtruth_classes: + np.array([2, 2]), + detection_boxes: + np.array([[25., 25., 50., 50.], [10., 10., 15., 15.]]), + 
detection_scores: + np.array([.95, .9]), + detection_classes: + np.array([2, 2]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 0.83333331) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsBatched(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + batch_size = 3 + image_id = tf.placeholder(tf.string, shape=(batch_size)) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + sess.run(update_op, + feed_dict={ + image_id: ['image1', 'image2', 'image3'], + groundtruth_boxes: np.array([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]]), + groundtruth_classes: np.array([[1], [3], [2]]), + detection_boxes: np.array([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]]), + detection_scores: np.array([[.8], [.7], [.9]]), + detection_classes: np.array([[1], [3], [2]]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + 
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsPaddedBatches(self): + coco_evaluator = coco_evaluation.CocoDetectionEvaluator( + _get_categories_list()) + batch_size = 3 + image_id = tf.placeholder(tf.string, shape=(batch_size)) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + num_gt_boxes_per_image = tf.placeholder(tf.int32, shape=(None)) + detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + num_det_boxes_per_image = tf.placeholder(tf.int32, shape=(None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + 'num_groundtruth_boxes_per_image': num_gt_boxes_per_image, + 'num_det_boxes_per_image': num_det_boxes_per_image + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: ['image1', 'image2', 'image3'], + groundtruth_boxes: + np.array([[[100., 100., 200., 200.], [-1, -1, -1, -1]], + [[50., 50., 100., 100.], [-1, -1, -1, -1]], + [[25., 25., 50., 50.], [10., 10., 15., 15.]]]), + groundtruth_classes: + np.array([[1, -1], [3, -1], [2, 2]]), + num_gt_boxes_per_image: + np.array([1, 1, 2]), + detection_boxes: + np.array([[[100., 100., 200., 200.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], + [[50., 50., 100., 100.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], + [[25., 25., 50., 50.], + [10., 10., 15., 15.], + [10., 10., 15., 15.]]]), + detection_scores: + np.array([[.8, 0., 0.], [.7, 0., 0.], [.95, .9, 0.9]]), + detection_classes: + np.array([[1, -1, -1], [3, -1, -1], [2, 2, 2]]), + num_det_boxes_per_image: + np.array([1, 1, 3]), + }) + + # Check the number of bounding boxes added. 
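+ # With padding stripped via num_gt_boxes_per_image = [1, 1, 2] and + # num_det_boxes_per_image = [1, 1, 3], 1 + 1 + 2 = 4 groundtruth boxes and + # 1 + 1 + 3 = 5 detections should remain.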
+ self.assertEqual(len(coco_evaluator._groundtruth_list), 4) + self.assertEqual(len(coco_evaluator._detection_boxes_list), 5) + + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 0.83333331) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_boxes_list) + self.assertFalse(coco_evaluator._image_ids) + + +class CocoKeypointEvaluationTest(tf.test.TestCase): + + def testGetOneMAPWithMatchingKeypoints(self): + """Tests that correct mAP for keypoints is calculated.""" + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[2, 0, 0, 2]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image2', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[75., 76.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [77., 78.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[2, 0, 0, 2]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image2', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + 
np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + + def testGroundtruthListValues(self): + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[2, 0, 0, 2]]), + standard_fields.InputDataFields.groundtruth_area: np.array([15.]) + }) + gt_dict = coco_evaluator._groundtruth_list[0] + self.assertEqual(gt_dict['id'], 1) + self.assertAlmostEqual(gt_dict['bbox'], [100.0, 100.0, 100.0, 100.0]) + self.assertAlmostEqual( + gt_dict['keypoints'], [160.0, 150.0, 2, 180.0, 170.0, 2]) + self.assertEqual(gt_dict['num_keypoints'], 2) + self.assertAlmostEqual(gt_dict['area'], 15.0) + + def testKeypointVisibilitiesAreOptional(self): + """Tests that evaluator works when visibilities aren't provided.""" + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image2', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[75., 76.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [77., 78.]]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image2', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]) + 
}) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + + def testFiltersDetectionsFromOtherCategories(self): + """Tests that the evaluator ignores detections from other categories.""" + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=2, category_keypoints=category_keypoint_dict['person'], + class_text='dog') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [170., 180.], [110., 120.], + [130., 140.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[2, 2, 2, 2]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.9]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[150., 160.], [170., 180.], [110., 120.], + [130., 140.]]]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/dog'], + -1.0) + + def testHandlesUnlabeledKeypointData(self): + """Tests that the evaluator handles missing keypoints GT.""" + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[0, 0, 0, 2]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[50., 60.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + + def testIgnoresCrowdAnnotations(self): + """Tests that the evaluator ignores GT marked as crowd.""" + category_keypoint_dict = _get_category_keypoints_dict() + coco_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + 
np.array([1]), + standard_fields.InputDataFields.groundtruth_is_crowd: + np.array([1]), + standard_fields.InputDataFields.groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + standard_fields.InputDataFields.groundtruth_keypoint_visibilities: + np.array([[2, 0, 0, 2]]) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_keypoints: + np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + -1.0) + + +@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X') +class CocoKeypointEvaluationPyFuncTest(tf.test.TestCase): + + def testGetOneMAPWithMatchingKeypoints(self): + category_keypoint_dict = _get_category_keypoints_dict() + coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + detection_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_keypoints: groundtruth_keypoints, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_keypoints: detection_keypoints, + } + + eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops( + eval_dict) + + _, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + groundtruth_classes: + np.array([1]), + groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + detection_boxes: + np.array([[100., 100., 200., 200.]]), + detection_scores: + np.array([.8]), + detection_classes: + np.array([1]), + detection_keypoints: + np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + sess.run( + update_op, + feed_dict={ + image_id: + 'image2', + groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + groundtruth_classes: + np.array([1]), + groundtruth_keypoints: + np.array([[[75., 76.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [77., 78.]]]), + detection_boxes: + np.array([[50., 50., 100., 100.]]), + detection_scores: + 
np.array([.7]), + detection_classes: + np.array([1]), + detection_keypoints: + np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], 1.0) + self.assertFalse(coco_keypoint_evaluator._groundtruth_list) + self.assertFalse(coco_keypoint_evaluator._detection_boxes_list) + self.assertFalse(coco_keypoint_evaluator._image_ids) + + def testGetOneMAPWithMatchingKeypointsAndVisibilities(self): + category_keypoint_dict = _get_category_keypoints_dict() + coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + groundtruth_keypoint_visibilities = tf.placeholder( + tf.float32, shape=(None, 4)) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + detection_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: + image_id, + input_data_fields.groundtruth_boxes: + groundtruth_boxes, + input_data_fields.groundtruth_classes: + groundtruth_classes, + input_data_fields.groundtruth_keypoints: + groundtruth_keypoints, + input_data_fields.groundtruth_keypoint_visibilities: + groundtruth_keypoint_visibilities, + detection_fields.detection_boxes: + detection_boxes, + detection_fields.detection_scores: + detection_scores, + detection_fields.detection_classes: + detection_classes, + detection_fields.detection_keypoints: + detection_keypoints, + } + + eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops( + eval_dict) + + _, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + groundtruth_classes: + np.array([1]), + groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + groundtruth_keypoint_visibilities: + np.array([[0, 0, 0, 2]]), + detection_boxes: + np.array([[100., 100., 200., 
200.]]), + detection_scores: + np.array([.8]), + detection_classes: + np.array([1]), + detection_keypoints: + np.array([[[50., 60.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], -1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], -1.0) + self.assertFalse(coco_keypoint_evaluator._groundtruth_list) + self.assertFalse(coco_keypoint_evaluator._detection_boxes_list) + self.assertFalse(coco_keypoint_evaluator._image_ids) + + def testGetOneMAPWithMatchingKeypointsIsAnnotated(self): + category_keypoint_dict = _get_category_keypoints_dict() + coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + is_annotated = tf.placeholder(tf.bool, shape=()) + detection_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + detection_keypoints = tf.placeholder(tf.float32, shape=(None, 4, 2)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_keypoints: groundtruth_keypoints, + 'is_annotated': is_annotated, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_keypoints: detection_keypoints, + } + + eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops( + eval_dict) + + _, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + groundtruth_classes: + np.array([1]), + groundtruth_keypoints: + np.array([[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]]), + is_annotated: + True, + detection_boxes: + np.array([[100., 100., 200., 200.]]), + detection_scores: + np.array([.8]), + detection_classes: + np.array([1]), + detection_keypoints: + 
np.array([[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]]) + }) + sess.run( + update_op, + feed_dict={ + image_id: + 'image2', + groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + groundtruth_classes: + np.array([1]), + groundtruth_keypoints: + np.array([[[75., 76.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [77., 78.]]]), + is_annotated: + True, + detection_boxes: + np.array([[50., 50., 100., 100.]]), + detection_scores: + np.array([.7]), + detection_classes: + np.array([1]), + detection_keypoints: + np.array([[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]) + }) + sess.run( + update_op, + feed_dict={ + image_id: + 'image3', + groundtruth_boxes: + np.zeros((0, 4)), + groundtruth_classes: + np.zeros((0)), + groundtruth_keypoints: + np.zeros((0, 4, 2)), + is_annotated: + False, # Note that this image isn't annotated. + detection_boxes: + np.array([[25., 25., 50., 50.], [25., 25., 70., 50.], + [25., 25., 80., 50.], [25., 25., 90., 50.]]), + detection_scores: + np.array([0.6, 0.7, 0.8, 0.9]), + detection_classes: + np.array([1, 2, 2, 3]), + detection_keypoints: + np.array([[[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], 1.0) + self.assertFalse(coco_keypoint_evaluator._groundtruth_list) + self.assertFalse(coco_keypoint_evaluator._detection_boxes_list) + self.assertFalse(coco_keypoint_evaluator._image_ids) + + def testGetOneMAPWithMatchingKeypointsBatched(self): + category_keypoint_dict = _get_category_keypoints_dict() + coco_keypoint_evaluator = coco_evaluation.CocoKeypointEvaluator( + category_id=1, category_keypoints=category_keypoint_dict['person'], + class_text='person') + batch_size = 2 + image_id = tf.placeholder(tf.string, shape=(batch_size)) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + groundtruth_keypoints = tf.placeholder( + tf.float32, shape=(batch_size, None, 4, 2)) + detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_keypoints = tf.placeholder( + tf.float32, shape=(batch_size, None, 4, 2)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: 
groundtruth_classes, + input_data_fields.groundtruth_keypoints: groundtruth_keypoints, + detection_fields.detection_boxes: detection_boxes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_keypoints: detection_keypoints + } + + eval_metric_ops = coco_keypoint_evaluator.get_estimator_eval_metric_ops( + eval_dict) + + _, update_op = eval_metric_ops['Keypoints_Precision/mAP ByCategory/person'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: ['image1', 'image2'], + groundtruth_boxes: + np.array([[[100., 100., 200., 200.]], [[50., 50., 100., + 100.]]]), + groundtruth_classes: + np.array([[1], [3]]), + groundtruth_keypoints: + np.array([[[[150., 160.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [170., 180.]]], + [[[75., 76.], [float('nan'), + float('nan')], + [float('nan'), float('nan')], [77., 78.]]]]), + detection_boxes: + np.array([[[100., 100., 200., 200.]], [[50., 50., 100., + 100.]]]), + detection_scores: + np.array([[.8], [.7]]), + detection_classes: + np.array([[1], [3]]), + detection_keypoints: + np.array([[[[150., 160.], [1., 2.], [3., 4.], [170., 180.]]], + [[[75., 76.], [5., 6.], [7., 8.], [77., 78.]]]]) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['Keypoints_Precision/mAP ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.50IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP@.75IOU ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Precision/mAP (medium) ByCategory/person'], -1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@1 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@10 ByCategory/person'], + 1.0) + self.assertAlmostEqual(metrics['Keypoints_Recall/AR@100 ByCategory/person'], + 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (large) ByCategory/person'], 1.0) + self.assertAlmostEqual( + metrics['Keypoints_Recall/AR@100 (medium) ByCategory/person'], -1.0) + self.assertFalse(coco_keypoint_evaluator._groundtruth_list) + self.assertFalse(coco_keypoint_evaluator._detection_boxes_list) + self.assertFalse(coco_keypoint_evaluator._image_ids) + + +class CocoMaskEvaluationTest(tf.test.TestCase): + + def testGetOneMAPWithMatchingGroundtruthAndDetections(self): + coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list()) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]), + standard_fields.InputDataFields.groundtruth_instance_masks: + np.pad(np.ones([1, 100, 100], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[100., 100., 200., 200.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_masks: + np.pad(np.ones([1, 100, 100], 
dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image2', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]), + standard_fields.InputDataFields.groundtruth_instance_masks: + np.pad(np.ones([1, 50, 50], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + coco_evaluator.add_single_detected_image_info( + image_id='image2', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[50., 50., 100., 100.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_masks: + np.pad(np.ones([1, 50, 50], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image3', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[25., 25., 50., 50.]]), + standard_fields.InputDataFields.groundtruth_classes: np.array([1]), + standard_fields.InputDataFields.groundtruth_instance_masks: + np.pad(np.ones([1, 25, 25], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + coco_evaluator.add_single_detected_image_info( + image_id='image3', + detections_dict={ + standard_fields.DetectionResultFields.detection_boxes: + np.array([[25., 25., 50., 50.]]), + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_masks: + # The value of 5 is equivalent to 1, since masks will be + # thresholded and binarized before evaluation. 
+ np.pad(5 * np.ones([1, 25, 25], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), mode='constant') + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0) + coco_evaluator.clear() + self.assertFalse(coco_evaluator._image_id_to_mask_shape_map) + self.assertFalse(coco_evaluator._image_ids_with_detections) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._detection_masks_list) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self): + """Tests computing mAP with is_crowd GT boxes skipped.""" + coco_evaluator = coco_evaluation.CocoMaskEvaluator( + _get_categories_list()) + coco_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]), + standard_fields.InputDataFields.groundtruth_classes: + np.array([1, 2]), + standard_fields.InputDataFields.groundtruth_is_crowd: + np.array([0, 1]), + standard_fields.InputDataFields.groundtruth_instance_masks: + np.concatenate( + [np.pad(np.ones([1, 100, 100], dtype=np.uint8), + ((0, 0), (100, 56), (100, 56)), mode='constant'), + np.pad(np.ones([1, 101, 101], dtype=np.uint8), + ((0, 0), (99, 56), (99, 56)), mode='constant')], + axis=0) + }) + coco_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + standard_fields.DetectionResultFields.detection_scores: + np.array([.8]), + standard_fields.DetectionResultFields.detection_classes: + np.array([1]), + standard_fields.DetectionResultFields.detection_masks: + np.pad(np.ones([1, 100, 100], dtype=np.uint8), + ((0, 0), (100, 56), (100, 56)), mode='constant') + }) + metrics = coco_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0) + + +@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X') +class CocoMaskEvaluationPyFuncTest(tf.test.TestCase): + + def testAddEvalDict(self): + coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list()) + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + original_image_spatial_shape = tf.placeholder(tf.int32, shape=(None, 2)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + detection_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + input_data_fields.original_image_spatial_shape: + original_image_spatial_shape, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + } + update_op = coco_evaluator.add_eval_dict(eval_dict) + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.], [50., 50., 100., 100.]]), + groundtruth_classes: + np.array([1, 2]), + groundtruth_masks: + np.stack([ + np.pad( + 
np.ones([100, 100], dtype=np.uint8), ((10, 10), + (10, 10)), + mode='constant'), + np.pad( + np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)), + mode='constant') + ]), + original_image_spatial_shape: np.array([[120, 120]]), + detection_scores: + np.array([.9, .8]), + detection_classes: + np.array([2, 1]), + detection_masks: + np.stack([ + np.pad( + np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)), + mode='constant'), + np.pad( + np.ones([100, 100], dtype=np.uint8), ((10, 10), + (10, 10)), + mode='constant'), + ]) + }) + self.assertLen(coco_evaluator._groundtruth_list, 2) + self.assertLen(coco_evaluator._detection_masks_list, 2) + + def testGetOneMAPWithMatchingGroundtruthAndDetections(self): + coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list()) + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(None)) + groundtruth_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + original_image_spatial_shape = tf.placeholder(tf.int32, shape=(None, 2)) + detection_scores = tf.placeholder(tf.float32, shape=(None)) + detection_classes = tf.placeholder(tf.float32, shape=(None)) + detection_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + input_data_fields.original_image_spatial_shape: + original_image_spatial_shape, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionMasks_Precision/mAP'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: + 'image1', + groundtruth_boxes: + np.array([[100., 100., 200., 200.], [50., 50., 100., 100.]]), + groundtruth_classes: + np.array([1, 2]), + groundtruth_masks: + np.stack([ + np.pad( + np.ones([100, 100], dtype=np.uint8), ((10, 10), + (10, 10)), + mode='constant'), + np.pad( + np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)), + mode='constant') + ]), + original_image_spatial_shape: np.array([[120, 120], [120, 120]]), + detection_scores: + np.array([.9, .8]), + detection_classes: + np.array([2, 1]), + detection_masks: + np.stack([ + np.pad( + np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)), + mode='constant'), + np.pad( + np.ones([100, 100], dtype=np.uint8), ((10, 10), + (10, 10)), + mode='constant'), + ]) + }) + sess.run(update_op, + feed_dict={ + image_id: 'image2', + groundtruth_boxes: np.array([[50., 50., 100., 100.]]), + groundtruth_classes: np.array([1]), + groundtruth_masks: np.pad(np.ones([1, 50, 50], + dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), + mode='constant'), + original_image_spatial_shape: np.array([[70, 70]]), + detection_scores: np.array([.8]), + detection_classes: np.array([1]), + detection_masks: np.pad(np.ones([1, 50, 50], dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), + mode='constant') + }) + sess.run(update_op, + feed_dict={ + image_id: 'image3', + groundtruth_boxes: np.array([[25., 25., 50., 50.]]), + groundtruth_classes: np.array([1]), + 
groundtruth_masks: np.pad(np.ones([1, 25, 25], + dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), + mode='constant'), + original_image_spatial_shape: np.array([[45, 45]]), + detection_scores: np.array([.8]), + detection_classes: np.array([1]), + detection_masks: np.pad(np.ones([1, 25, 25], + dtype=np.uint8), + ((0, 0), (10, 10), (10, 10)), + mode='constant') + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._image_ids_with_detections) + self.assertFalse(coco_evaluator._image_id_to_mask_shape_map) + self.assertFalse(coco_evaluator._detection_masks_list) + + def testGetOneMAPWithMatchingGroundtruthAndDetectionsBatched(self): + coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list()) + batch_size = 3 + image_id = tf.placeholder(tf.string, shape=(batch_size)) + groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4)) + groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + groundtruth_masks = tf.placeholder( + tf.uint8, shape=(batch_size, None, None, None)) + original_image_spatial_shape = tf.placeholder(tf.int32, shape=(None, 2)) + detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None)) + detection_masks = tf.placeholder( + tf.uint8, shape=(batch_size, None, None, None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + input_data_fields.original_image_spatial_shape: + original_image_spatial_shape, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + } + + eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['DetectionMasks_Precision/mAP'] + + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: ['image1', 'image2', 'image3'], + groundtruth_boxes: + np.array([[[100., 100., 200., 200.]], + [[50., 50., 100., 100.]], + [[25., 25., 50., 50.]]]), + groundtruth_classes: + np.array([[1], [1], [1]]), + groundtruth_masks: + np.stack([ + np.pad( + np.ones([1, 100, 100], dtype=np.uint8), + ((0, 
0), (0, 0), (0, 0)), + mode='constant'), + np.pad( + np.ones([1, 50, 50], dtype=np.uint8), + ((0, 0), (25, 25), (25, 25)), + mode='constant'), + np.pad( + np.ones([1, 25, 25], dtype=np.uint8), + ((0, 0), (37, 38), (37, 38)), + mode='constant') + ], + axis=0), + original_image_spatial_shape: np.array( + [[100, 100], [100, 100], [100, 100]]), + detection_scores: + np.array([[.8], [.8], [.8]]), + detection_classes: + np.array([[1], [1], [1]]), + detection_masks: + np.stack([ + np.pad( + np.ones([1, 100, 100], dtype=np.uint8), + ((0, 0), (0, 0), (0, 0)), + mode='constant'), + np.pad( + np.ones([1, 50, 50], dtype=np.uint8), + ((0, 0), (25, 25), (25, 25)), + mode='constant'), + np.pad( + np.ones([1, 25, 25], dtype=np.uint8), + ((0, 0), (37, 38), (37, 38)), + mode='constant') + ], + axis=0) + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.50IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP@.75IOU'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (small)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@1'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@10'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (large)'], 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (medium)'], + 1.0) + self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (small)'], 1.0) + self.assertFalse(coco_evaluator._groundtruth_list) + self.assertFalse(coco_evaluator._image_ids_with_detections) + self.assertFalse(coco_evaluator._image_id_to_mask_shape_map) + self.assertFalse(coco_evaluator._detection_masks_list) + + +def _get_panoptic_test_data(): + # image1 contains 3 people in gt, (2 normal annotation and 1 "is_crowd" + # annotation), and 3 people in prediction. + gt_masks1 = np.zeros((3, 50, 50), dtype=np.uint8) + result_masks1 = np.zeros((3, 50, 50), dtype=np.uint8) + gt_masks1[0, 10:20, 20:30] = 1 + result_masks1[0, 10:18, 20:30] = 1 + gt_masks1[1, 25:30, 25:35] = 1 + result_masks1[1, 18:25, 25:30] = 1 + gt_masks1[2, 40:50, 40:50] = 1 + result_masks1[2, 47:50, 47:50] = 1 + gt_class1 = np.array([1, 1, 1]) + gt_is_crowd1 = np.array([0, 0, 1]) + result_class1 = np.array([1, 1, 1]) + + # image2 contains 1 dog and 1 cat in gt, while 1 person and 1 dog in + # prediction. 
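+ # For these two images the tests below expect PQ(person) == 0.32, + # PQ(dog) == 135.0 / 195 (one true positive with mask IoU 135/195) and + # PQ(cat) == 0, since the cat has no matching prediction.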
+ gt_masks2 = np.zeros((2, 30, 40), dtype=np.uint8) + result_masks2 = np.zeros((2, 30, 40), dtype=np.uint8) + gt_masks2[0, 5:15, 20:35] = 1 + gt_masks2[1, 20:30, 0:10] = 1 + result_masks2[0, 20:25, 10:15] = 1 + result_masks2[1, 6:15, 15:35] = 1 + gt_class2 = np.array([2, 3]) + gt_is_crowd2 = np.array([0, 0]) + result_class2 = np.array([1, 2]) + + gt_class = [gt_class1, gt_class2] + gt_masks = [gt_masks1, gt_masks2] + gt_is_crowd = [gt_is_crowd1, gt_is_crowd2] + result_class = [result_class1, result_class2] + result_masks = [result_masks1, result_masks2] + return gt_class, gt_masks, gt_is_crowd, result_class, result_masks + + +class CocoPanopticEvaluationTest(tf.test.TestCase): + + def test_panoptic_quality(self): + pq_evaluator = coco_evaluation.CocoPanopticSegmentationEvaluator( + _get_categories_list(), include_metrics_per_category=True) + (gt_class, gt_masks, gt_is_crowd, result_class, + result_masks) = _get_panoptic_test_data() + + for i in range(2): + pq_evaluator.add_single_ground_truth_image_info( + image_id='image%d' % i, + groundtruth_dict={ + standard_fields.InputDataFields.groundtruth_classes: + gt_class[i], + standard_fields.InputDataFields.groundtruth_instance_masks: + gt_masks[i], + standard_fields.InputDataFields.groundtruth_is_crowd: + gt_is_crowd[i] + }) + + pq_evaluator.add_single_detected_image_info( + image_id='image%d' % i, + detections_dict={ + standard_fields.DetectionResultFields.detection_classes: + result_class[i], + standard_fields.DetectionResultFields.detection_masks: + result_masks[i] + }) + + metrics = pq_evaluator.evaluate() + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU_ByCategory/person'], + 0.32) + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU_ByCategory/dog'], + 135.0 / 195) + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU_ByCategory/cat'], 0) + self.assertAlmostEqual(metrics['SegmentationQuality@0.50IOU'], + (0.8 + 135.0 / 195) / 3) + self.assertAlmostEqual(metrics['RecognitionQuality@0.50IOU'], (0.4 + 1) / 3) + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU'], + (0.32 + 135.0 / 195) / 3) + self.assertEqual(metrics['NumValidClasses'], 3) + self.assertEqual(metrics['NumTotalClasses'], 3) + + +@unittest.skipIf(tf_version.is_tf2(), 'Only Supported in TF1.X') +class CocoPanopticEvaluationPyFuncTest(tf.test.TestCase): + + def testPanopticQualityNoBatch(self): + pq_evaluator = coco_evaluation.CocoPanopticSegmentationEvaluator( + _get_categories_list(), include_metrics_per_category=True) + + image_id = tf.placeholder(tf.string, shape=()) + groundtruth_classes = tf.placeholder(tf.int32, shape=(None)) + groundtruth_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + groundtruth_is_crowd = tf.placeholder(tf.int32, shape=(None)) + detection_classes = tf.placeholder(tf.int32, shape=(None)) + detection_masks = tf.placeholder(tf.uint8, shape=(None, None, None)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + input_data_fields.groundtruth_is_crowd: groundtruth_is_crowd, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + } + + eval_metric_ops = pq_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['PanopticQuality@0.50IOU'] + (gt_class, gt_masks, gt_is_crowd, result_class, + 
result_masks) = _get_panoptic_test_data() + + with self.test_session() as sess: + for i in range(2): + sess.run( + update_op, + feed_dict={ + image_id: 'image%d' % i, + groundtruth_classes: gt_class[i], + groundtruth_masks: gt_masks[i], + groundtruth_is_crowd: gt_is_crowd[i], + detection_classes: result_class[i], + detection_masks: result_masks[i] + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU'], + (0.32 + 135.0 / 195) / 3) + + def testPanopticQualityBatched(self): + pq_evaluator = coco_evaluation.CocoPanopticSegmentationEvaluator( + _get_categories_list(), include_metrics_per_category=True) + batch_size = 2 + image_id = tf.placeholder(tf.string, shape=(batch_size)) + groundtruth_classes = tf.placeholder(tf.int32, shape=(batch_size, None)) + groundtruth_masks = tf.placeholder( + tf.uint8, shape=(batch_size, None, None, None)) + groundtruth_is_crowd = tf.placeholder(tf.int32, shape=(batch_size, None)) + detection_classes = tf.placeholder(tf.int32, shape=(batch_size, None)) + detection_masks = tf.placeholder( + tf.uint8, shape=(batch_size, None, None, None)) + num_gt_masks_per_image = tf.placeholder(tf.int32, shape=(batch_size)) + num_det_masks_per_image = tf.placeholder(tf.int32, shape=(batch_size)) + + input_data_fields = standard_fields.InputDataFields + detection_fields = standard_fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + input_data_fields.groundtruth_is_crowd: groundtruth_is_crowd, + input_data_fields.num_groundtruth_boxes: num_gt_masks_per_image, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks, + detection_fields.num_detections: num_det_masks_per_image, + } + + eval_metric_ops = pq_evaluator.get_estimator_eval_metric_ops(eval_dict) + + _, update_op = eval_metric_ops['PanopticQuality@0.50IOU'] + (gt_class, gt_masks, gt_is_crowd, result_class, + result_masks) = _get_panoptic_test_data() + with self.test_session() as sess: + sess.run( + update_op, + feed_dict={ + image_id: ['image0', 'image1'], + groundtruth_classes: + np.stack([ + gt_class[0], + np.pad(gt_class[1], (0, 1), mode='constant') + ], + axis=0), + groundtruth_masks: + np.stack([ + np.pad( + gt_masks[0], ((0, 0), (0, 10), (0, 10)), + mode='constant'), + np.pad( + gt_masks[1], ((0, 1), (0, 30), (0, 20)), + mode='constant'), + ], + axis=0), + groundtruth_is_crowd: + np.stack([ + gt_is_crowd[0], + np.pad(gt_is_crowd[1], (0, 1), mode='constant') + ], + axis=0), + num_gt_masks_per_image: np.array([3, 2]), + detection_classes: + np.stack([ + result_class[0], + np.pad(result_class[1], (0, 1), mode='constant') + ], + axis=0), + detection_masks: + np.stack([ + np.pad( + result_masks[0], ((0, 0), (0, 10), (0, 10)), + mode='constant'), + np.pad( + result_masks[1], ((0, 1), (0, 30), (0, 20)), + mode='constant'), + ], + axis=0), + num_det_masks_per_image: np.array([3, 2]), + }) + metrics = {} + for key, (value_op, _) in eval_metric_ops.items(): + metrics[key] = value_op + metrics = sess.run(metrics) + self.assertAlmostEqual(metrics['PanopticQuality@0.50IOU'], + (0.32 + 135.0 / 195) / 3) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_tools.py 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..4c8e8a7fb3eceaa2eac099ce4d1089521c653ff7 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_tools.py @@ -0,0 +1,973 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Wrappers for third party pycocotools to be used within object_detection. + +Note that nothing in this file is tensorflow related and thus cannot +be called directly as a slim metric, for example. + +TODO(jonathanhuang): wrap as a slim metric in metrics.py + + +Usage example: given a set of images with ids in the list image_ids +and corresponding lists of numpy arrays encoding groundtruth (boxes and classes) +and detections (boxes, scores and classes), where elements of each list +correspond to detections/annotations of a single image, +then evaluation (in multi-class mode) can be invoked as follows: + + groundtruth_dict = coco_tools.ExportGroundtruthToCOCO( + image_ids, groundtruth_boxes_list, groundtruth_classes_list, + max_num_classes, output_path=None) + detections_list = coco_tools.ExportDetectionsToCOCO( + image_ids, detection_boxes_list, detection_scores_list, + detection_classes_list, output_path=None) + groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + detections = groundtruth.LoadAnnotations(detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections, + agnostic_mode=False) + metrics = evaluator.ComputeMetrics() + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import OrderedDict +import copy +import time +import numpy as np + +from pycocotools import coco +from pycocotools import cocoeval +from pycocotools import mask + +import six +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.utils import json_utils + + +class COCOWrapper(coco.COCO): + """Wrapper for the pycocotools COCO class.""" + + def __init__(self, dataset, detection_type='bbox'): + """COCOWrapper constructor. + + See http://mscoco.org/dataset/#format for a description of the format. + By default, the coco.COCO class constructor reads from a JSON file. + This function duplicates the same behavior but loads from a dictionary, + allowing us to perform evaluation without writing to external storage. + + Args: + dataset: a dictionary holding bounding box annotations in the COCO format. + detection_type: type of detections being wrapped. Can be one of ['bbox', + 'segmentation'] + + Raises: + ValueError: if detection_type is unsupported. + """ + supported_detection_types = ['bbox', 'segmentation'] + if detection_type not in supported_detection_types: + raise ValueError('Unsupported detection type: {}. 
' + 'Supported values are: {}'.format( + detection_type, supported_detection_types)) + self._detection_type = detection_type + coco.COCO.__init__(self) + self.dataset = dataset + self.createIndex() + + def LoadAnnotations(self, annotations): + """Load annotations dictionary into COCO datastructure. + + See http://mscoco.org/dataset/#format for a description of the annotations + format. As above, this function replicates the default behavior of the API + but does not require writing to external storage. + + Args: + annotations: python list holding object detection results where each + detection is encoded as a dict with required keys ['image_id', + 'category_id', 'score'] and one of ['bbox', 'segmentation'] based on + `detection_type`. + + Returns: + a coco.COCO datastructure holding object detection annotations results + + Raises: + ValueError: if annotations is not a list + ValueError: if annotations do not correspond to the images contained + in self. + """ + results = coco.COCO() + results.dataset['images'] = [img for img in self.dataset['images']] + + tf.logging.info('Loading and preparing annotation results...') + tic = time.time() + + if not isinstance(annotations, list): + raise ValueError('annotations is not a list of objects') + annotation_img_ids = [ann['image_id'] for ann in annotations] + if (set(annotation_img_ids) != (set(annotation_img_ids) + & set(self.getImgIds()))): + raise ValueError('Results do not correspond to current coco set') + results.dataset['categories'] = copy.deepcopy(self.dataset['categories']) + if self._detection_type == 'bbox': + for idx, ann in enumerate(annotations): + bb = ann['bbox'] + ann['area'] = bb[2] * bb[3] + ann['id'] = idx + 1 + ann['iscrowd'] = 0 + elif self._detection_type == 'segmentation': + for idx, ann in enumerate(annotations): + ann['area'] = mask.area(ann['segmentation']) + ann['bbox'] = mask.toBbox(ann['segmentation']) + ann['id'] = idx + 1 + ann['iscrowd'] = 0 + tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic)) + + results.dataset['annotations'] = annotations + results.createIndex() + return results + + +class COCOEvalWrapper(cocoeval.COCOeval): + """Wrapper for the pycocotools COCOeval class. + + To evaluate, create two objects (groundtruth_dict and detections_list) + using the conventions listed at http://mscoco.org/dataset/#format. + Then call evaluation as follows: + + groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + detections = groundtruth.LoadAnnotations(detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections, + agnostic_mode=False) + + metrics = evaluator.ComputeMetrics() + """ + + def __init__(self, groundtruth=None, detections=None, agnostic_mode=False, + iou_type='bbox', oks_sigmas=None): + """COCOEvalWrapper constructor. + + Note that for the area-based metrics to be meaningful, detection and + groundtruth boxes must be in image coordinates measured in pixels. + + Args: + groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding + groundtruth annotations + detections: a coco.COCO (or coco_tools.COCOWrapper) object holding + detections + agnostic_mode: boolean (default: False). If True, evaluation ignores + class labels, treating all detections as proposals. + iou_type: IOU type to use for evaluation. Supports `bbox', `segm`, + `keypoints`. + oks_sigmas: Float numpy array holding the OKS variances for keypoints. 
+ """ + cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type) + if oks_sigmas is not None: + self.params.kpt_oks_sigmas = oks_sigmas + if agnostic_mode: + self.params.useCats = 0 + self._iou_type = iou_type + + def GetCategory(self, category_id): + """Fetches dictionary holding category information given category id. + + Args: + category_id: integer id + Returns: + dictionary holding 'id', 'name'. + """ + return self.cocoGt.cats[category_id] + + def GetAgnosticMode(self): + """Returns true if COCO Eval is configured to evaluate in agnostic mode.""" + return self.params.useCats == 0 + + def GetCategoryIdList(self): + """Returns list of valid category ids.""" + return self.params.catIds + + def ComputeMetrics(self, + include_metrics_per_category=False, + all_metrics_per_category=False, + super_categories=None): + """Computes detection/keypoint metrics. + + Args: + include_metrics_per_category: If True, will include metrics per category. + all_metrics_per_category: If true, include all the summery metrics for + each category in per_category_ap. Be careful with setting it to true if + you have more than handful of categories, because it will pollute + your mldash. + super_categories: None or a python dict mapping super-category names + (strings) to lists of categories (corresponding to category names + in the label_map). Metrics are aggregated along these super-categories + and added to the `per_category_ap` and are associated with the name + `PerformanceBySuperCategory/`. + + Returns: + 1. summary_metrics: a dictionary holding: + 'Precision/mAP': mean average precision over classes averaged over IOU + thresholds ranging from .5 to .95 with .05 increments + 'Precision/mAP@.50IOU': mean average precision at 50% IOU + 'Precision/mAP@.75IOU': mean average precision at 75% IOU + 'Precision/mAP (small)': mean average precision for small objects + (area < 32^2 pixels). NOTE: not present for 'keypoints' + 'Precision/mAP (medium)': mean average precision for medium sized + objects (32^2 pixels < area < 96^2 pixels) + 'Precision/mAP (large)': mean average precision for large objects + (96^2 pixels < area < 10000^2 pixels) + 'Recall/AR@1': average recall with 1 detection + 'Recall/AR@10': average recall with 10 detections + 'Recall/AR@100': average recall with 100 detections + 'Recall/AR@100 (small)': average recall for small objects with 100 + detections. NOTE: not present for 'keypoints' + 'Recall/AR@100 (medium)': average recall for medium objects with 100 + detections + 'Recall/AR@100 (large)': average recall for large objects with 100 + detections + 2. per_category_ap: a dictionary holding category specific results with + keys of the form: 'Precision mAP ByCategory/category' + (without the supercategory part if no supercategories exist). + For backward compatibility 'PerformanceByCategory' is included in the + output regardless of all_metrics_per_category. + If evaluating class-agnostic mode, per_category_ap is an empty + dictionary. + If super_categories are provided, then this will additionally include + metrics aggregated along the super_categories with keys of the form: + `PerformanceBySuperCategory/` + + Raises: + ValueError: If category_stats does not exist. 
+ """ + self.evaluate() + self.accumulate() + self.summarize() + + summary_metrics = {} + if self._iou_type in ['bbox', 'segm']: + summary_metrics = OrderedDict([('Precision/mAP', self.stats[0]), + ('Precision/mAP@.50IOU', self.stats[1]), + ('Precision/mAP@.75IOU', self.stats[2]), + ('Precision/mAP (small)', self.stats[3]), + ('Precision/mAP (medium)', self.stats[4]), + ('Precision/mAP (large)', self.stats[5]), + ('Recall/AR@1', self.stats[6]), + ('Recall/AR@10', self.stats[7]), + ('Recall/AR@100', self.stats[8]), + ('Recall/AR@100 (small)', self.stats[9]), + ('Recall/AR@100 (medium)', self.stats[10]), + ('Recall/AR@100 (large)', self.stats[11])]) + elif self._iou_type == 'keypoints': + category_id = self.GetCategoryIdList()[0] + category_name = self.GetCategory(category_id)['name'] + summary_metrics = OrderedDict([]) + summary_metrics['Precision/mAP ByCategory/{}'.format( + category_name)] = self.stats[0] + summary_metrics['Precision/mAP@.50IOU ByCategory/{}'.format( + category_name)] = self.stats[1] + summary_metrics['Precision/mAP@.75IOU ByCategory/{}'.format( + category_name)] = self.stats[2] + summary_metrics['Precision/mAP (medium) ByCategory/{}'.format( + category_name)] = self.stats[3] + summary_metrics['Precision/mAP (large) ByCategory/{}'.format( + category_name)] = self.stats[4] + summary_metrics['Recall/AR@1 ByCategory/{}'.format( + category_name)] = self.stats[5] + summary_metrics['Recall/AR@10 ByCategory/{}'.format( + category_name)] = self.stats[6] + summary_metrics['Recall/AR@100 ByCategory/{}'.format( + category_name)] = self.stats[7] + summary_metrics['Recall/AR@100 (medium) ByCategory/{}'.format( + category_name)] = self.stats[8] + summary_metrics['Recall/AR@100 (large) ByCategory/{}'.format( + category_name)] = self.stats[9] + if not include_metrics_per_category: + return summary_metrics, {} + if not hasattr(self, 'category_stats'): + raise ValueError('Category stats do not exist') + per_category_ap = OrderedDict([]) + super_category_ap = OrderedDict([]) + if self.GetAgnosticMode(): + return summary_metrics, per_category_ap + for category_index, category_id in enumerate(self.GetCategoryIdList()): + category = self.GetCategory(category_id)['name'] + # Kept for backward compatilbility + per_category_ap['PerformanceByCategory/mAP/{}'.format( + category)] = self.category_stats[0][category_index] + if super_categories: + for key in super_categories: + if category in super_categories[key]: + metric_name = 'PerformanceBySuperCategory/{}'.format(key) + if metric_name not in super_category_ap: + super_category_ap[metric_name] = 0 + super_category_ap[metric_name] += self.category_stats[0][ + category_index] + if all_metrics_per_category: + per_category_ap['Precision mAP ByCategory/{}'.format( + category)] = self.category_stats[0][category_index] + per_category_ap['Precision mAP@.50IOU ByCategory/{}'.format( + category)] = self.category_stats[1][category_index] + per_category_ap['Precision mAP@.75IOU ByCategory/{}'.format( + category)] = self.category_stats[2][category_index] + per_category_ap['Precision mAP (small) ByCategory/{}'.format( + category)] = self.category_stats[3][category_index] + per_category_ap['Precision mAP (medium) ByCategory/{}'.format( + category)] = self.category_stats[4][category_index] + per_category_ap['Precision mAP (large) ByCategory/{}'.format( + category)] = self.category_stats[5][category_index] + per_category_ap['Recall AR@1 ByCategory/{}'.format( + category)] = self.category_stats[6][category_index] + per_category_ap['Recall AR@10 
ByCategory/{}'.format( + category)] = self.category_stats[7][category_index] + per_category_ap['Recall AR@100 ByCategory/{}'.format( + category)] = self.category_stats[8][category_index] + per_category_ap['Recall AR@100 (small) ByCategory/{}'.format( + category)] = self.category_stats[9][category_index] + per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format( + category)] = self.category_stats[10][category_index] + per_category_ap['Recall AR@100 (large) ByCategory/{}'.format( + category)] = self.category_stats[11][category_index] + if super_categories: + for key in super_categories: + metric_name = 'PerformanceBySuperCategory/{}'.format(key) + super_category_ap[metric_name] /= len(super_categories[key]) + per_category_ap.update(super_category_ap) + return summary_metrics, per_category_ap + + +def _ConvertBoxToCOCOFormat(box): + """Converts a box in [ymin, xmin, ymax, xmax] format to COCO format. + + This is a utility function for converting from our internal + [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API + i.e., [xmin, ymin, width, height]. + + Args: + box: a [ymin, xmin, ymax, xmax] numpy array + + Returns: + a list of floats representing [xmin, ymin, width, height] + """ + return [float(box[1]), float(box[0]), float(box[3] - box[1]), + float(box[2] - box[0])] + + +def _RleCompress(masks): + """Compresses mask using Run-length encoding provided by pycocotools. + + Args: + masks: uint8 numpy array of shape [mask_height, mask_width] with values in + {0, 1}. + + Returns: + A pycocotools Run-length encoding of the mask. + """ + rle = mask.encode(np.asfortranarray(masks)) + rle['counts'] = six.ensure_str(rle['counts']) + return rle + + +def ExportSingleImageGroundtruthToCoco(image_id, + next_annotation_id, + category_id_set, + groundtruth_boxes, + groundtruth_classes, + groundtruth_keypoints=None, + groundtruth_keypoint_visibilities=None, + groundtruth_masks=None, + groundtruth_is_crowd=None, + groundtruth_area=None): + """Export groundtruth of a single image to COCO format. + + This function converts groundtruth detection annotations represented as numpy + arrays to dictionaries that can be ingested by the COCO evaluation API. Note + that the image_ids provided here must match the ones given to + ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in + correspondence - that is: groundtruth_boxes[i, :], and + groundtruth_classes[i] are associated with the same groundtruth annotation. + + In the exported result, "area" fields are always set to the area of the + groundtruth bounding box. + + Args: + image_id: a unique image identifier either of type integer or string. + next_annotation_id: integer specifying the first id to use for the + groundtruth annotations. All annotations are assigned a continuous integer + id starting from this value. + category_id_set: A set of valid class ids. Groundtruth with classes not in + category_id_set are dropped. + groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4] + groundtruth_classes: numpy array (int) with shape [num_gt_boxes] + groundtruth_keypoints: optional float numpy array of keypoints + with shape [num_gt_boxes, num_keypoints, 2]. + groundtruth_keypoint_visibilities: optional integer numpy array of keypoint + visibilities with shape [num_gt_boxes, num_keypoints]. Integer is treated + as an enum with 0=not labels, 1=labeled but not visible and 2=labeled and + visible. 
+ groundtruth_masks: optional uint8 numpy array of shape [num_detections, + image_height, image_width] containing detection_masks. + groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes] + indicating whether groundtruth boxes are crowd. + groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If + provided, then the area values (in the original absolute coordinates) will + be populated instead of calculated from bounding box coordinates. + + Returns: + a list of groundtruth annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the + right lengths or (2) if each of the elements inside these lists do not + have the correct shapes or (3) if image_ids are not integers + """ + + if len(groundtruth_classes.shape) != 1: + raise ValueError('groundtruth_classes is ' + 'expected to be of rank 1.') + if len(groundtruth_boxes.shape) != 2: + raise ValueError('groundtruth_boxes is expected to be of ' + 'rank 2.') + if groundtruth_boxes.shape[1] != 4: + raise ValueError('groundtruth_boxes should have ' + 'shape[1] == 4.') + num_boxes = groundtruth_classes.shape[0] + if num_boxes != groundtruth_boxes.shape[0]: + raise ValueError('Corresponding entries in groundtruth_classes, ' + 'and groundtruth_boxes should have ' + 'compatible shapes (i.e., agree on the 0th dimension).' + 'Classes shape: %d. Boxes shape: %d. Image ID: %s' % ( + groundtruth_classes.shape[0], + groundtruth_boxes.shape[0], image_id)) + has_is_crowd = groundtruth_is_crowd is not None + if has_is_crowd and len(groundtruth_is_crowd.shape) != 1: + raise ValueError('groundtruth_is_crowd is expected to be of rank 1.') + has_keypoints = groundtruth_keypoints is not None + has_keypoint_visibilities = groundtruth_keypoint_visibilities is not None + if has_keypoints and not has_keypoint_visibilities: + groundtruth_keypoint_visibilities = np.full( + (num_boxes, groundtruth_keypoints.shape[1]), 2) + groundtruth_list = [] + for i in range(num_boxes): + if groundtruth_classes[i] in category_id_set: + iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0 + if groundtruth_area is not None and groundtruth_area[i] > 0: + area = float(groundtruth_area[i]) + else: + area = float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) * + (groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1])) + export_dict = { + 'id': + next_annotation_id + i, + 'image_id': + image_id, + 'category_id': + int(groundtruth_classes[i]), + 'bbox': + list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])), + 'area': area, + 'iscrowd': + iscrowd + } + if groundtruth_masks is not None: + export_dict['segmentation'] = _RleCompress(groundtruth_masks[i]) + if has_keypoints: + keypoints = groundtruth_keypoints[i] + visibilities = np.reshape(groundtruth_keypoint_visibilities[i], [-1]) + coco_keypoints = [] + num_valid_keypoints = 0 + for keypoint, visibility in zip(keypoints, visibilities): + # Convert from [y, x] to [x, y] as mandated by COCO. 
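+ # COCO stores keypoints as a flat [x1, y1, v1, x2, y2, v2, ...] list, so each
+ # keypoint contributes three consecutive entries; num_keypoints counts only
+ # the keypoints whose visibility is greater than zero.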
+ coco_keypoints.append(float(keypoint[1])) + coco_keypoints.append(float(keypoint[0])) + coco_keypoints.append(int(visibility)) + if int(visibility) > 0: + num_valid_keypoints = num_valid_keypoints + 1 + export_dict['keypoints'] = coco_keypoints + export_dict['num_keypoints'] = num_valid_keypoints + + groundtruth_list.append(export_dict) + return groundtruth_list + + +def ExportGroundtruthToCOCO(image_ids, + groundtruth_boxes, + groundtruth_classes, + categories, + output_path=None): + """Export groundtruth detection annotations in numpy arrays to COCO API. + + This function converts a set of groundtruth detection annotations represented + as numpy arrays to dictionaries that can be ingested by the COCO API. + Inputs to this function are three lists: image ids for each groundtruth image, + groundtruth boxes for each image and groundtruth classes respectively. + Note that the image_ids provided here must match the ones given to the + ExportDetectionsToCOCO function in order for evaluation to work properly. + We assume that for each image, boxes, scores and classes are in + correspondence --- that is: image_id[i], groundtruth_boxes[i, :] and + groundtruth_classes[i] are associated with the same groundtruth annotation. + + In the exported result, "area" fields are always set to the area of the + groundtruth bounding box and "iscrowd" fields are always set to 0. + TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset. + + Args: + image_ids: a list of unique image identifier either of type integer or + string. + groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4] + (note that num_gt_boxes can be different for each entry in the list) + groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes] + (note that num_gt_boxes can be different for each entry in the list) + categories: a list of dictionaries representing all possible categories. + Each dict in this list has the following keys: + 'id': (required) an integer id uniquely identifying this category + 'name': (required) string representing category name + e.g., 'cat', 'dog', 'pizza' + 'supercategory': (optional) string representing the supercategory + e.g., 'animal', 'vehicle', 'food', etc + output_path: (optional) path for exporting result to JSON + Returns: + dictionary that can be read by COCO API + Raises: + ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the + right lengths or (2) if each of the elements inside these lists do not + have the correct shapes or (3) if image_ids are not integers + """ + category_id_set = set([cat['id'] for cat in categories]) + groundtruth_export_list = [] + image_export_list = [] + if not len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes): + raise ValueError('Input lists must have the same length') + + # For reasons internal to the COCO API, it is important that annotation ids + # are not equal to zero; we thus start counting from 1. 
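+ # The annotation id is advanced by the number of boxes in each image so that
+ # every annotation in the exported dictionary receives a unique id.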
+ annotation_id = 1 + for image_id, boxes, classes in zip(image_ids, groundtruth_boxes, + groundtruth_classes): + image_export_list.append({'id': image_id}) + groundtruth_export_list.extend(ExportSingleImageGroundtruthToCoco( + image_id, + annotation_id, + category_id_set, + boxes, + classes)) + num_boxes = classes.shape[0] + annotation_id += num_boxes + + groundtruth_dict = { + 'annotations': groundtruth_export_list, + 'images': image_export_list, + 'categories': categories + } + if output_path: + with tf.gfile.GFile(output_path, 'w') as fid: + json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2) + return groundtruth_dict + + +def ExportSingleImageDetectionBoxesToCoco(image_id, + category_id_set, + detection_boxes, + detection_scores, + detection_classes, + detection_keypoints=None, + detection_keypoint_visibilities=None): + """Export detections of a single image to COCO format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the COCO evaluation API. Note that the image_ids + provided here must match the ones given to + ExportSingleImageGroundtruthToCoco. We assume that boxes and classes are in + correspondence - that is: boxes[i, :] and classes[i] + are associated with the same detection. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. + detection_boxes: float numpy array of shape [num_detections, 4] containing + detection boxes. + detection_scores: float numpy array of shape [num_detections] containing + scores for the detection boxes. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection boxes. + detection_keypoints: optional float numpy array of keypoints + with shape [num_detections, num_keypoints, 2]. + detection_keypoint_visibilities: optional integer numpy array of keypoint + visibilities with shape [num_detections, num_keypoints]. Integer is + treated as an enum with 0=not labeled, 1=labeled but not visible and + 2=labeled and visible. + + Returns: + a list of detection annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_boxes, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. + """ + + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError('All entries in detection_classes and detection_scores ' + 'expected to be of rank 1.') + if len(detection_boxes.shape) != 2: + raise ValueError('All entries in detection_boxes expected to be of ' + 'rank 2.') + if detection_boxes.shape[1] != 4: + raise ValueError('All entries in detection_boxes should have ' + 'shape[1] == 4.') + num_boxes = detection_classes.shape[0] + if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]: + raise ValueError('Corresponding entries in detection_classes, ' + 'detection_scores and detection_boxes should have ' + 'compatible shapes (i.e., agree on the 0th dimension). ' + 'Classes shape: %d. Boxes shape: %d. 
' + 'Scores shape: %d' % ( + detection_classes.shape[0], detection_boxes.shape[0], + detection_scores.shape[0] + )) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + export_dict = { + 'image_id': + image_id, + 'category_id': + int(detection_classes[i]), + 'bbox': + list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])), + 'score': + float(detection_scores[i]), + } + if detection_keypoints is not None: + keypoints = detection_keypoints[i] + num_keypoints = keypoints.shape[0] + if detection_keypoint_visibilities is None: + detection_keypoint_visibilities = np.full((num_boxes, num_keypoints), + 2) + visibilities = np.reshape(detection_keypoint_visibilities[i], [-1]) + coco_keypoints = [] + for keypoint, visibility in zip(keypoints, visibilities): + # Convert from [y, x] to [x, y] as mandated by COCO. + coco_keypoints.append(float(keypoint[1])) + coco_keypoints.append(float(keypoint[0])) + coco_keypoints.append(int(visibility)) + export_dict['keypoints'] = coco_keypoints + export_dict['num_keypoints'] = num_keypoints + detections_list.append(export_dict) + + return detections_list + + +def ExportSingleImageDetectionMasksToCoco(image_id, + category_id_set, + detection_masks, + detection_scores, + detection_classes): + """Export detection masks of a single image to COCO format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the COCO evaluation API. We assume that + detection_masks, detection_scores, and detection_classes are in correspondence + - that is: detection_masks[i, :], detection_classes[i] and detection_scores[i] + are associated with the same annotation. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. + detection_masks: uint8 numpy array of shape [num_detections, image_height, + image_width] containing detection_masks. + detection_scores: float numpy array of shape [num_detections] containing + scores for detection masks. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection masks. + + Returns: + a list of detection mask annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_masks, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. + """ + + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError('All entries in detection_classes and detection_scores' + 'expected to be of rank 1.') + num_boxes = detection_classes.shape[0] + if not num_boxes == len(detection_masks) == detection_scores.shape[0]: + raise ValueError('Corresponding entries in detection_classes, ' + 'detection_scores and detection_masks should have ' + 'compatible lengths and shapes ' + 'Classes length: %d. Masks length: %d. 
' + 'Scores length: %d' % ( + detection_classes.shape[0], len(detection_masks), + detection_scores.shape[0] + )) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + detections_list.append({ + 'image_id': image_id, + 'category_id': int(detection_classes[i]), + 'segmentation': _RleCompress(detection_masks[i]), + 'score': float(detection_scores[i]) + }) + return detections_list + + +def ExportDetectionsToCOCO(image_ids, + detection_boxes, + detection_scores, + detection_classes, + categories, + output_path=None): + """Export detection annotations in numpy arrays to COCO API. + + This function converts a set of predicted detections represented + as numpy arrays to dictionaries that can be ingested by the COCO API. + Inputs to this function are lists, consisting of boxes, scores and + classes, respectively, corresponding to each image for which detections + have been produced. Note that the image_ids provided here must + match the ones given to the ExportGroundtruthToCOCO function in order + for evaluation to work properly. + + We assume that for each image, boxes, scores and classes are in + correspondence --- that is: detection_boxes[i, :], detection_scores[i] and + detection_classes[i] are associated with the same detection. + + Args: + image_ids: a list of unique image identifier either of type integer or + string. + detection_boxes: list of numpy arrays with shape [num_detection_boxes, 4] + detection_scores: list of numpy arrays (float) with shape + [num_detection_boxes]. Note that num_detection_boxes can be different + for each entry in the list. + detection_classes: list of numpy arrays (int) with shape + [num_detection_boxes]. Note that num_detection_boxes can be different + for each entry in the list. + categories: a list of dictionaries representing all possible categories. + Each dict in this list must have an integer 'id' key uniquely identifying + this category. + output_path: (optional) path for exporting result to JSON + + Returns: + list of dictionaries that can be read by COCO API, where each entry + corresponds to a single detection and has keys from: + ['image_id', 'category_id', 'bbox', 'score']. + Raises: + ValueError: if (1) detection_boxes and detection_classes do not have the + right lengths or (2) if each of the elements inside these lists do not + have the correct shapes or (3) if image_ids are not integers. + """ + category_id_set = set([cat['id'] for cat in categories]) + detections_export_list = [] + if not (len(image_ids) == len(detection_boxes) == len(detection_scores) == + len(detection_classes)): + raise ValueError('Input lists must have the same length') + for image_id, boxes, scores, classes in zip(image_ids, detection_boxes, + detection_scores, + detection_classes): + detections_export_list.extend(ExportSingleImageDetectionBoxesToCoco( + image_id, + category_id_set, + boxes, + scores, + classes)) + if output_path: + with tf.gfile.GFile(output_path, 'w') as fid: + json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2) + return detections_export_list + + +def ExportSegmentsToCOCO(image_ids, + detection_masks, + detection_scores, + detection_classes, + categories, + output_path=None): + """Export segmentation masks in numpy arrays to COCO API. + + This function converts a set of predicted instance masks represented + as numpy arrays to dictionaries that can be ingested by the COCO API. 
+ Inputs to this function are lists, consisting of segments, scores and + classes, respectively, corresponding to each image for which detections + have been produced. + + Note this function is recommended to use for small dataset. + For large dataset, it should be used with a merge function + (e.g. in map reduce), otherwise the memory consumption is large. + + We assume that for each image, masks, scores and classes are in + correspondence --- that is: detection_masks[i, :, :, :], detection_scores[i] + and detection_classes[i] are associated with the same detection. + + Args: + image_ids: list of image ids (typically ints or strings) + detection_masks: list of numpy arrays with shape [num_detection, h, w, 1] + and type uint8. The height and width should match the shape of + corresponding image. + detection_scores: list of numpy arrays (float) with shape + [num_detection]. Note that num_detection can be different + for each entry in the list. + detection_classes: list of numpy arrays (int) with shape + [num_detection]. Note that num_detection can be different + for each entry in the list. + categories: a list of dictionaries representing all possible categories. + Each dict in this list must have an integer 'id' key uniquely identifying + this category. + output_path: (optional) path for exporting result to JSON + + Returns: + list of dictionaries that can be read by COCO API, where each entry + corresponds to a single detection and has keys from: + ['image_id', 'category_id', 'segmentation', 'score']. + + Raises: + ValueError: if detection_masks and detection_classes do not have the + right lengths or if each of the elements inside these lists do not + have the correct shapes. + """ + if not (len(image_ids) == len(detection_masks) == len(detection_scores) == + len(detection_classes)): + raise ValueError('Input lists must have the same length') + + segment_export_list = [] + for image_id, masks, scores, classes in zip(image_ids, detection_masks, + detection_scores, + detection_classes): + + if len(classes.shape) != 1 or len(scores.shape) != 1: + raise ValueError('All entries in detection_classes and detection_scores' + 'expected to be of rank 1.') + if len(masks.shape) != 4: + raise ValueError('All entries in masks expected to be of ' + 'rank 4. Given {}'.format(masks.shape)) + + num_boxes = classes.shape[0] + if not num_boxes == masks.shape[0] == scores.shape[0]: + raise ValueError('Corresponding entries in segment_classes, ' + 'detection_scores and detection_boxes should have ' + 'compatible shapes (i.e., agree on the 0th dimension).') + + category_id_set = set([cat['id'] for cat in categories]) + segment_export_list.extend(ExportSingleImageDetectionMasksToCoco( + image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes)) + + if output_path: + with tf.gfile.GFile(output_path, 'w') as fid: + json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2) + return segment_export_list + + +def ExportKeypointsToCOCO(image_ids, + detection_keypoints, + detection_scores, + detection_classes, + categories, + output_path=None): + """Exports keypoints in numpy arrays to COCO API. + + This function converts a set of predicted keypoints represented + as numpy arrays to dictionaries that can be ingested by the COCO API. + Inputs to this function are lists, consisting of keypoints, scores and + classes, respectively, corresponding to each image for which detections + have been produced. 
+ + We assume that for each image, keypoints, scores and classes are in + correspondence --- that is: detection_keypoints[i, :, :, :], + detection_scores[i] and detection_classes[i] are associated with the same + detection. + + Args: + image_ids: list of image ids (typically ints or strings) + detection_keypoints: list of numpy arrays with shape + [num_detection, num_keypoints, 2] and type float32 in absolute + x-y coordinates. + detection_scores: list of numpy arrays (float) with shape + [num_detection]. Note that num_detection can be different + for each entry in the list. + detection_classes: list of numpy arrays (int) with shape + [num_detection]. Note that num_detection can be different + for each entry in the list. + categories: a list of dictionaries representing all possible categories. + Each dict in this list must have an integer 'id' key uniquely identifying + this category and an integer 'num_keypoints' key specifying the number of + keypoints the category has. + output_path: (optional) path for exporting result to JSON + + Returns: + list of dictionaries that can be read by COCO API, where each entry + corresponds to a single detection and has keys from: + ['image_id', 'category_id', 'keypoints', 'score']. + + Raises: + ValueError: if detection_keypoints and detection_classes do not have the + right lengths or if each of the elements inside these lists do not + have the correct shapes. + """ + if not (len(image_ids) == len(detection_keypoints) == + len(detection_scores) == len(detection_classes)): + raise ValueError('Input lists must have the same length') + + keypoints_export_list = [] + for image_id, keypoints, scores, classes in zip( + image_ids, detection_keypoints, detection_scores, detection_classes): + + if len(classes.shape) != 1 or len(scores.shape) != 1: + raise ValueError('All entries in detection_classes and detection_scores' + 'expected to be of rank 1.') + if len(keypoints.shape) != 3: + raise ValueError('All entries in keypoints expected to be of ' + 'rank 3. Given {}'.format(keypoints.shape)) + + num_boxes = classes.shape[0] + if not num_boxes == keypoints.shape[0] == scores.shape[0]: + raise ValueError('Corresponding entries in detection_classes, ' + 'detection_keypoints, and detection_scores should have ' + 'compatible shapes (i.e., agree on the 0th dimension).') + + category_id_set = set([cat['id'] for cat in categories]) + category_id_to_num_keypoints_map = { + cat['id']: cat['num_keypoints'] for cat in categories + if 'num_keypoints' in cat} + + for i in range(num_boxes): + if classes[i] not in category_id_set: + raise ValueError('class id should be in category_id_set\n') + + if classes[i] in category_id_to_num_keypoints_map: + num_keypoints = category_id_to_num_keypoints_map[classes[i]] + # Adds extra ones to indicate the visibility for each keypoint as is + # recommended by MSCOCO. 
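+ # The concatenation below produces a [num_keypoints, 3] array (the two
+ # keypoint coordinates plus a visibility flag of 1), which is then cast to
+ # int and flattened into the COCO keypoint list format.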
+ instance_keypoints = np.concatenate( + [keypoints[i, 0:num_keypoints, :], + np.expand_dims(np.ones(num_keypoints), axis=1)], + axis=1).astype(int) + + instance_keypoints = instance_keypoints.flatten().tolist() + keypoints_export_list.append({ + 'image_id': image_id, + 'category_id': int(classes[i]), + 'keypoints': instance_keypoints, + 'score': float(scores[i]) + }) + + if output_path: + with tf.gfile.GFile(output_path, 'w') as fid: + json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2) + return keypoints_export_list diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_tools_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_tools_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f2c3ce0a81d46f6c4447272b60a3381a2adeeb0c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/coco_tools_test.py @@ -0,0 +1,405 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for tensorflow_model.object_detection.metrics.coco_tools.""" +import json +import os +import re +import numpy as np + +from pycocotools import mask + +import tensorflow.compat.v1 as tf + +from object_detection.metrics import coco_tools + + +class CocoToolsTest(tf.test.TestCase): + + def setUp(self): + groundtruth_annotations_list = [ + { + 'id': 1, + 'image_id': 'first', + 'category_id': 1, + 'bbox': [100., 100., 100., 100.], + 'area': 100.**2, + 'iscrowd': 0 + }, + { + 'id': 2, + 'image_id': 'second', + 'category_id': 1, + 'bbox': [50., 50., 50., 50.], + 'area': 50.**2, + 'iscrowd': 0 + }, + ] + image_list = [{'id': 'first'}, {'id': 'second'}] + category_list = [{'id': 0, 'name': 'person'}, + {'id': 1, 'name': 'cat'}, + {'id': 2, 'name': 'dog'}] + self._groundtruth_dict = { + 'annotations': groundtruth_annotations_list, + 'images': image_list, + 'categories': category_list + } + + self._detections_list = [ + { + 'image_id': 'first', + 'category_id': 1, + 'bbox': [100., 100., 100., 100.], + 'score': .8 + }, + { + 'image_id': 'second', + 'category_id': 1, + 'bbox': [50., 50., 50., 50.], + 'score': .7 + }, + ] + + def testCocoWrappers(self): + groundtruth = coco_tools.COCOWrapper(self._groundtruth_dict) + detections = groundtruth.LoadAnnotations(self._detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections) + summary_metrics, _ = evaluator.ComputeMetrics() + self.assertAlmostEqual(1.0, summary_metrics['Precision/mAP']) + + def testExportGroundtruthToCOCO(self): + image_ids = ['first', 'second'] + groundtruth_boxes = [np.array([[100, 100, 200, 200]], np.float), + np.array([[50, 50, 100, 100]], np.float)] + groundtruth_classes = [np.array([1], np.int32), np.array([1], np.int32)] + categories = [{'id': 0, 'name': 'person'}, + {'id': 1, 'name': 'cat'}, + {'id': 2, 'name': 'dog'}] + output_path = 
os.path.join(tf.test.get_temp_dir(), 'groundtruth.json') + result = coco_tools.ExportGroundtruthToCOCO( + image_ids, + groundtruth_boxes, + groundtruth_classes, + categories, + output_path=output_path) + self.assertDictEqual(result, self._groundtruth_dict) + with tf.gfile.GFile(output_path, 'r') as f: + written_result = f.read() + # The json output should have floats written to 4 digits of precision. + matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE) + self.assertTrue(matcher.findall(written_result)) + written_result = json.loads(written_result) + self.assertAlmostEqual(result, written_result) + + def testExportDetectionsToCOCO(self): + image_ids = ['first', 'second'] + detections_boxes = [np.array([[100, 100, 200, 200]], np.float), + np.array([[50, 50, 100, 100]], np.float)] + detections_scores = [np.array([.8], np.float), np.array([.7], np.float)] + detections_classes = [np.array([1], np.int32), np.array([1], np.int32)] + categories = [{'id': 0, 'name': 'person'}, + {'id': 1, 'name': 'cat'}, + {'id': 2, 'name': 'dog'}] + output_path = os.path.join(tf.test.get_temp_dir(), 'detections.json') + result = coco_tools.ExportDetectionsToCOCO( + image_ids, + detections_boxes, + detections_scores, + detections_classes, + categories, + output_path=output_path) + self.assertListEqual(result, self._detections_list) + with tf.gfile.GFile(output_path, 'r') as f: + written_result = f.read() + # The json output should have floats written to 4 digits of precision. + matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE) + self.assertTrue(matcher.findall(written_result)) + written_result = json.loads(written_result) + self.assertAlmostEqual(result, written_result) + + def testExportSegmentsToCOCO(self): + image_ids = ['first', 'second'] + detection_masks = [np.array( + [[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]], + dtype=np.uint8), np.array( + [[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]], + dtype=np.uint8)] + + for i, detection_mask in enumerate(detection_masks): + detection_masks[i] = detection_mask[:, :, :, None] + + detection_scores = [np.array([.8], np.float), np.array([.7], np.float)] + detection_classes = [np.array([1], np.int32), np.array([1], np.int32)] + + categories = [{'id': 0, 'name': 'person'}, + {'id': 1, 'name': 'cat'}, + {'id': 2, 'name': 'dog'}] + output_path = os.path.join(tf.test.get_temp_dir(), 'segments.json') + result = coco_tools.ExportSegmentsToCOCO( + image_ids, + detection_masks, + detection_scores, + detection_classes, + categories, + output_path=output_path) + with tf.gfile.GFile(output_path, 'r') as f: + written_result = f.read() + written_result = json.loads(written_result) + mask_load = mask.decode([written_result[0]['segmentation']]) + self.assertTrue(np.allclose(mask_load, detection_masks[0])) + self.assertAlmostEqual(result, written_result) + + def testExportKeypointsToCOCO(self): + image_ids = ['first', 'second'] + detection_keypoints = [ + np.array( + [[[100, 200], [300, 400], [500, 600]], + [[50, 150], [250, 350], [450, 550]]], dtype=np.int32), + np.array( + [[[110, 210], [310, 410], [510, 610]], + [[60, 160], [260, 360], [460, 560]]], dtype=np.int32)] + + detection_scores = [np.array([.8, 0.2], np.float), + np.array([.7, 0.3], np.float)] + detection_classes = [np.array([1, 1], np.int32), np.array([1, 1], np.int32)] + + categories = [{'id': 1, 'name': 'person', 'num_keypoints': 3}, + {'id': 2, 'name': 'cat'}, + {'id': 3, 'name': 'dog'}] + + output_path = os.path.join(tf.test.get_temp_dir(), 
'keypoints.json') + result = coco_tools.ExportKeypointsToCOCO( + image_ids, + detection_keypoints, + detection_scores, + detection_classes, + categories, + output_path=output_path) + + with tf.gfile.GFile(output_path, 'r') as f: + written_result = f.read() + written_result = json.loads(written_result) + self.assertAlmostEqual(result, written_result) + + def testSingleImageDetectionBoxesExport(self): + boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, 1, 1]], dtype=np.float32) + classes = np.array([1, 2, 3], dtype=np.int32) + scores = np.array([0.8, 0.2, 0.7], dtype=np.float32) + coco_boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, .5, .5]], dtype=np.float32) + coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + detection_boxes=boxes, + detection_classes=classes, + detection_scores=scores) + for i, annotation in enumerate(coco_annotations): + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertAlmostEqual(annotation['score'], scores[i]) + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + + def testSingleImageDetectionMaskExport(self): + masks = np.array( + [[[1, 1,], [1, 1]], + [[0, 0], [0, 1]], + [[0, 0], [0, 0]]], dtype=np.uint8) + classes = np.array([1, 2, 3], dtype=np.int32) + scores = np.array([0.8, 0.2, 0.7], dtype=np.float32) + coco_annotations = coco_tools.ExportSingleImageDetectionMasksToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + detection_classes=classes, + detection_scores=scores, + detection_masks=masks) + expected_counts = ['04', '31', '4'] + for i, mask_annotation in enumerate(coco_annotations): + self.assertEqual(mask_annotation['segmentation']['counts'], + expected_counts[i]) + self.assertTrue(np.all(np.equal(mask.decode( + mask_annotation['segmentation']), masks[i]))) + self.assertEqual(mask_annotation['image_id'], 'first_image') + self.assertEqual(mask_annotation['category_id'], classes[i]) + self.assertAlmostEqual(mask_annotation['score'], scores[i]) + + def testSingleImageGroundtruthExport(self): + masks = np.array( + [[[1, 1,], [1, 1]], + [[0, 0], [0, 1]], + [[0, 0], [0, 0]]], dtype=np.uint8) + boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, 1, 1]], dtype=np.float32) + coco_boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, .5, .5]], dtype=np.float32) + classes = np.array([1, 2, 3], dtype=np.int32) + is_crowd = np.array([0, 1, 0], dtype=np.int32) + next_annotation_id = 1 + expected_counts = ['04', '31', '4'] + + # Tests exporting without passing in is_crowd (for backward compatibility). + coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + next_annotation_id=next_annotation_id, + groundtruth_boxes=boxes, + groundtruth_classes=classes, + groundtruth_masks=masks) + for i, annotation in enumerate(coco_annotations): + self.assertEqual(annotation['segmentation']['counts'], + expected_counts[i]) + self.assertTrue(np.all(np.equal(mask.decode( + annotation['segmentation']), masks[i]))) + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertEqual(annotation['id'], i + next_annotation_id) + + # Tests exporting with is_crowd. 
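+ # The annotations are expected to match the ones checked above, except that
+ # 'iscrowd' now mirrors the is_crowd input instead of defaulting to 0.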
+ coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + next_annotation_id=next_annotation_id, + groundtruth_boxes=boxes, + groundtruth_classes=classes, + groundtruth_masks=masks, + groundtruth_is_crowd=is_crowd) + for i, annotation in enumerate(coco_annotations): + self.assertEqual(annotation['segmentation']['counts'], + expected_counts[i]) + self.assertTrue(np.all(np.equal(mask.decode( + annotation['segmentation']), masks[i]))) + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertEqual(annotation['iscrowd'], is_crowd[i]) + self.assertEqual(annotation['id'], i + next_annotation_id) + + def testSingleImageGroundtruthExportWithKeypoints(self): + boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, 1, 1]], dtype=np.float32) + coco_boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, .5, .5]], dtype=np.float32) + keypoints = np.array([[[0, 0], [0.25, 0.25], [0.75, 0.75]], + [[0, 0], [0.125, 0.125], [0.375, 0.375]], + [[0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]], + dtype=np.float32) + visibilities = np.array([[2, 2, 2], + [2, 2, 0], + [2, 0, 0]], dtype=np.int32) + areas = np.array([15., 16., 17.]) + + classes = np.array([1, 2, 3], dtype=np.int32) + is_crowd = np.array([0, 1, 0], dtype=np.int32) + next_annotation_id = 1 + + # Tests exporting without passing in is_crowd (for backward compatibility). + coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + next_annotation_id=next_annotation_id, + groundtruth_boxes=boxes, + groundtruth_classes=classes, + groundtruth_keypoints=keypoints, + groundtruth_keypoint_visibilities=visibilities, + groundtruth_area=areas) + for i, annotation in enumerate(coco_annotations): + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertEqual(annotation['id'], i + next_annotation_id) + self.assertEqual(annotation['num_keypoints'], 3 - i) + self.assertEqual(annotation['area'], 15.0 + i) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1]))) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0]))) + self.assertTrue( + np.all(np.equal(annotation['keypoints'][2::3], visibilities[i]))) + + # Tests exporting with is_crowd. 
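+ # groundtruth_area is not passed in this call, so areas fall back to values
+ # computed from the bounding boxes (1.0 for the unit box, 0.25 for the two
+ # half-size boxes), as asserted at the end of the loop below.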
+ coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + next_annotation_id=next_annotation_id, + groundtruth_boxes=boxes, + groundtruth_classes=classes, + groundtruth_keypoints=keypoints, + groundtruth_keypoint_visibilities=visibilities, + groundtruth_is_crowd=is_crowd) + for i, annotation in enumerate(coco_annotations): + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertEqual(annotation['iscrowd'], is_crowd[i]) + self.assertEqual(annotation['id'], i + next_annotation_id) + self.assertEqual(annotation['num_keypoints'], 3 - i) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1]))) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0]))) + self.assertTrue( + np.all(np.equal(annotation['keypoints'][2::3], visibilities[i]))) + # Testing the area values are derived from the bounding boxes. + if i == 0: + self.assertAlmostEqual(annotation['area'], 1.0) + else: + self.assertAlmostEqual(annotation['area'], 0.25) + + def testSingleImageDetectionBoxesExportWithKeypoints(self): + boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, 1, 1]], + dtype=np.float32) + coco_boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, .5, .5]], + dtype=np.float32) + keypoints = np.array([[[0, 0], [0.25, 0.25], [0.75, 0.75]], + [[0, 0], [0.125, 0.125], [0.375, 0.375]], + [[0.5, 0.5], [0.75, 0.75], [1.0, 1.0]]], + dtype=np.float32) + visibilities = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=np.int32) + + classes = np.array([1, 2, 3], dtype=np.int32) + scores = np.array([0.8, 0.2, 0.7], dtype=np.float32) + + # Tests exporting without passing in is_crowd (for backward compatibility). + coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco( + image_id='first_image', + category_id_set=set([1, 2, 3]), + detection_boxes=boxes, + detection_scores=scores, + detection_classes=classes, + detection_keypoints=keypoints, + detection_keypoint_visibilities=visibilities) + for i, annotation in enumerate(coco_annotations): + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) + self.assertEqual(annotation['score'], scores[i]) + self.assertEqual(annotation['num_keypoints'], 3) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][0::3], keypoints[i, :, 1]))) + self.assertTrue( + np.all(np.isclose(annotation['keypoints'][1::3], keypoints[i, :, 0]))) + self.assertTrue( + np.all(np.equal(annotation['keypoints'][2::3], visibilities[i]))) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/io_utils.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/io_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..900584de1e5cd26b51ae8928581a4283bea2598e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/io_utils.py @@ -0,0 +1,34 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Common IO utils used in offline metric computation. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import csv + + +def write_csv(fid, metrics): + """Writes metrics key-value pairs to CSV file. + + Args: + fid: File identifier of an opened file. + metrics: A dictionary with metrics to be written. + """ + metrics_writer = csv.writer(fid, delimiter=',') + for metric_name, metric_value in metrics.items(): + metrics_writer.writerow([metric_name, str(metric_value)]) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_evaluation.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..95fc12d2032fb7afba780468984fd91cdbc3c0c8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_evaluation.py @@ -0,0 +1,443 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Class for evaluating object detections with LVIS metrics.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import re + +from lvis import results as lvis_results + +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields as fields +from object_detection.metrics import lvis_tools +from object_detection.utils import object_detection_evaluation + + +def convert_masks_to_binary(masks): + """Converts masks to 0 or 1 and uint8 type.""" + return (masks > 0).astype(np.uint8) + + +class LVISMaskEvaluator(object_detection_evaluation.DetectionEvaluator): + """Class to evaluate LVIS mask metrics.""" + + def __init__(self, + categories): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. 
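+        'frequency': string, one of 'f', 'c' or 'r' (frequent/common/rare),
+          giving the LVIS frequency band used for the per-band AP breakdown
+          (the test categories below include this key).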
+ """ + super(LVISMaskEvaluator, self).__init__(categories) + self._image_ids_with_detections = set([]) + self._groundtruth_list = [] + self._detection_masks_list = [] + self._category_id_set = set([cat['id'] for cat in self._categories]) + self._annotation_id = 1 + self._image_id_to_mask_shape_map = {} + self._image_id_to_verified_neg_classes = {} + self._image_id_to_not_exhaustive_classes = {} + + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + self._image_id_to_mask_shape_map.clear() + self._image_ids_with_detections.clear() + self._image_id_to_verified_neg_classes.clear() + self._image_id_to_not_exhaustive_classes.clear() + self._groundtruth_list = [] + self._detection_masks_list = [] + + def add_single_ground_truth_image_info(self, + image_id, + groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + If the image has already been added, a warning is logged, and groundtruth is + ignored. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + InputDataFields.groundtruth_boxes: float32 numpy array of shape + [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + InputDataFields.groundtruth_classes: integer numpy array of shape + [num_boxes] containing 1-indexed groundtruth classes for the boxes. + InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape + [num_masks, image_height, image_width] containing groundtruth masks. + The elements of the array must be in {0, 1}. + InputDataFields.groundtruth_verified_neg_classes: [num_classes] + float indicator vector with values in {0, 1}. + InputDataFields.groundtruth_not_exhaustive_classes: [num_classes] + float indicator vector with values in {0, 1}. + InputDataFields.groundtruth_area (optional): float numpy array of + shape [num_boxes] containing the area (in the original absolute + coordinates) of the annotated object. 
+ Raises: + ValueError: if groundtruth_dict is missing a required field + """ + if image_id in self._image_id_to_mask_shape_map: + tf.logging.warning('Ignoring ground truth with image id %s since it was ' + 'previously added', image_id) + return + for key in [fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_classes, + fields.InputDataFields.groundtruth_instance_masks, + fields.InputDataFields.groundtruth_verified_neg_classes, + fields.InputDataFields.groundtruth_not_exhaustive_classes]: + if key not in groundtruth_dict.keys(): + raise ValueError('groundtruth_dict missing entry: {}'.format(key)) + + groundtruth_instance_masks = groundtruth_dict[ + fields.InputDataFields.groundtruth_instance_masks] + groundtruth_instance_masks = convert_masks_to_binary( + groundtruth_instance_masks) + verified_neg_classes_shape = groundtruth_dict[ + fields.InputDataFields.groundtruth_verified_neg_classes].shape + not_exhaustive_classes_shape = groundtruth_dict[ + fields.InputDataFields.groundtruth_not_exhaustive_classes].shape + if verified_neg_classes_shape != (len(self._category_id_set),): + raise ValueError('Invalid shape for verified_neg_classes_shape.') + if not_exhaustive_classes_shape != (len(self._category_id_set),): + raise ValueError('Invalid shape for not_exhaustive_classes_shape.') + self._image_id_to_verified_neg_classes[image_id] = np.flatnonzero( + groundtruth_dict[ + fields.InputDataFields.groundtruth_verified_neg_classes] + == 1).tolist() + self._image_id_to_not_exhaustive_classes[image_id] = np.flatnonzero( + groundtruth_dict[ + fields.InputDataFields.groundtruth_not_exhaustive_classes] + == 1).tolist() + + # Drop optional fields if empty tensor. + groundtruth_area = groundtruth_dict.get( + fields.InputDataFields.groundtruth_area) + if groundtruth_area is not None and not groundtruth_area.shape[0]: + groundtruth_area = None + + self._groundtruth_list.extend( + lvis_tools.ExportSingleImageGroundtruthToLVIS( + image_id=image_id, + next_annotation_id=self._annotation_id, + category_id_set=self._category_id_set, + groundtruth_boxes=groundtruth_dict[ + fields.InputDataFields.groundtruth_boxes], + groundtruth_classes=groundtruth_dict[ + fields.InputDataFields.groundtruth_classes], + groundtruth_masks=groundtruth_instance_masks, + groundtruth_area=groundtruth_area) + ) + + self._annotation_id += groundtruth_dict[fields.InputDataFields. + groundtruth_boxes].shape[0] + self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[ + fields.InputDataFields.groundtruth_instance_masks].shape + + def add_single_detected_image_info(self, + image_id, + detections_dict): + """Adds detections for a single image to be used for evaluation. + + If a detection has already been added for this image id, a warning is + logged, and the detection is skipped. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary containing - + DetectionResultFields.detection_scores: float32 numpy array of shape + [num_boxes] containing detection scores for the boxes. + DetectionResultFields.detection_classes: integer numpy array of shape + [num_boxes] containing 1-indexed detection classes for the boxes. + DetectionResultFields.detection_masks: optional uint8 numpy array of + shape [num_boxes, image_height, image_width] containing instance + masks corresponding to the boxes. The elements of the array must be + in {0, 1}. + Raises: + ValueError: If groundtruth for the image_id is not available. 
+ """ + if image_id not in self._image_id_to_mask_shape_map: + raise ValueError('Missing groundtruth for image id: {}'.format(image_id)) + + if image_id in self._image_ids_with_detections: + tf.logging.warning('Ignoring detection with image id %s since it was ' + 'previously added', image_id) + return + + groundtruth_masks_shape = self._image_id_to_mask_shape_map[image_id] + detection_masks = detections_dict[fields.DetectionResultFields. + detection_masks] + if groundtruth_masks_shape[1:] != detection_masks.shape[1:]: + raise ValueError('Spatial shape of groundtruth masks and detection masks ' + 'are incompatible: {} vs {}'.format( + groundtruth_masks_shape, + detection_masks.shape)) + detection_masks = convert_masks_to_binary(detection_masks) + + self._detection_masks_list.extend( + lvis_tools.ExportSingleImageDetectionMasksToLVIS( + image_id=image_id, + category_id_set=self._category_id_set, + detection_masks=detection_masks, + detection_scores=detections_dict[ + fields.DetectionResultFields.detection_scores], + detection_classes=detections_dict[ + fields.DetectionResultFields.detection_classes])) + self._image_ids_with_detections.update([image_id]) + + def evaluate(self): + """Evaluates the detection boxes and returns a dictionary of coco metrics. + + Returns: + A dictionary holding + """ + tf.logging.info('Performing evaluation on %d images.', + len(self._image_id_to_mask_shape_map.keys())) + # pylint: disable=g-complex-comprehension + groundtruth_dict = { + 'annotations': self._groundtruth_list, + 'images': [ + { + 'id': image_id, + 'height': shape[1], + 'width': shape[2], + 'neg_category_ids': + self._image_id_to_verified_neg_classes[image_id], + 'not_exhaustive_category_ids': + self._image_id_to_not_exhaustive_classes[image_id] + } for image_id, shape in self._image_id_to_mask_shape_map.items()], + 'categories': self._categories + } + # pylint: enable=g-complex-comprehension + lvis_wrapped_groundtruth = lvis_tools.LVISWrapper(groundtruth_dict) + detections = lvis_results.LVISResults(lvis_wrapped_groundtruth, + self._detection_masks_list) + mask_evaluator = lvis_tools.LVISEvalWrapper( + lvis_wrapped_groundtruth, detections, iou_type='segm') + mask_metrics = mask_evaluator.ComputeMetrics() + mask_metrics = {'DetectionMasks_'+ key: value + for key, value in iter(mask_metrics.items())} + return mask_metrics + + def add_eval_dict(self, eval_dict): + """Observes an evaluation result dict for a single example. + + When executing eagerly, once all observations have been observed by this + method you can use `.evaluate()` to get the final metrics. + + When using `tf.estimator.Estimator` for evaluation this function is used by + `get_estimator_eval_metric_ops()` to construct the metric update op. + + Args: + eval_dict: A dictionary that holds tensors for evaluating an object + detection model, returned from + eval_util.result_dict_for_single_example(). + + Returns: + None when executing eagerly, or an update_op that can be used to update + the eval metrics in `tf.estimator.EstimatorSpec`. 
+ """ + def update_op(image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, + groundtruth_instance_masks_batched, + groundtruth_verified_neg_classes_batched, + groundtruth_not_exhaustive_classes_batched, + num_gt_boxes_per_image, + detection_scores_batched, detection_classes_batched, + detection_masks_batched, num_det_boxes_per_image, + original_image_spatial_shape): + """Update op for metrics.""" + + for (image_id, groundtruth_boxes, groundtruth_classes, + groundtruth_instance_masks, groundtruth_verified_neg_classes, + groundtruth_not_exhaustive_classes, num_gt_box, + detection_scores, detection_classes, + detection_masks, num_det_box, original_image_shape) in zip( + image_id_batched, groundtruth_boxes_batched, + groundtruth_classes_batched, groundtruth_instance_masks_batched, + groundtruth_verified_neg_classes_batched, + groundtruth_not_exhaustive_classes_batched, + num_gt_boxes_per_image, + detection_scores_batched, detection_classes_batched, + detection_masks_batched, num_det_boxes_per_image, + original_image_spatial_shape): + self.add_single_ground_truth_image_info( + image_id, { + input_data_fields.groundtruth_boxes: + groundtruth_boxes[:num_gt_box], + input_data_fields.groundtruth_classes: + groundtruth_classes[:num_gt_box], + input_data_fields.groundtruth_instance_masks: + groundtruth_instance_masks[:num_gt_box][ + :original_image_shape[0], :original_image_shape[1]], + input_data_fields.groundtruth_verified_neg_classes: + groundtruth_verified_neg_classes, + input_data_fields.groundtruth_not_exhaustive_classes: + groundtruth_not_exhaustive_classes + }) + self.add_single_detected_image_info( + image_id, { + 'detection_scores': detection_scores[:num_det_box], + 'detection_classes': detection_classes[:num_det_box], + 'detection_masks': detection_masks[:num_det_box][ + :original_image_shape[0], :original_image_shape[1]] + }) + + # Unpack items from the evaluation dictionary. + input_data_fields = fields.InputDataFields + detection_fields = fields.DetectionResultFields + image_id = eval_dict[input_data_fields.key] + original_image_spatial_shape = eval_dict[ + input_data_fields.original_image_spatial_shape] + groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes] + groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes] + groundtruth_instance_masks = eval_dict[ + input_data_fields.groundtruth_instance_masks] + groundtruth_verified_neg_classes = eval_dict[ + input_data_fields.groundtruth_verified_neg_classes] + groundtruth_not_exhaustive_classes = eval_dict[ + input_data_fields.groundtruth_not_exhaustive_classes] + + num_gt_boxes_per_image = eval_dict.get( + input_data_fields.num_groundtruth_boxes, None) + detection_scores = eval_dict[detection_fields.detection_scores] + detection_classes = eval_dict[detection_fields.detection_classes] + detection_masks = eval_dict[detection_fields.detection_masks] + num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections, + None) + + if not image_id.shape.as_list(): + # Apply a batch dimension to all tensors. 
+ image_id = tf.expand_dims(image_id, 0) + groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0) + groundtruth_classes = tf.expand_dims(groundtruth_classes, 0) + groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0) + groundtruth_verified_neg_classes = tf.expand_dims( + groundtruth_verified_neg_classes, 0) + groundtruth_not_exhaustive_classes = tf.expand_dims( + groundtruth_not_exhaustive_classes, 0) + detection_scores = tf.expand_dims(detection_scores, 0) + detection_classes = tf.expand_dims(detection_classes, 0) + detection_masks = tf.expand_dims(detection_masks, 0) + + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2] + else: + num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0) + + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.shape(detection_scores)[1:2] + else: + num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0) + else: + if num_gt_boxes_per_image is None: + num_gt_boxes_per_image = tf.tile( + tf.shape(groundtruth_boxes)[1:2], + multiples=tf.shape(groundtruth_boxes)[0:1]) + if num_det_boxes_per_image is None: + num_det_boxes_per_image = tf.tile( + tf.shape(detection_scores)[1:2], + multiples=tf.shape(detection_scores)[0:1]) + + return tf.py_func(update_op, [ + image_id, groundtruth_boxes, groundtruth_classes, + groundtruth_instance_masks, groundtruth_verified_neg_classes, + groundtruth_not_exhaustive_classes, + num_gt_boxes_per_image, detection_scores, detection_classes, + detection_masks, num_det_boxes_per_image, original_image_spatial_shape + ], []) + + def get_estimator_eval_metric_ops(self, eval_dict): + """Returns a dictionary of eval metric ops. + + Note that once value_op is called, the detections and groundtruth added via + update_op are cleared. + + Args: + eval_dict: A dictionary that holds tensors for evaluating object detection + performance. For single-image evaluation, this dictionary may be + produced from eval_util.result_dict_for_single_example(). If multi-image + evaluation, `eval_dict` should contain the fields + 'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to + properly unpad the tensors from the batch. + + Returns: + a dictionary of metric names to tuple of value_op and update_op that can + be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all + update ops must be run together and similarly all value ops must be run + together to guarantee correct behaviour. + """ + update_op = self.add_eval_dict(eval_dict) + metric_names = ['DetectionMasks_Precision/mAP', + 'DetectionMasks_Precision/mAP@.50IOU', + 'DetectionMasks_Precision/mAP@.75IOU', + 'DetectionMasks_Precision/mAP (small)', + 'DetectionMasks_Precision/mAP (medium)', + 'DetectionMasks_Precision/mAP (large)', + 'DetectionMasks_Recall/AR@1', + 'DetectionMasks_Recall/AR@10', + 'DetectionMasks_Recall/AR@100', + 'DetectionMasks_Recall/AR@100 (small)', + 'DetectionMasks_Recall/AR@100 (medium)', + 'DetectionMasks_Recall/AR@100 (large)'] + if self._include_metrics_per_category: + for category_dict in self._categories: + metric_names.append('DetectionMasks_PerformanceByCategory/mAP/' + + category_dict['name']) + + def first_value_func(): + self._metrics = self.evaluate() + self.clear() + return np.float32(self._metrics[metric_names[0]]) + + def value_func_factory(metric_name): + def value_func(): + return np.float32(self._metrics[metric_name]) + return value_func + + # Ensure that the metrics are only evaluated once. 
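+    # first_value_func computes and caches all metrics in self._metrics (then
+    # clears the accumulated state); the remaining value ops, gated by the
+    # control dependency below, only read the cached values.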
+ first_value_op = tf.py_func(first_value_func, [], tf.float32) + eval_metric_ops = {metric_names[0]: (first_value_op, update_op)} + with tf.control_dependencies([first_value_op]): + for metric_name in metric_names[1:]: + eval_metric_ops[metric_name] = (tf.py_func( + value_func_factory(metric_name), [], np.float32), update_op) + return eval_metric_ops + + def dump_detections_to_json_file(self, json_output_path): + """Saves the detections into json_output_path in the format used by MS COCO. + + Args: + json_output_path: String containing the output file's path. It can be also + None. In that case nothing will be written to the output file. + """ + if json_output_path and json_output_path is not None: + pattern = re.compile(r'\d+\.\d{8,}') + def mround(match): + return '{:.2f}'.format(float(match.group())) + + with tf.io.gfile.GFile(json_output_path, 'w') as fid: + json_string = json.dumps(self._detection_masks_list) + fid.write(re.sub(pattern, mround, json_string)) + + tf.logging.info('Dumping detections to output json file: %s', + json_output_path) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_evaluation_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_evaluation_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0a095b8abb58c5be3b4c93107c52d8da0e11f5c4 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_evaluation_test.py @@ -0,0 +1,182 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for tensorflow_models.object_detection.metrics.coco_evaluation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import numpy as np +import tensorflow.compat.v1 as tf +from object_detection.core import standard_fields as fields +from object_detection.metrics import lvis_evaluation +from object_detection.utils import tf_version + + +def _get_categories_list(): + return [{ + 'id': 1, + 'name': 'person', + 'frequency': 'f' + }, { + 'id': 2, + 'name': 'dog', + 'frequency': 'c' + }, { + 'id': 3, + 'name': 'cat', + 'frequency': 'r' + }] + + +class LvisMaskEvaluationTest(tf.test.TestCase): + + def testGetOneMAPWithMatchingGroundtruthAndDetections(self): + """Tests that mAP is calculated correctly on GT and Detections.""" + masks1 = np.expand_dims(np.pad( + np.ones([100, 100], dtype=np.uint8), + ((100, 56), (100, 56)), mode='constant'), axis=0) + masks2 = np.expand_dims(np.pad( + np.ones([50, 50], dtype=np.uint8), + ((50, 156), (50, 156)), mode='constant'), axis=0) + masks3 = np.expand_dims(np.pad( + np.ones([25, 25], dtype=np.uint8), + ((25, 206), (25, 206)), mode='constant'), axis=0) + + lvis_evaluator = lvis_evaluation.LVISMaskEvaluator( + _get_categories_list()) + lvis_evaluator.add_single_ground_truth_image_info( + image_id='image1', + groundtruth_dict={ + fields.InputDataFields.groundtruth_boxes: + np.array([[100., 100., 200., 200.]]), + fields.InputDataFields.groundtruth_classes: np.array([1]), + fields.InputDataFields.groundtruth_instance_masks: masks1, + fields.InputDataFields.groundtruth_verified_neg_classes: + np.array([0, 0, 0]), + fields.InputDataFields.groundtruth_not_exhaustive_classes: + np.array([0, 0, 0]) + }) + lvis_evaluator.add_single_detected_image_info( + image_id='image1', + detections_dict={ + fields.DetectionResultFields.detection_masks: masks1, + fields.DetectionResultFields.detection_scores: + np.array([.8]), + fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + lvis_evaluator.add_single_ground_truth_image_info( + image_id='image2', + groundtruth_dict={ + fields.InputDataFields.groundtruth_boxes: + np.array([[50., 50., 100., 100.]]), + fields.InputDataFields.groundtruth_classes: np.array([1]), + fields.InputDataFields.groundtruth_instance_masks: masks2, + fields.InputDataFields.groundtruth_verified_neg_classes: + np.array([0, 0, 0]), + fields.InputDataFields.groundtruth_not_exhaustive_classes: + np.array([0, 0, 0]) + }) + lvis_evaluator.add_single_detected_image_info( + image_id='image2', + detections_dict={ + fields.DetectionResultFields.detection_masks: masks2, + fields.DetectionResultFields.detection_scores: + np.array([.8]), + fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + lvis_evaluator.add_single_ground_truth_image_info( + image_id='image3', + groundtruth_dict={ + fields.InputDataFields.groundtruth_boxes: + np.array([[25., 25., 50., 50.]]), + fields.InputDataFields.groundtruth_classes: np.array([1]), + fields.InputDataFields.groundtruth_instance_masks: masks3, + fields.InputDataFields.groundtruth_verified_neg_classes: + np.array([0, 0, 0]), + fields.InputDataFields.groundtruth_not_exhaustive_classes: + np.array([0, 0, 0]) + }) + lvis_evaluator.add_single_detected_image_info( + image_id='image3', + detections_dict={ + fields.DetectionResultFields.detection_masks: masks3, + fields.DetectionResultFields.detection_scores: + np.array([.8]), + 
fields.DetectionResultFields.detection_classes: + np.array([1]) + }) + metrics = lvis_evaluator.evaluate() + self.assertAlmostEqual(metrics['DetectionMasks_AP'], 1.0) + + +@unittest.skipIf(tf_version.is_tf1(), 'Only Supported in TF2.X') +class LVISMaskEvaluationPyFuncTest(tf.test.TestCase): + + def testAddEvalDict(self): + lvis_evaluator = lvis_evaluation.LVISMaskEvaluator(_get_categories_list()) + image_id = tf.constant('image1', dtype=tf.string) + groundtruth_boxes = tf.constant( + np.array([[100., 100., 200., 200.], [50., 50., 100., 100.]]), + dtype=tf.float32) + groundtruth_classes = tf.constant(np.array([1, 2]), dtype=tf.float32) + groundtruth_masks = tf.constant(np.stack([ + np.pad(np.ones([100, 100], dtype=np.uint8), ((10, 10), (10, 10)), + mode='constant'), + np.pad(np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)), + mode='constant') + ]), dtype=tf.uint8) + original_image_spatial_shapes = tf.constant([[120, 120], [120, 120]], + dtype=tf.int32) + groundtruth_verified_neg_classes = tf.constant(np.array([0, 0, 0]), + dtype=tf.float32) + groundtruth_not_exhaustive_classes = tf.constant(np.array([0, 0, 0]), + dtype=tf.float32) + detection_scores = tf.constant(np.array([.9, .8]), dtype=tf.float32) + detection_classes = tf.constant(np.array([2, 1]), dtype=tf.float32) + detection_masks = tf.constant(np.stack([ + np.pad(np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)), + mode='constant'), + np.pad(np.ones([100, 100], dtype=np.uint8), ((10, 10), (10, 10)), + mode='constant'), + ]), dtype=tf.uint8) + + input_data_fields = fields.InputDataFields + detection_fields = fields.DetectionResultFields + eval_dict = { + input_data_fields.key: image_id, + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes, + input_data_fields.groundtruth_instance_masks: groundtruth_masks, + input_data_fields.groundtruth_verified_neg_classes: + groundtruth_verified_neg_classes, + input_data_fields.groundtruth_not_exhaustive_classes: + groundtruth_not_exhaustive_classes, + input_data_fields.original_image_spatial_shape: + original_image_spatial_shapes, + detection_fields.detection_scores: detection_scores, + detection_fields.detection_classes: detection_classes, + detection_fields.detection_masks: detection_masks + } + lvis_evaluator.add_eval_dict(eval_dict) + self.assertLen(lvis_evaluator._groundtruth_list, 2) + self.assertLen(lvis_evaluator._detection_masks_list, 2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_tools.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..92a102efd89b7be4e24a50f92b9d542e451d5952 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_tools.py @@ -0,0 +1,259 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Wrappers for third party lvis to be used within object_detection. + +Usage example: given a set of images with ids in the list image_ids +and corresponding lists of numpy arrays encoding groundtruth (boxes, +masks and classes) and detections (masks, scores and classes), where +elements of each list correspond to detections/annotations of a single image, +then evaluation can be invoked as follows: + + groundtruth = lvis_tools.LVISWrapper(groundtruth_dict) + detections = lvis_results.LVISResults(groundtruth, detections_list) + evaluator = lvis_tools.LVISEvalWrapper(groundtruth, detections, + iou_type='segm') + summary_metrics = evaluator.ComputeMetrics() + +TODO(jonathanhuang): Add support for exporting to JSON. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +from lvis import eval as lvis_eval +from lvis import lvis +import numpy as np +from pycocotools import mask +import six +from six.moves import range + + +def RleCompress(masks): + """Compresses mask using Run-length encoding provided by pycocotools. + + Args: + masks: uint8 numpy array of shape [mask_height, mask_width] with values in + {0, 1}. + + Returns: + A pycocotools Run-length encoding of the mask. + """ + rle = mask.encode(np.asfortranarray(masks)) + rle['counts'] = six.ensure_str(rle['counts']) + return rle + + +def _ConvertBoxToCOCOFormat(box): + """Converts a box in [ymin, xmin, ymax, xmax] format to COCO format. + + This is a utility function for converting from our internal + [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API + i.e., [xmin, ymin, width, height]. + + Args: + box: a [ymin, xmin, ymax, xmax] numpy array + + Returns: + a list of floats representing [xmin, ymin, width, height] + """ + return [float(box[1]), float(box[0]), float(box[3] - box[1]), + float(box[2] - box[0])] + + +class LVISWrapper(lvis.LVIS): + """Wrapper for the lvis.LVIS class.""" + + def __init__(self, dataset, detection_type='bbox'): + """LVISWrapper constructor. + + See https://www.lvisdataset.org/dataset for a description of the format. + By default, the coco.COCO class constructor reads from a JSON file. + This function duplicates the same behavior but loads from a dictionary, + allowing us to perform evaluation without writing to external storage. + + Args: + dataset: a dictionary holding bounding box annotations in the COCO format. + detection_type: type of detections being wrapped. Can be one of ['bbox', + 'segmentation'] + + Raises: + ValueError: if detection_type is unsupported. + """ + self.logger = logging.getLogger(__name__) + self.logger.info('Loading annotations.') + self.dataset = dataset + self._create_index() + + +class LVISEvalWrapper(lvis_eval.LVISEval): + """LVISEval wrapper.""" + + def __init__(self, groundtruth=None, detections=None, iou_type='bbox'): + lvis_eval.LVISEval.__init__( + self, groundtruth, detections, iou_type=iou_type) + self._iou_type = iou_type + + def ComputeMetrics(self): + self.run() + summary_metrics = {} + summary_metrics = self.results + return summary_metrics + + +def ExportSingleImageGroundtruthToLVIS(image_id, + next_annotation_id, + category_id_set, + groundtruth_boxes, + groundtruth_classes, + groundtruth_masks=None, + groundtruth_area=None): + """Export groundtruth of a single image to LVIS format. 
+ + This function converts groundtruth detection annotations represented as numpy + arrays to dictionaries that can be ingested by the LVIS evaluation API. Note + that the image_ids provided here must match the ones given to + ExportSingleImageDetectionMasksToLVIS. We assume that boxes, classes and masks + are in correspondence - that is, e.g., groundtruth_boxes[i, :], and + groundtruth_classes[i] are associated with the same groundtruth annotation. + + In the exported result, "area" fields are always set to the area of the + groundtruth bounding box. + + Args: + image_id: a unique image identifier either of type integer or string. + next_annotation_id: integer specifying the first id to use for the + groundtruth annotations. All annotations are assigned a continuous integer + id starting from this value. + category_id_set: A set of valid class ids. Groundtruth with classes not in + category_id_set are dropped. + groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4] + groundtruth_classes: numpy array (int) with shape [num_gt_boxes] + groundtruth_masks: optional uint8 numpy array of shape [num_detections, + image_height, image_width] containing detection_masks. + groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If + provided, then the area values (in the original absolute coordinates) will + be populated instead of calculated from bounding box coordinates. + + Returns: + a list of groundtruth annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the + right lengths or (2) if each of the elements inside these lists do not + have the correct shapes or (3) if image_ids are not integers + """ + + if len(groundtruth_classes.shape) != 1: + raise ValueError('groundtruth_classes is ' + 'expected to be of rank 1.') + if len(groundtruth_boxes.shape) != 2: + raise ValueError('groundtruth_boxes is expected to be of ' + 'rank 2.') + if groundtruth_boxes.shape[1] != 4: + raise ValueError('groundtruth_boxes should have ' + 'shape[1] == 4.') + num_boxes = groundtruth_classes.shape[0] + if num_boxes != groundtruth_boxes.shape[0]: + raise ValueError('Corresponding entries in groundtruth_classes, ' + 'and groundtruth_boxes should have ' + 'compatible shapes (i.e., agree on the 0th dimension).' + 'Classes shape: %d. Boxes shape: %d. Image ID: %s' % ( + groundtruth_classes.shape[0], + groundtruth_boxes.shape[0], image_id)) + + groundtruth_list = [] + for i in range(num_boxes): + if groundtruth_classes[i] in category_id_set: + if groundtruth_area is not None and groundtruth_area[i] > 0: + area = float(groundtruth_area[i]) + else: + area = float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) * + (groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1])) + export_dict = { + 'id': + next_annotation_id + i, + 'image_id': + image_id, + 'category_id': + int(groundtruth_classes[i]), + 'bbox': + list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])), + 'area': area, + } + if groundtruth_masks is not None: + export_dict['segmentation'] = RleCompress(groundtruth_masks[i]) + + groundtruth_list.append(export_dict) + return groundtruth_list + + +def ExportSingleImageDetectionMasksToLVIS(image_id, + category_id_set, + detection_masks, + detection_scores, + detection_classes): + """Export detection masks of a single image to LVIS format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the LVIS evaluation API. 
We assume that + detection_masks, detection_scores, and detection_classes are in correspondence + - that is: detection_masks[i, :], detection_classes[i] and detection_scores[i] + are associated with the same annotation. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. + detection_masks: uint8 numpy array of shape [num_detections, image_height, + image_width] containing detection_masks. + detection_scores: float numpy array of shape [num_detections] containing + scores for detection masks. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection masks. + + Returns: + a list of detection mask annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_masks, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. + """ + + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError('All entries in detection_classes and detection_scores' + 'expected to be of rank 1.') + num_boxes = detection_classes.shape[0] + if not num_boxes == len(detection_masks) == detection_scores.shape[0]: + raise ValueError('Corresponding entries in detection_classes, ' + 'detection_scores and detection_masks should have ' + 'compatible lengths and shapes ' + 'Classes length: %d. Masks length: %d. ' + 'Scores length: %d' % ( + detection_classes.shape[0], len(detection_masks), + detection_scores.shape[0] + )) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + detections_list.append({ + 'image_id': image_id, + 'category_id': int(detection_classes[i]), + 'segmentation': RleCompress(detection_masks[i]), + 'score': float(detection_scores[i]) + }) + return detections_list diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_tools_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_tools_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6c6ef107ad113120d48bbafba9d7ae1971092463 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/lvis_tools_test.py @@ -0,0 +1,158 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for tensorflow_model.object_detection.metrics.lvis_tools.""" +from lvis import results as lvis_results +import numpy as np +from pycocotools import mask +import tensorflow.compat.v1 as tf +from object_detection.metrics import lvis_tools + + +class LVISToolsTest(tf.test.TestCase): + + def setUp(self): + super(LVISToolsTest, self).setUp() + mask1 = np.pad( + np.ones([100, 100], dtype=np.uint8), + ((100, 56), (100, 56)), mode='constant') + mask2 = np.pad( + np.ones([50, 50], dtype=np.uint8), + ((50, 156), (50, 156)), mode='constant') + mask1_rle = lvis_tools.RleCompress(mask1) + mask2_rle = lvis_tools.RleCompress(mask2) + groundtruth_annotations_list = [ + { + 'id': 1, + 'image_id': 'first', + 'category_id': 1, + 'bbox': [100., 100., 100., 100.], + 'area': 100.**2, + 'segmentation': mask1_rle + }, + { + 'id': 2, + 'image_id': 'second', + 'category_id': 1, + 'bbox': [50., 50., 50., 50.], + 'area': 50.**2, + 'segmentation': mask2_rle + }, + ] + image_list = [ + { + 'id': 'first', + 'neg_category_ids': [], + 'not_exhaustive_category_ids': [], + 'height': 256, + 'width': 256 + }, + { + 'id': 'second', + 'neg_category_ids': [], + 'not_exhaustive_category_ids': [], + 'height': 256, + 'width': 256 + } + ] + category_list = [{'id': 0, 'name': 'person', 'frequency': 'f'}, + {'id': 1, 'name': 'cat', 'frequency': 'c'}, + {'id': 2, 'name': 'dog', 'frequency': 'r'}] + self._groundtruth_dict = { + 'annotations': groundtruth_annotations_list, + 'images': image_list, + 'categories': category_list + } + + self._detections_list = [ + { + 'image_id': 'first', + 'category_id': 1, + 'segmentation': mask1_rle, + 'score': .8 + }, + { + 'image_id': 'second', + 'category_id': 1, + 'segmentation': mask2_rle, + 'score': .7 + }, + ] + + def testLVISWrappers(self): + groundtruth = lvis_tools.LVISWrapper(self._groundtruth_dict) + detections = lvis_results.LVISResults(groundtruth, self._detections_list) + evaluator = lvis_tools.LVISEvalWrapper(groundtruth, detections, + iou_type='segm') + summary_metrics = evaluator.ComputeMetrics() + self.assertAlmostEqual(1.0, summary_metrics['AP']) + + def testSingleImageDetectionMaskExport(self): + masks = np.array( + [[[1, 1,], [1, 1]], + [[0, 0], [0, 1]], + [[0, 0], [0, 0]]], dtype=np.uint8) + classes = np.array([1, 2, 3], dtype=np.int32) + scores = np.array([0.8, 0.2, 0.7], dtype=np.float32) + lvis_annotations = lvis_tools.ExportSingleImageDetectionMasksToLVIS( + image_id='first_image', + category_id_set=set([1, 2, 3]), + detection_classes=classes, + detection_scores=scores, + detection_masks=masks) + expected_counts = ['04', '31', '4'] + for i, mask_annotation in enumerate(lvis_annotations): + self.assertEqual(mask_annotation['segmentation']['counts'], + expected_counts[i]) + self.assertTrue(np.all(np.equal(mask.decode( + mask_annotation['segmentation']), masks[i]))) + self.assertEqual(mask_annotation['image_id'], 'first_image') + self.assertEqual(mask_annotation['category_id'], classes[i]) + self.assertAlmostEqual(mask_annotation['score'], scores[i]) + + def testSingleImageGroundtruthExport(self): + masks = np.array( + [[[1, 1,], [1, 1]], + [[0, 0], [0, 1]], + [[0, 0], [0, 0]]], dtype=np.uint8) + boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, 1, 1]], dtype=np.float32) + lvis_boxes = np.array([[0, 0, 1, 1], + [0, 0, .5, .5], + [.5, .5, .5, .5]], dtype=np.float32) + classes = np.array([1, 2, 3], dtype=np.int32) + next_annotation_id = 1 + expected_counts = ['04', '31', '4'] + + 
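+    # The expected 'counts' strings are the run-length encodings produced by
+    # RleCompress (pycocotools mask.encode) for the three 2x2 masks above.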
lvis_annotations = lvis_tools.ExportSingleImageGroundtruthToLVIS( + image_id='first_image', + category_id_set=set([1, 2, 3]), + next_annotation_id=next_annotation_id, + groundtruth_boxes=boxes, + groundtruth_classes=classes, + groundtruth_masks=masks) + for i, annotation in enumerate(lvis_annotations): + self.assertEqual(annotation['segmentation']['counts'], + expected_counts[i]) + self.assertTrue(np.all(np.equal(mask.decode( + annotation['segmentation']), masks[i]))) + self.assertTrue(np.all(np.isclose(annotation['bbox'], lvis_boxes[i]))) + self.assertEqual(annotation['image_id'], 'first_image') + self.assertEqual(annotation['category_id'], classes[i]) + self.assertEqual(annotation['id'], i + next_annotation_id) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/offline_eval_map_corloc.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/offline_eval_map_corloc.py new file mode 100644 index 0000000000000000000000000000000000000000..a12b1d98493e022d302c76b0cadb514e7fc0eb60 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/offline_eval_map_corloc.py @@ -0,0 +1,171 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Evaluation executable for detection data. + +This executable evaluates precomputed detections produced by a detection +model and writes the evaluation results into csv file metrics.csv, stored +in the directory, specified by --eval_dir. + +The evaluation metrics set is supplied in object_detection.protos.EvalConfig +in metrics_set field. +Currently two set of metrics are supported: +- pascal_voc_metrics: standard PASCAL VOC 2007 metric +- open_images_detection_metrics: Open Image V2 metric +All other field of object_detection.protos.EvalConfig are ignored. 
+ +Example usage: + ./compute_metrics \ + --eval_dir=path/to/eval_dir \ + --eval_config_path=path/to/evaluation/configuration/file \ + --input_config_path=path/to/input/configuration/file +""" +import csv +import os +import re +import tensorflow.compat.v1 as tf + +from object_detection import eval_util +from object_detection.core import standard_fields +from object_detection.metrics import tf_example_parser +from object_detection.utils import config_util +from object_detection.utils import label_map_util + +flags = tf.app.flags +tf.logging.set_verbosity(tf.logging.INFO) + +flags.DEFINE_string('eval_dir', None, 'Directory to write eval summaries to.') +flags.DEFINE_string('eval_config_path', None, + 'Path to an eval_pb2.EvalConfig config file.') +flags.DEFINE_string('input_config_path', None, + 'Path to an eval_pb2.InputConfig config file.') + +FLAGS = flags.FLAGS + + +def _generate_sharded_filenames(filename): + m = re.search(r'@(\d{1,})', filename) + if m: + num_shards = int(m.group(1)) + return [ + re.sub(r'@(\d{1,})', '-%.5d-of-%.5d' % (i, num_shards), filename) + for i in range(num_shards) + ] + else: + return [filename] + + +def _generate_filenames(filenames): + result = [] + for filename in filenames: + result += _generate_sharded_filenames(filename) + return result + + +def read_data_and_evaluate(input_config, eval_config): + """Reads pre-computed object detections and groundtruth from tf_record. + + Args: + input_config: input config proto of type + object_detection.protos.InputReader. + eval_config: evaluation config proto of type + object_detection.protos.EvalConfig. + + Returns: + Evaluated detections metrics. + + Raises: + ValueError: if input_reader type is not supported or metric type is unknown. + """ + if input_config.WhichOneof('input_reader') == 'tf_record_input_reader': + input_paths = input_config.tf_record_input_reader.input_path + + categories = label_map_util.create_categories_from_labelmap( + input_config.label_map_path) + + object_detection_evaluators = eval_util.get_evaluators( + eval_config, categories) + # Support a single evaluator + object_detection_evaluator = object_detection_evaluators[0] + + skipped_images = 0 + processed_images = 0 + for input_path in _generate_filenames(input_paths): + tf.logging.info('Processing file: {0}'.format(input_path)) + + record_iterator = tf.python_io.tf_record_iterator(path=input_path) + data_parser = tf_example_parser.TfExampleDetectionAndGTParser() + + for string_record in record_iterator: + tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000, + processed_images) + processed_images += 1 + + example = tf.train.Example() + example.ParseFromString(string_record) + decoded_dict = data_parser.parse(example) + + if decoded_dict: + object_detection_evaluator.add_single_ground_truth_image_info( + decoded_dict[standard_fields.DetectionResultFields.key], + decoded_dict) + object_detection_evaluator.add_single_detected_image_info( + decoded_dict[standard_fields.DetectionResultFields.key], + decoded_dict) + else: + skipped_images += 1 + tf.logging.info('Skipped images: {0}'.format(skipped_images)) + + return object_detection_evaluator.evaluate() + + raise ValueError('Unsupported input_reader_config.') + + +def write_metrics(metrics, output_dir): + """Write metrics to the output directory. + + Args: + metrics: A dictionary containing metric names and values. + output_dir: Directory to write metrics to. 
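+
+  The metrics are written to a single file named 'metrics.csv' inside
+  output_dir.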
+ """ + tf.logging.info('Writing metrics.') + + with open(os.path.join(output_dir, 'metrics.csv'), 'w') as csvfile: + metrics_writer = csv.writer(csvfile, delimiter=',') + for metric_name, metric_value in metrics.items(): + metrics_writer.writerow([metric_name, str(metric_value)]) + + +def main(argv): + del argv + required_flags = ['input_config_path', 'eval_config_path', 'eval_dir'] + for flag_name in required_flags: + if not getattr(FLAGS, flag_name): + raise ValueError('Flag --{} is required'.format(flag_name)) + + configs = config_util.get_configs_from_multiple_files( + eval_input_config_path=FLAGS.input_config_path, + eval_config_path=FLAGS.eval_config_path) + + eval_config = configs['eval_config'] + input_config = configs['eval_input_config'] + + metrics = read_data_and_evaluate(input_config, eval_config) + + # Save metrics + write_metrics(metrics, FLAGS.eval_dir) + + +if __name__ == '__main__': + tf.app.run(main) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/offline_eval_map_corloc_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/offline_eval_map_corloc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9641dfb2d189d7aaa2c39246d143e97f68c8dbae --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/offline_eval_map_corloc_test.py @@ -0,0 +1,58 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for utilities in offline_eval_map_corloc binary.""" + +import tensorflow.compat.v1 as tf + +from object_detection.metrics import offline_eval_map_corloc as offline_eval + + +class OfflineEvalMapCorlocTest(tf.test.TestCase): + + def test_generateShardedFilenames(self): + test_filename = '/path/to/file' + result = offline_eval._generate_sharded_filenames(test_filename) + self.assertEqual(result, [test_filename]) + + test_filename = '/path/to/file-00000-of-00050' + result = offline_eval._generate_sharded_filenames(test_filename) + self.assertEqual(result, [test_filename]) + + result = offline_eval._generate_sharded_filenames('/path/to/@3.record') + self.assertEqual(result, [ + '/path/to/-00000-of-00003.record', '/path/to/-00001-of-00003.record', + '/path/to/-00002-of-00003.record' + ]) + + result = offline_eval._generate_sharded_filenames('/path/to/abc@3') + self.assertEqual(result, [ + '/path/to/abc-00000-of-00003', '/path/to/abc-00001-of-00003', + '/path/to/abc-00002-of-00003' + ]) + + result = offline_eval._generate_sharded_filenames('/path/to/@1') + self.assertEqual(result, ['/path/to/-00000-of-00001']) + + def test_generateFilenames(self): + test_filenames = ['/path/to/file', '/path/to/@3.record'] + result = offline_eval._generate_filenames(test_filenames) + self.assertEqual(result, [ + '/path/to/file', '/path/to/-00000-of-00003.record', + '/path/to/-00001-of-00003.record', '/path/to/-00002-of-00003.record' + ]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_challenge_evaluation.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_challenge_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..25f553a917fb5120ad7fed8f0a9cc43c78092d5e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_challenge_evaluation.py @@ -0,0 +1,149 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Runs evaluation using OpenImages groundtruth and predictions. + +Uses Open Images Challenge 2018, 2019 metrics + +Example usage: +python models/research/object_detection/metrics/oid_od_challenge_evaluation.py \ + --input_annotations_boxes=/path/to/input/annotations-human-bbox.csv \ + --input_annotations_labels=/path/to/input/annotations-label.csv \ + --input_class_labelmap=/path/to/input/class_labelmap.pbtxt \ + --input_predictions=/path/to/input/predictions.csv \ + --output_metrics=/path/to/output/metric.csv \ + --input_annotations_segm=[/path/to/input/annotations-human-mask.csv] \ + +If optional flag has_masks is True, Mask column is also expected in CSV. 
+ +CSVs with bounding box annotations, instance segmentations and image label +can be downloaded from the Open Images Challenge website: +https://storage.googleapis.com/openimages/web/challenge.html +The format of the input csv and the metrics itself are described on the +challenge website as well. + + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +from absl import app +from absl import flags +import pandas as pd +from google.protobuf import text_format + +from object_detection.metrics import io_utils +from object_detection.metrics import oid_challenge_evaluation_utils as utils +from object_detection.protos import string_int_label_map_pb2 +from object_detection.utils import object_detection_evaluation + +flags.DEFINE_string('input_annotations_boxes', None, + 'File with groundtruth boxes annotations.') +flags.DEFINE_string('input_annotations_labels', None, + 'File with groundtruth labels annotations.') +flags.DEFINE_string( + 'input_predictions', None, + """File with detection predictions; NOTE: no postprocessing is applied in the evaluation script.""" +) +flags.DEFINE_string('input_class_labelmap', None, + 'Open Images Challenge labelmap.') +flags.DEFINE_string('output_metrics', None, 'Output file with csv metrics.') +flags.DEFINE_string( + 'input_annotations_segm', None, + 'File with groundtruth instance segmentation annotations [OPTIONAL].') + +FLAGS = flags.FLAGS + + +def _load_labelmap(labelmap_path): + """Loads labelmap from the labelmap path. + + Args: + labelmap_path: Path to the labelmap. + + Returns: + A dictionary mapping class name to class numerical id + A list with dictionaries, one dictionary per category. + """ + + label_map = string_int_label_map_pb2.StringIntLabelMap() + with open(labelmap_path, 'r') as fid: + label_map_string = fid.read() + text_format.Merge(label_map_string, label_map) + labelmap_dict = {} + categories = [] + for item in label_map.item: + labelmap_dict[item.name] = item.id + categories.append({'id': item.id, 'name': item.name}) + return labelmap_dict, categories + + +def main(unused_argv): + flags.mark_flag_as_required('input_annotations_boxes') + flags.mark_flag_as_required('input_annotations_labels') + flags.mark_flag_as_required('input_predictions') + flags.mark_flag_as_required('input_class_labelmap') + flags.mark_flag_as_required('output_metrics') + + all_location_annotations = pd.read_csv(FLAGS.input_annotations_boxes) + all_label_annotations = pd.read_csv(FLAGS.input_annotations_labels) + all_label_annotations.rename( + columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True) + + is_instance_segmentation_eval = False + if FLAGS.input_annotations_segm: + is_instance_segmentation_eval = True + all_segm_annotations = pd.read_csv(FLAGS.input_annotations_segm) + # Note: this part is unstable as it requires the float point numbers in both + # csvs are exactly the same; + # Will be replaced by more stable solution: merge on LabelName and ImageID + # and filter down by IoU. 
+ all_location_annotations = utils.merge_boxes_and_masks( + all_location_annotations, all_segm_annotations) + all_annotations = pd.concat([all_location_annotations, all_label_annotations]) + + class_label_map, categories = _load_labelmap(FLAGS.input_class_labelmap) + challenge_evaluator = ( + object_detection_evaluation.OpenImagesChallengeEvaluator( + categories, evaluate_masks=is_instance_segmentation_eval)) + + all_predictions = pd.read_csv(FLAGS.input_predictions) + images_processed = 0 + for _, groundtruth in enumerate(all_annotations.groupby('ImageID')): + logging.info('Processing image %d', images_processed) + image_id, image_groundtruth = groundtruth + groundtruth_dictionary = utils.build_groundtruth_dictionary( + image_groundtruth, class_label_map) + challenge_evaluator.add_single_ground_truth_image_info( + image_id, groundtruth_dictionary) + + prediction_dictionary = utils.build_predictions_dictionary( + all_predictions.loc[all_predictions['ImageID'] == image_id], + class_label_map) + challenge_evaluator.add_single_detected_image_info(image_id, + prediction_dictionary) + images_processed += 1 + + metrics = challenge_evaluator.evaluate() + + with open(FLAGS.output_metrics, 'w') as fid: + io_utils.write_csv(fid, metrics) + + +if __name__ == '__main__': + app.run(main) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_challenge_evaluation_utils.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_challenge_evaluation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..844cce3e8f362c2c15403269584149878f60bc51 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_challenge_evaluation_utils.py @@ -0,0 +1,197 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Converts data from CSV to the OpenImagesDetectionChallengeEvaluator format.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import base64 +import zlib + +import numpy as np +import pandas as pd +from pycocotools import mask as coco_mask + +from object_detection.core import standard_fields + + +def _to_normalized_box(mask_np): + """Decodes binary segmentation masks into np.arrays and boxes. + + Args: + mask_np: np.ndarray of size NxWxH. + + Returns: + a np.ndarray of the size Nx4, each row containing normalized coordinates + [YMin, XMin, YMax, XMax] of a box computed of axis parallel enclosing box of + a mask. 
+ """ + coord1, coord2 = np.nonzero(mask_np) + if coord1.size > 0: + ymin = float(min(coord1)) / mask_np.shape[0] + ymax = float(max(coord1) + 1) / mask_np.shape[0] + xmin = float(min(coord2)) / mask_np.shape[1] + xmax = float((max(coord2) + 1)) / mask_np.shape[1] + + return np.array([ymin, xmin, ymax, xmax]) + else: + return np.array([0.0, 0.0, 0.0, 0.0]) + + +def _decode_raw_data_into_masks_and_boxes(segments, image_widths, + image_heights): + """Decods binary segmentation masks into np.arrays and boxes. + + Args: + segments: pandas Series object containing either + None entries, or strings with + base64, zlib compressed, COCO RLE-encoded binary masks. + All masks are expected to be the same size. + image_widths: pandas Series of mask widths. + image_heights: pandas Series of mask heights. + + Returns: + a np.ndarray of the size NxWxH, where W and H is determined from the encoded + masks; for the None values, zero arrays of size WxH are created. If input + contains only None values, W=1, H=1. + """ + segment_masks = [] + segment_boxes = [] + ind = segments.first_valid_index() + if ind is not None: + size = [int(image_heights[ind]), int(image_widths[ind])] + else: + # It does not matter which size we pick since no masks will ever be + # evaluated. + return np.zeros((segments.shape[0], 1, 1), dtype=np.uint8), np.zeros( + (segments.shape[0], 4), dtype=np.float32) + + for segment, im_width, im_height in zip(segments, image_widths, + image_heights): + if pd.isnull(segment): + segment_masks.append(np.zeros([1, size[0], size[1]], dtype=np.uint8)) + segment_boxes.append(np.expand_dims(np.array([0.0, 0.0, 0.0, 0.0]), 0)) + else: + compressed_mask = base64.b64decode(segment) + rle_encoded_mask = zlib.decompress(compressed_mask) + decoding_dict = { + 'size': [im_height, im_width], + 'counts': rle_encoded_mask + } + mask_tensor = coco_mask.decode(decoding_dict) + + segment_masks.append(np.expand_dims(mask_tensor, 0)) + segment_boxes.append(np.expand_dims(_to_normalized_box(mask_tensor), 0)) + + return np.concatenate( + segment_masks, axis=0), np.concatenate( + segment_boxes, axis=0) + + +def merge_boxes_and_masks(box_data, mask_data): + return pd.merge( + box_data, + mask_data, + how='outer', + on=['LabelName', 'ImageID', 'XMin', 'XMax', 'YMin', 'YMax', 'IsGroupOf']) + + +def build_groundtruth_dictionary(data, class_label_map): + """Builds a groundtruth dictionary from groundtruth data in CSV file. + + Args: + data: Pandas DataFrame with the groundtruth data for a single image. + class_label_map: Class labelmap from string label name to an integer. + + Returns: + A dictionary with keys suitable for passing to + OpenImagesDetectionChallengeEvaluator.add_single_ground_truth_image_info: + standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array + of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of + the format [ymin, xmin, ymax, xmax] in absolute image coordinates. + standard_fields.InputDataFields.groundtruth_classes: integer numpy array + of shape [num_boxes] containing 1-indexed groundtruth classes for the + boxes. + standard_fields.InputDataFields.verified_labels: integer 1D numpy array + containing all classes for which labels are verified. + standard_fields.InputDataFields.groundtruth_group_of: Optional length + M numpy boolean array denoting whether a groundtruth box contains a + group of instances. 
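+
+  Implementation note: rows with a non-null XMin are treated as box
+  annotations, while rows with a non-null ConfidenceImageLabel are treated as
+  image-level labels.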
+ """ + data_location = data[data.XMin.notnull()] + data_labels = data[data.ConfidenceImageLabel.notnull()] + + dictionary = { + standard_fields.InputDataFields.groundtruth_boxes: + data_location[['YMin', 'XMin', 'YMax', 'XMax']].to_numpy(), + standard_fields.InputDataFields.groundtruth_classes: + data_location['LabelName'].map(lambda x: class_label_map[x] + ).to_numpy(), + standard_fields.InputDataFields.groundtruth_group_of: + data_location['IsGroupOf'].to_numpy().astype(int), + standard_fields.InputDataFields.groundtruth_image_classes: + data_labels['LabelName'].map(lambda x: class_label_map[x] + ).to_numpy(), + } + + if 'Mask' in data_location: + segments, _ = _decode_raw_data_into_masks_and_boxes( + data_location['Mask'], data_location['ImageWidth'], + data_location['ImageHeight']) + dictionary[ + standard_fields.InputDataFields.groundtruth_instance_masks] = segments + + return dictionary + + +def build_predictions_dictionary(data, class_label_map): + """Builds a predictions dictionary from predictions data in CSV file. + + Args: + data: Pandas DataFrame with the predictions data for a single image. + class_label_map: Class labelmap from string label name to an integer. + + Returns: + Dictionary with keys suitable for passing to + OpenImagesDetectionChallengeEvaluator.add_single_detected_image_info: + standard_fields.DetectionResultFields.detection_boxes: float32 numpy + array of shape [num_boxes, 4] containing `num_boxes` detection boxes + of the format [ymin, xmin, ymax, xmax] in absolute image coordinates. + standard_fields.DetectionResultFields.detection_scores: float32 numpy + array of shape [num_boxes] containing detection scores for the boxes. + standard_fields.DetectionResultFields.detection_classes: integer numpy + array of shape [num_boxes] containing 1-indexed detection classes for + the boxes. + + """ + dictionary = { + standard_fields.DetectionResultFields.detection_classes: + data['LabelName'].map(lambda x: class_label_map[x]).to_numpy(), + standard_fields.DetectionResultFields.detection_scores: + data['Score'].to_numpy() + } + + if 'Mask' in data: + segments, boxes = _decode_raw_data_into_masks_and_boxes( + data['Mask'], data['ImageWidth'], data['ImageHeight']) + dictionary[standard_fields.DetectionResultFields.detection_masks] = segments + dictionary[standard_fields.DetectionResultFields.detection_boxes] = boxes + else: + dictionary[standard_fields.DetectionResultFields.detection_boxes] = data[[ + 'YMin', 'XMin', 'YMax', 'XMax' + ]].to_numpy() + + return dictionary diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_challenge_evaluation_utils_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_challenge_evaluation_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..94a1da0327e6fc56981aea7fb0100d16ac681f74 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_challenge_evaluation_utils_test.py @@ -0,0 +1,308 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for oid_od_challenge_evaluation_util.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import base64 +import zlib + +import numpy as np +import pandas as pd +from pycocotools import mask as coco_mask +import six +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields +from object_detection.metrics import oid_challenge_evaluation_utils as utils + + +def encode_mask(mask_to_encode): + """Encodes a binary mask into the Kaggle challenge text format. + + The encoding is done in three stages: + - COCO RLE-encoding, + - zlib compression, + - base64 encoding (to use as entry in csv file). + + Args: + mask_to_encode: binary np.ndarray of dtype bool and 2d shape. + + Returns: + A (base64) text string of the encoded mask. + """ + mask_to_encode = np.squeeze(mask_to_encode) + mask_to_encode = mask_to_encode.reshape(mask_to_encode.shape[0], + mask_to_encode.shape[1], 1) + mask_to_encode = mask_to_encode.astype(np.uint8) + mask_to_encode = np.asfortranarray(mask_to_encode) + encoded_mask = coco_mask.encode(mask_to_encode)[0]['counts'] + compressed_mask = zlib.compress(six.ensure_binary(encoded_mask), + zlib.Z_BEST_COMPRESSION) + base64_mask = base64.b64encode(compressed_mask) + return base64_mask + + +class OidUtilTest(tf.test.TestCase): + + def testMaskToNormalizedBox(self): + mask_np = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]]) + box = utils._to_normalized_box(mask_np) + self.assertAllEqual(np.array([0.25, 0.25, 0.75, 0.5]), box) + mask_np = np.array([[0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 1, 1, 1]]) + box = utils._to_normalized_box(mask_np) + self.assertAllEqual(np.array([0.25, 0.25, 1.0, 1.0]), box) + mask_np = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) + box = utils._to_normalized_box(mask_np) + self.assertAllEqual(np.array([0.0, 0.0, 0.0, 0.0]), box) + + def testDecodeToTensors(self): + mask1 = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0]], dtype=np.uint8) + mask2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=np.uint8) + + encoding1 = encode_mask(mask1) + encoding2 = encode_mask(mask2) + + vals = pd.Series([encoding1, encoding2]) + image_widths = pd.Series([mask1.shape[1], mask2.shape[1]]) + image_heights = pd.Series([mask1.shape[0], mask2.shape[0]]) + + segm, bbox = utils._decode_raw_data_into_masks_and_boxes( + vals, image_widths, image_heights) + expected_segm = np.concatenate( + [np.expand_dims(mask1, 0), + np.expand_dims(mask2, 0)], axis=0) + expected_bbox = np.array([[0.0, 0.5, 2.0 / 3.0, 1.0], [0, 0, 0, 0]]) + self.assertAllEqual(expected_segm, segm) + self.assertAllEqual(expected_bbox, bbox) + + def testDecodeToTensorsNoMasks(self): + vals = pd.Series([None, None]) + image_widths = pd.Series([None, None]) + image_heights = pd.Series([None, None]) + segm, bbox = utils._decode_raw_data_into_masks_and_boxes( + vals, image_widths, image_heights) + self.assertAllEqual(np.zeros((2, 1, 1), dtype=np.uint8), segm) + self.assertAllEqual(np.zeros((2, 4), dtype=np.float32), bbox) + + +class OidChallengeEvaluationUtilTest(tf.test.TestCase): + + def testBuildGroundtruthDictionaryBoxes(self): + np_data = pd.DataFrame( + [['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.3, 0.5, 0.6, 1, None], + ['fe58ec1b06db2bb7', '/m/02gy9n', 0.1, 0.2, 0.3, 
0.4, 0, None], + ['fe58ec1b06db2bb7', '/m/04bcr3', None, None, None, None, None, 1], + ['fe58ec1b06db2bb7', '/m/083vt', None, None, None, None, None, 0], + ['fe58ec1b06db2bb7', '/m/02gy9n', None, None, None, None, None, 1]], + columns=[ + 'ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax', 'IsGroupOf', + 'ConfidenceImageLabel' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3} + groundtruth_dictionary = utils.build_groundtruth_dictionary( + np_data, class_label_map) + + self.assertIn(standard_fields.InputDataFields.groundtruth_boxes, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_classes, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_group_of, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_image_classes, + groundtruth_dictionary) + + self.assertAllEqual( + np.array([1, 3]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_classes]) + self.assertAllEqual( + np.array([1, 0]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_group_of]) + + expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]]) + + self.assertNDArrayNear( + expected_boxes_data, groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_boxes], 1e-5) + self.assertAllEqual( + np.array([1, 2, 3]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_image_classes]) + + def testBuildPredictionDictionaryBoxes(self): + np_data = pd.DataFrame( + [['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.3, 0.5, 0.6, 0.1], + ['fe58ec1b06db2bb7', '/m/02gy9n', 0.1, 0.2, 0.3, 0.4, 0.2], + ['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.1, 0.2, 0.3, 0.3]], + columns=[ + 'ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax', 'Score' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3} + prediction_dictionary = utils.build_predictions_dictionary( + np_data, class_label_map) + + self.assertIn(standard_fields.DetectionResultFields.detection_boxes, + prediction_dictionary) + self.assertIn(standard_fields.DetectionResultFields.detection_classes, + prediction_dictionary) + self.assertIn(standard_fields.DetectionResultFields.detection_scores, + prediction_dictionary) + + self.assertAllEqual( + np.array([1, 3, 1]), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_classes]) + expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2], + [0.2, 0.0, 0.3, 0.1]]) + self.assertNDArrayNear( + expected_boxes_data, prediction_dictionary[ + standard_fields.DetectionResultFields.detection_boxes], 1e-5) + self.assertNDArrayNear( + np.array([0.1, 0.2, 0.3]), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_scores], 1e-5) + + def testBuildGroundtruthDictionaryMasks(self): + mask1 = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + mask2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + + encoding1 = encode_mask(mask1) + encoding2 = encode_mask(mask2) + + np_data = pd.DataFrame( + [[ + 'fe58ec1b06db2bb7', mask1.shape[1], mask1.shape[0], '/m/04bcr3', + 0.0, 0.3, 0.5, 0.6, 0, None, encoding1 + ], + [ + 'fe58ec1b06db2bb7', None, None, '/m/02gy9n', 0.1, 0.2, 0.3, 0.4, 1, + None, None + ], + [ + 'fe58ec1b06db2bb7', mask2.shape[1], mask2.shape[0], '/m/02gy9n', + 0.5, 0.6, 0.8, 0.9, 0, None, encoding2 + ], + [ + 'fe58ec1b06db2bb7', None, None, '/m/04bcr3', None, None, None, + None, None, 1, None + ], + 
[ + 'fe58ec1b06db2bb7', None, None, '/m/083vt', None, None, None, None, + None, 0, None + ], + [ + 'fe58ec1b06db2bb7', None, None, '/m/02gy9n', None, None, None, + None, None, 1, None + ]], + columns=[ + 'ImageID', 'ImageWidth', 'ImageHeight', 'LabelName', 'XMin', 'XMax', + 'YMin', 'YMax', 'IsGroupOf', 'ConfidenceImageLabel', 'Mask' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3} + groundtruth_dictionary = utils.build_groundtruth_dictionary( + np_data, class_label_map) + self.assertIn(standard_fields.InputDataFields.groundtruth_boxes, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_classes, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_group_of, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_image_classes, + groundtruth_dictionary) + self.assertIn(standard_fields.InputDataFields.groundtruth_instance_masks, + groundtruth_dictionary) + self.assertAllEqual( + np.array([1, 3, 3]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_classes]) + self.assertAllEqual( + np.array([0, 1, 0]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_group_of]) + + expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2], + [0.8, 0.5, 0.9, 0.6]]) + + self.assertNDArrayNear( + expected_boxes_data, groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_boxes], 1e-5) + self.assertAllEqual( + np.array([1, 2, 3]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_image_classes]) + + expected_segm = np.concatenate([ + np.expand_dims(mask1, 0), + np.zeros((1, 4, 4), dtype=np.uint8), + np.expand_dims(mask2, 0) + ], + axis=0) + self.assertAllEqual( + expected_segm, groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_instance_masks]) + + def testBuildPredictionDictionaryMasks(self): + mask1 = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + mask2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + + encoding1 = encode_mask(mask1) + encoding2 = encode_mask(mask2) + + np_data = pd.DataFrame([[ + 'fe58ec1b06db2bb7', mask1.shape[1], mask1.shape[0], '/m/04bcr3', + encoding1, 0.8 + ], + [ + 'fe58ec1b06db2bb7', mask2.shape[1], + mask2.shape[0], '/m/02gy9n', encoding2, 0.6 + ]], + columns=[ + 'ImageID', 'ImageWidth', 'ImageHeight', + 'LabelName', 'Mask', 'Score' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/02gy9n': 3} + prediction_dictionary = utils.build_predictions_dictionary( + np_data, class_label_map) + + self.assertIn(standard_fields.DetectionResultFields.detection_boxes, + prediction_dictionary) + self.assertIn(standard_fields.DetectionResultFields.detection_classes, + prediction_dictionary) + self.assertIn(standard_fields.DetectionResultFields.detection_scores, + prediction_dictionary) + self.assertIn(standard_fields.DetectionResultFields.detection_masks, + prediction_dictionary) + + self.assertAllEqual( + np.array([1, 3]), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_classes]) + + expected_boxes_data = np.array([[0.0, 0.5, 0.5, 1.0], [0, 0, 0, 0]]) + self.assertNDArrayNear( + expected_boxes_data, prediction_dictionary[ + standard_fields.DetectionResultFields.detection_boxes], 1e-5) + self.assertNDArrayNear( + np.array([0.8, 0.6]), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_scores], 1e-5) + expected_segm = np.concatenate( + 
[np.expand_dims(mask1, 0), + np.expand_dims(mask2, 0)], axis=0) + self.assertAllEqual( + expected_segm, prediction_dictionary[ + standard_fields.DetectionResultFields.detection_masks]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_vrd_challenge_evaluation.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_vrd_challenge_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..7a56c6bc0807ff00fcfaa261b4842995057b5015 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_vrd_challenge_evaluation.py @@ -0,0 +1,154 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Runs evaluation using OpenImages groundtruth and predictions. + +Example usage: +python \ +models/research/object_detection/metrics/oid_vrd_challenge_evaluation.py \ + --input_annotations_vrd=/path/to/input/annotations-human-bbox.csv \ + --input_annotations_labels=/path/to/input/annotations-label.csv \ + --input_class_labelmap=/path/to/input/class_labelmap.pbtxt \ + --input_relationship_labelmap=/path/to/input/relationship_labelmap.pbtxt \ + --input_predictions=/path/to/input/predictions.csv \ + --output_metrics=/path/to/output/metric.csv \ + +CSVs with bounding box annotations and image label (including the image URLs) +can be downloaded from the Open Images Challenge website: +https://storage.googleapis.com/openimages/web/challenge.html +The format of the input csv and the metrics itself are described on the +challenge website. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import pandas as pd +from google.protobuf import text_format + +from object_detection.metrics import io_utils +from object_detection.metrics import oid_vrd_challenge_evaluation_utils as utils +from object_detection.protos import string_int_label_map_pb2 +from object_detection.utils import vrd_evaluation + + +def _load_labelmap(labelmap_path): + """Loads labelmap from the labelmap path. + + Args: + labelmap_path: Path to the labelmap. + + Returns: + A dictionary mapping class name to class numerical id. + """ + + label_map = string_int_label_map_pb2.StringIntLabelMap() + with open(labelmap_path, 'r') as fid: + label_map_string = fid.read() + text_format.Merge(label_map_string, label_map) + labelmap_dict = {} + for item in label_map.item: + labelmap_dict[item.name] = item.id + return labelmap_dict + + +def _swap_labelmap_dict(labelmap_dict): + """Swaps keys and labels in labelmap. + + Args: + labelmap_dict: Input dictionary. + + Returns: + A dictionary mapping class name to class numerical id. 
+  """
+  return dict((v, k) for k, v in labelmap_dict.items())
+
+
+def main(parsed_args):
+  all_box_annotations = pd.read_csv(parsed_args.input_annotations_vrd)
+  all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
+  all_annotations = pd.concat([all_box_annotations, all_label_annotations])
+
+  class_label_map = _load_labelmap(parsed_args.input_class_labelmap)
+  relationship_label_map = _load_labelmap(
+      parsed_args.input_relationship_labelmap)
+
+  relation_evaluator = vrd_evaluation.VRDRelationDetectionEvaluator()
+  phrase_evaluator = vrd_evaluation.VRDPhraseDetectionEvaluator()
+
+  for _, groundtruth in enumerate(all_annotations.groupby('ImageID')):
+    image_id, image_groundtruth = groundtruth
+    groundtruth_dictionary = utils.build_groundtruth_vrd_dictionary(
+        image_groundtruth, class_label_map, relationship_label_map)
+
+    relation_evaluator.add_single_ground_truth_image_info(
+        image_id, groundtruth_dictionary)
+    phrase_evaluator.add_single_ground_truth_image_info(image_id,
+                                                        groundtruth_dictionary)
+
+  all_predictions = pd.read_csv(parsed_args.input_predictions)
+  for _, prediction_data in enumerate(all_predictions.groupby('ImageID')):
+    image_id, image_predictions = prediction_data
+    prediction_dictionary = utils.build_predictions_vrd_dictionary(
+        image_predictions, class_label_map, relationship_label_map)
+
+    relation_evaluator.add_single_detected_image_info(image_id,
+                                                      prediction_dictionary)
+    phrase_evaluator.add_single_detected_image_info(image_id,
+                                                    prediction_dictionary)
+
+  relation_metrics = relation_evaluator.evaluate(
+      relationships=_swap_labelmap_dict(relationship_label_map))
+  phrase_metrics = phrase_evaluator.evaluate(
+      relationships=_swap_labelmap_dict(relationship_label_map))
+
+  with open(parsed_args.output_metrics, 'w') as fid:
+    io_utils.write_csv(fid, relation_metrics)
+    io_utils.write_csv(fid, phrase_metrics)
+
+
+if __name__ == '__main__':
+
+  parser = argparse.ArgumentParser(
+      description=
+      'Evaluate Open Images Visual Relationship Detection predictions.')
+  parser.add_argument(
+      '--input_annotations_vrd',
+      required=True,
+      help='File with groundtruth vrd annotations.')
+  parser.add_argument(
+      '--input_annotations_labels',
+      required=True,
+      help='File with groundtruth labels annotations')
+  parser.add_argument(
+      '--input_predictions',
+      required=True,
+      help="""File with detection predictions; NOTE: no postprocessing is
+      applied in the evaluation script.""")
+  parser.add_argument(
+      '--input_class_labelmap',
+      required=True,
+      help="""OpenImages Challenge labelmap; note: it is expected to include
+      attributes.""")
+  parser.add_argument(
+      '--input_relationship_labelmap',
+      required=True,
+      help="""OpenImages Challenge relationship labelmap.""")
+  parser.add_argument(
+      '--output_metrics', required=True, help='Output file with csv metrics')
+
+  args = parser.parse_args()
+  main(args)
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_vrd_challenge_evaluation_utils.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_vrd_challenge_evaluation_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..590c8c84857470ab347377744c4f3493cf238feb
--- /dev/null
+++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_vrd_challenge_evaluation_utils.py
@@ -0,0 +1,125 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Converts data from CSV format to the VRDDetectionEvaluator format.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from object_detection.core import standard_fields +from object_detection.utils import vrd_evaluation + + +def build_groundtruth_vrd_dictionary(data, class_label_map, + relationship_label_map): + """Builds a groundtruth dictionary from groundtruth data in CSV file. + + Args: + data: Pandas DataFrame with the groundtruth data for a single image. + class_label_map: Class labelmap from string label name to an integer. + relationship_label_map: Relationship type labelmap from string name to an + integer. + + Returns: + A dictionary with keys suitable for passing to + VRDDetectionEvaluator.add_single_ground_truth_image_info: + standard_fields.InputDataFields.groundtruth_boxes: A numpy array + of structures with the shape [M, 1], representing M tuples, each tuple + containing the same number of named bounding boxes. + Each box is of the format [y_min, x_min, y_max, x_max] (see + datatype vrd_box_data_type, single_box_data_type above). + standard_fields.InputDataFields.groundtruth_classes: A numpy array of + structures shape [M, 1], representing the class labels of the + corresponding bounding boxes and possibly additional classes (see + datatype label_data_type above). + standard_fields.InputDataFields.verified_labels: numpy array + of shape [K] containing verified labels. + """ + data_boxes = data[data.LabelName.isnull()] + data_labels = data[data.LabelName1.isnull()] + + boxes = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.vrd_box_data_type) + boxes['subject'] = data_boxes[['YMin1', 'XMin1', 'YMax1', + 'XMax1']].to_numpy() + boxes['object'] = data_boxes[['YMin2', 'XMin2', 'YMax2', 'XMax2']].to_numpy() + + labels = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.label_data_type) + labels['subject'] = data_boxes['LabelName1'].map( + lambda x: class_label_map[x]).to_numpy() + labels['object'] = data_boxes['LabelName2'].map( + lambda x: class_label_map[x]).to_numpy() + labels['relation'] = data_boxes['RelationshipLabel'].map( + lambda x: relationship_label_map[x]).to_numpy() + + return { + standard_fields.InputDataFields.groundtruth_boxes: + boxes, + standard_fields.InputDataFields.groundtruth_classes: + labels, + standard_fields.InputDataFields.groundtruth_image_classes: + data_labels['LabelName'].map(lambda x: class_label_map[x]) + .to_numpy(), + } + + +def build_predictions_vrd_dictionary(data, class_label_map, + relationship_label_map): + """Builds a predictions dictionary from predictions data in CSV file. + + Args: + data: Pandas DataFrame with the predictions data for a single image. + class_label_map: Class labelmap from string label name to an integer. + relationship_label_map: Relationship type labelmap from string name to an + integer. 
+ + Returns: + Dictionary with keys suitable for passing to + VRDDetectionEvaluator.add_single_detected_image_info: + standard_fields.DetectionResultFields.detection_boxes: A numpy array of + structures with shape [N, 1], representing N tuples, each tuple + containing the same number of named bounding boxes. + Each box is of the format [y_min, x_min, y_max, x_max] (as an example + see datatype vrd_box_data_type, single_box_data_type above). + standard_fields.DetectionResultFields.detection_scores: float32 numpy + array of shape [N] containing detection scores for the boxes. + standard_fields.DetectionResultFields.detection_classes: A numpy array + of structures shape [N, 1], representing the class labels of the + corresponding bounding boxes and possibly additional classes (see + datatype label_data_type above). + """ + data_boxes = data + + boxes = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.vrd_box_data_type) + boxes['subject'] = data_boxes[['YMin1', 'XMin1', 'YMax1', + 'XMax1']].to_numpy() + boxes['object'] = data_boxes[['YMin2', 'XMin2', 'YMax2', 'XMax2']].to_numpy() + + labels = np.zeros(data_boxes.shape[0], dtype=vrd_evaluation.label_data_type) + labels['subject'] = data_boxes['LabelName1'].map( + lambda x: class_label_map[x]).to_numpy() + labels['object'] = data_boxes['LabelName2'].map( + lambda x: class_label_map[x]).to_numpy() + labels['relation'] = data_boxes['RelationshipLabel'].map( + lambda x: relationship_label_map[x]).to_numpy() + + return { + standard_fields.DetectionResultFields.detection_boxes: + boxes, + standard_fields.DetectionResultFields.detection_classes: + labels, + standard_fields.DetectionResultFields.detection_scores: + data_boxes['Score'].to_numpy() + } diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_vrd_challenge_evaluation_utils_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_vrd_challenge_evaluation_utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..04547bbedacc687927d72070c46ffc273b5d95af --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/oid_vrd_challenge_evaluation_utils_test.py @@ -0,0 +1,149 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for oid_vrd_challenge_evaluation_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import pandas as pd +import tensorflow.compat.v1 as tf +from object_detection.core import standard_fields +from object_detection.metrics import oid_vrd_challenge_evaluation_utils as utils +from object_detection.utils import vrd_evaluation + + +class OidVrdChallengeEvaluationUtilsTest(tf.test.TestCase): + + def testBuildGroundtruthDictionary(self): + np_data = pd.DataFrame( + [[ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.3, 0.5, 0.6, + 0.0, 0.3, 0.5, 0.6, 'is', None, None + ], [ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/02gy9n', 0.0, 0.3, 0.5, 0.6, + 0.1, 0.2, 0.3, 0.4, 'under', None, None + ], [ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.1, 0.2, 0.3, + 0.0, 0.1, 0.2, 0.3, 'is', None, None + ], [ + 'fe58ec1b06db2bb7', '/m/083vt', '/m/04bcr3', 0.1, 0.2, 0.3, 0.4, + 0.5, 0.6, 0.7, 0.8, 'at', None, None + ], [ + 'fe58ec1b06db2bb7', None, None, None, None, None, None, None, None, + None, None, None, '/m/04bcr3', 1.0 + ], [ + 'fe58ec1b06db2bb7', None, None, None, None, None, None, None, None, + None, None, None, '/m/083vt', 0.0 + ], [ + 'fe58ec1b06db2bb7', None, None, None, None, None, None, None, None, + None, None, None, '/m/02gy9n', 0.0 + ]], + columns=[ + 'ImageID', 'LabelName1', 'LabelName2', 'XMin1', 'XMax1', 'YMin1', + 'YMax1', 'XMin2', 'XMax2', 'YMin2', 'YMax2', 'RelationshipLabel', + 'LabelName', 'Confidence' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3} + relationship_label_map = {'is': 1, 'under': 2, 'at': 3} + groundtruth_dictionary = utils.build_groundtruth_vrd_dictionary( + np_data, class_label_map, relationship_label_map) + + self.assertTrue(standard_fields.InputDataFields.groundtruth_boxes in + groundtruth_dictionary) + self.assertTrue(standard_fields.InputDataFields.groundtruth_classes in + groundtruth_dictionary) + self.assertTrue(standard_fields.InputDataFields.groundtruth_image_classes in + groundtruth_dictionary) + + self.assertAllEqual( + np.array( + [(1, 2, 1), (1, 3, 2), (1, 2, 1), (2, 1, 3)], + dtype=vrd_evaluation.label_data_type), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_classes]) + expected_vrd_data = np.array( + [ + ([0.5, 0.0, 0.6, 0.3], [0.5, 0.0, 0.6, 0.3]), + ([0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]), + ([0.2, 0.0, 0.3, 0.1], [0.2, 0.0, 0.3, 0.1]), + ([0.3, 0.1, 0.4, 0.2], [0.7, 0.5, 0.8, 0.6]), + ], + dtype=vrd_evaluation.vrd_box_data_type) + for field in expected_vrd_data.dtype.fields: + self.assertNDArrayNear( + expected_vrd_data[field], groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_boxes][field], 1e-5) + self.assertAllEqual( + np.array([1, 2, 3]), groundtruth_dictionary[ + standard_fields.InputDataFields.groundtruth_image_classes]) + + def testBuildPredictionDictionary(self): + np_data = pd.DataFrame( + [[ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.3, 0.5, 0.6, + 0.0, 0.3, 0.5, 0.6, 'is', 0.1 + ], [ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/02gy9n', 0.0, 0.3, 0.5, 0.6, + 0.1, 0.2, 0.3, 0.4, 'under', 0.2 + ], [ + 'fe58ec1b06db2bb7', '/m/04bcr3', '/m/083vt', 0.0, 0.1, 0.2, 0.3, + 0.0, 0.1, 0.2, 0.3, 'is', 0.3 + ], [ + 'fe58ec1b06db2bb7', '/m/083vt', '/m/04bcr3', 0.1, 0.2, 0.3, 0.4, + 0.5, 0.6, 0.7, 0.8, 'at', 0.4 + ]], + columns=[ + 'ImageID', 'LabelName1', 'LabelName2', 'XMin1', 
'XMax1', 'YMin1', + 'YMax1', 'XMin2', 'XMax2', 'YMin2', 'YMax2', 'RelationshipLabel', + 'Score' + ]) + class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3} + relationship_label_map = {'is': 1, 'under': 2, 'at': 3} + prediction_dictionary = utils.build_predictions_vrd_dictionary( + np_data, class_label_map, relationship_label_map) + + self.assertTrue(standard_fields.DetectionResultFields.detection_boxes in + prediction_dictionary) + self.assertTrue(standard_fields.DetectionResultFields.detection_classes in + prediction_dictionary) + self.assertTrue(standard_fields.DetectionResultFields.detection_scores in + prediction_dictionary) + + self.assertAllEqual( + np.array( + [(1, 2, 1), (1, 3, 2), (1, 2, 1), (2, 1, 3)], + dtype=vrd_evaluation.label_data_type), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_classes]) + expected_vrd_data = np.array( + [ + ([0.5, 0.0, 0.6, 0.3], [0.5, 0.0, 0.6, 0.3]), + ([0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]), + ([0.2, 0.0, 0.3, 0.1], [0.2, 0.0, 0.3, 0.1]), + ([0.3, 0.1, 0.4, 0.2], [0.7, 0.5, 0.8, 0.6]), + ], + dtype=vrd_evaluation.vrd_box_data_type) + for field in expected_vrd_data.dtype.fields: + self.assertNDArrayNear( + expected_vrd_data[field], prediction_dictionary[ + standard_fields.DetectionResultFields.detection_boxes][field], + 1e-5) + self.assertNDArrayNear( + np.array([0.1, 0.2, 0.3, 0.4]), prediction_dictionary[ + standard_fields.DetectionResultFields.detection_scores], 1e-5) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/tf_example_parser.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/tf_example_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1535f89bf585d2be6d2d19183b7b97318aeade --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/tf_example_parser.py @@ -0,0 +1,159 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tensorflow Example proto parser for data loading. + +A parser to decode data containing serialized tensorflow.Example +protos into materialized tensors (numpy arrays). 
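+
+Illustrative usage (the feature key and the `tf_example` proto below are
+assumed for the example):
+
+  parser = StringParser('image/source_id')
+  source_id = parser.parse(tf_example)  # bytes, or None if the key is absent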
+""" + +import numpy as np + +from object_detection.core import data_parser +from object_detection.core import standard_fields as fields + + +class FloatParser(data_parser.DataToNumpyParser): + """Tensorflow Example float parser.""" + + def __init__(self, field_name): + self.field_name = field_name + + def parse(self, tf_example): + return np.array( + tf_example.features.feature[self.field_name].float_list.value, + dtype=np.float).transpose() if tf_example.features.feature[ + self.field_name].HasField("float_list") else None + + +class StringParser(data_parser.DataToNumpyParser): + """Tensorflow Example string parser.""" + + def __init__(self, field_name): + self.field_name = field_name + + def parse(self, tf_example): + return b"".join(tf_example.features.feature[ + self.field_name].bytes_list.value) if tf_example.features.feature[ + self.field_name].HasField("bytes_list") else None + + +class Int64Parser(data_parser.DataToNumpyParser): + """Tensorflow Example int64 parser.""" + + def __init__(self, field_name): + self.field_name = field_name + + def parse(self, tf_example): + return np.array( + tf_example.features.feature[self.field_name].int64_list.value, + dtype=np.int64).transpose() if tf_example.features.feature[ + self.field_name].HasField("int64_list") else None + + +class BoundingBoxParser(data_parser.DataToNumpyParser): + """Tensorflow Example bounding box parser.""" + + def __init__(self, xmin_field_name, ymin_field_name, xmax_field_name, + ymax_field_name): + self.field_names = [ + ymin_field_name, xmin_field_name, ymax_field_name, xmax_field_name + ] + + def parse(self, tf_example): + result = [] + parsed = True + for field_name in self.field_names: + result.append(tf_example.features.feature[field_name].float_list.value) + parsed &= ( + tf_example.features.feature[field_name].HasField("float_list")) + + return np.array(result).transpose() if parsed else None + + +class TfExampleDetectionAndGTParser(data_parser.DataToNumpyParser): + """Tensorflow Example proto parser.""" + + def __init__(self): + self.items_to_handlers = { + fields.DetectionResultFields.key: + StringParser(fields.TfExampleFields.source_id), + # Object ground truth boxes and classes. + fields.InputDataFields.groundtruth_boxes: (BoundingBoxParser( + fields.TfExampleFields.object_bbox_xmin, + fields.TfExampleFields.object_bbox_ymin, + fields.TfExampleFields.object_bbox_xmax, + fields.TfExampleFields.object_bbox_ymax)), + fields.InputDataFields.groundtruth_classes: ( + Int64Parser(fields.TfExampleFields.object_class_label)), + # Object detections. + fields.DetectionResultFields.detection_boxes: (BoundingBoxParser( + fields.TfExampleFields.detection_bbox_xmin, + fields.TfExampleFields.detection_bbox_ymin, + fields.TfExampleFields.detection_bbox_xmax, + fields.TfExampleFields.detection_bbox_ymax)), + fields.DetectionResultFields.detection_classes: ( + Int64Parser(fields.TfExampleFields.detection_class_label)), + fields.DetectionResultFields.detection_scores: ( + FloatParser(fields.TfExampleFields.detection_score)), + } + + self.optional_items_to_handlers = { + fields.InputDataFields.groundtruth_difficult: + Int64Parser(fields.TfExampleFields.object_difficult), + fields.InputDataFields.groundtruth_group_of: + Int64Parser(fields.TfExampleFields.object_group_of), + fields.InputDataFields.groundtruth_image_classes: + Int64Parser(fields.TfExampleFields.image_class_label), + } + + def parse(self, tf_example): + """Parses tensorflow example and returns a tensor dictionary. + + Args: + tf_example: a tf.Example object. 
+ + Returns: + A dictionary of the following numpy arrays: + fields.DetectionResultFields.source_id - string containing original image + id. + fields.InputDataFields.groundtruth_boxes - a numpy array containing + groundtruth boxes. + fields.InputDataFields.groundtruth_classes - a numpy array containing + groundtruth classes. + fields.InputDataFields.groundtruth_group_of - a numpy array containing + groundtruth group of flag (optional, None if not specified). + fields.InputDataFields.groundtruth_difficult - a numpy array containing + groundtruth difficult flag (optional, None if not specified). + fields.InputDataFields.groundtruth_image_classes - a numpy array + containing groundtruth image-level labels. + fields.DetectionResultFields.detection_boxes - a numpy array containing + detection boxes. + fields.DetectionResultFields.detection_classes - a numpy array containing + detection class labels. + fields.DetectionResultFields.detection_scores - a numpy array containing + detection scores. + Returns None if tf.Example was not parsed or non-optional fields were not + found. + """ + results_dict = {} + parsed = True + for key, parser in self.items_to_handlers.items(): + results_dict[key] = parser.parse(tf_example) + parsed &= (results_dict[key] is not None) + + for key, parser in self.optional_items_to_handlers.items(): + results_dict[key] = parser.parse(tf_example) + + return results_dict if parsed else None diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/tf_example_parser_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/tf_example_parser_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c195c7376acdb58f341846031a255d630fd4be13 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/metrics/tf_example_parser_test.py @@ -0,0 +1,197 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object_detection.data_decoders.tf_example_parser.""" + +import numpy as np +import numpy.testing as np_testing +import tensorflow.compat.v1 as tf + +from object_detection.core import standard_fields as fields +from object_detection.metrics import tf_example_parser + + +class TfExampleDecoderTest(tf.test.TestCase): + + def _Int64Feature(self, value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) + + def _FloatFeature(self, value): + return tf.train.Feature(float_list=tf.train.FloatList(value=value)) + + def _BytesFeature(self, value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + def testParseDetectionsAndGT(self): + source_id = b'abc.jpg' + # y_min, x_min, y_max, x_max + object_bb = np.array([[0.0, 0.5, 0.3], [0.0, 0.1, 0.6], [1.0, 0.6, 0.8], + [1.0, 0.6, 0.7]]).transpose() + detection_bb = np.array([[0.1, 0.2], [0.0, 0.8], [1.0, 0.6], + [1.0, 0.85]]).transpose() + + object_class_label = [1, 1, 2] + object_difficult = [1, 0, 0] + object_group_of = [0, 0, 1] + verified_labels = [1, 2, 3, 4] + detection_class_label = [2, 1] + detection_score = [0.5, 0.3] + features = { + fields.TfExampleFields.source_id: + self._BytesFeature(source_id), + fields.TfExampleFields.object_bbox_ymin: + self._FloatFeature(object_bb[:, 0].tolist()), + fields.TfExampleFields.object_bbox_xmin: + self._FloatFeature(object_bb[:, 1].tolist()), + fields.TfExampleFields.object_bbox_ymax: + self._FloatFeature(object_bb[:, 2].tolist()), + fields.TfExampleFields.object_bbox_xmax: + self._FloatFeature(object_bb[:, 3].tolist()), + fields.TfExampleFields.detection_bbox_ymin: + self._FloatFeature(detection_bb[:, 0].tolist()), + fields.TfExampleFields.detection_bbox_xmin: + self._FloatFeature(detection_bb[:, 1].tolist()), + fields.TfExampleFields.detection_bbox_ymax: + self._FloatFeature(detection_bb[:, 2].tolist()), + fields.TfExampleFields.detection_bbox_xmax: + self._FloatFeature(detection_bb[:, 3].tolist()), + fields.TfExampleFields.detection_class_label: + self._Int64Feature(detection_class_label), + fields.TfExampleFields.detection_score: + self._FloatFeature(detection_score), + } + + example = tf.train.Example(features=tf.train.Features(feature=features)) + parser = tf_example_parser.TfExampleDetectionAndGTParser() + + results_dict = parser.parse(example) + self.assertIsNone(results_dict) + + features[fields.TfExampleFields.object_class_label] = ( + self._Int64Feature(object_class_label)) + features[fields.TfExampleFields.object_difficult] = ( + self._Int64Feature(object_difficult)) + + example = tf.train.Example(features=tf.train.Features(feature=features)) + results_dict = parser.parse(example) + + self.assertIsNotNone(results_dict) + self.assertEqual(source_id, results_dict[fields.DetectionResultFields.key]) + np_testing.assert_almost_equal( + object_bb, results_dict[fields.InputDataFields.groundtruth_boxes]) + np_testing.assert_almost_equal( + detection_bb, + results_dict[fields.DetectionResultFields.detection_boxes]) + np_testing.assert_almost_equal( + detection_score, + results_dict[fields.DetectionResultFields.detection_scores]) + np_testing.assert_almost_equal( + detection_class_label, + results_dict[fields.DetectionResultFields.detection_classes]) + np_testing.assert_almost_equal( + object_difficult, + results_dict[fields.InputDataFields.groundtruth_difficult]) + np_testing.assert_almost_equal( + object_class_label, + 
results_dict[fields.InputDataFields.groundtruth_classes]) + + parser = tf_example_parser.TfExampleDetectionAndGTParser() + + features[fields.TfExampleFields.object_group_of] = ( + self._Int64Feature(object_group_of)) + + example = tf.train.Example(features=tf.train.Features(feature=features)) + results_dict = parser.parse(example) + self.assertIsNotNone(results_dict) + np_testing.assert_equal( + object_group_of, + results_dict[fields.InputDataFields.groundtruth_group_of]) + + features[fields.TfExampleFields.image_class_label] = ( + self._Int64Feature(verified_labels)) + + example = tf.train.Example(features=tf.train.Features(feature=features)) + results_dict = parser.parse(example) + self.assertIsNotNone(results_dict) + np_testing.assert_equal( + verified_labels, + results_dict[fields.InputDataFields.groundtruth_image_classes]) + + def testParseString(self): + string_val = b'abc' + features = {'string': self._BytesFeature(string_val)} + example = tf.train.Example(features=tf.train.Features(feature=features)) + + parser = tf_example_parser.StringParser('string') + result = parser.parse(example) + self.assertIsNotNone(result) + self.assertEqual(result, string_val) + + parser = tf_example_parser.StringParser('another_string') + result = parser.parse(example) + self.assertIsNone(result) + + def testParseFloat(self): + float_array_val = [1.5, 1.4, 2.0] + features = {'floats': self._FloatFeature(float_array_val)} + example = tf.train.Example(features=tf.train.Features(feature=features)) + + parser = tf_example_parser.FloatParser('floats') + result = parser.parse(example) + self.assertIsNotNone(result) + np_testing.assert_almost_equal(result, float_array_val) + + parser = tf_example_parser.StringParser('another_floats') + result = parser.parse(example) + self.assertIsNone(result) + + def testInt64Parser(self): + int_val = [1, 2, 3] + features = {'ints': self._Int64Feature(int_val)} + example = tf.train.Example(features=tf.train.Features(feature=features)) + + parser = tf_example_parser.Int64Parser('ints') + result = parser.parse(example) + self.assertIsNotNone(result) + np_testing.assert_almost_equal(result, int_val) + + parser = tf_example_parser.Int64Parser('another_ints') + result = parser.parse(example) + self.assertIsNone(result) + + def testBoundingBoxParser(self): + bounding_boxes = np.array([[0.0, 0.5, 0.3], [0.0, 0.1, 0.6], + [1.0, 0.6, 0.8], [1.0, 0.6, 0.7]]).transpose() + features = { + 'ymin': self._FloatFeature(bounding_boxes[:, 0]), + 'xmin': self._FloatFeature(bounding_boxes[:, 1]), + 'ymax': self._FloatFeature(bounding_boxes[:, 2]), + 'xmax': self._FloatFeature(bounding_boxes[:, 3]) + } + + example = tf.train.Example(features=tf.train.Features(feature=features)) + + parser = tf_example_parser.BoundingBoxParser('xmin', 'ymin', 'xmax', 'ymax') + result = parser.parse(example) + self.assertIsNotNone(result) + np_testing.assert_almost_equal(result, bounding_boxes) + + parser = tf_example_parser.BoundingBoxParser('xmin', 'ymin', 'xmax', + 'another_ymax') + result = parser.parse(example) + self.assertIsNone(result) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_hparams.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_hparams.py new file mode 100644 index 0000000000000000000000000000000000000000..12b043e9b1c8652aa856d919931074adf5ec18e4 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_hparams.py @@ -0,0 +1,50 @@ +# Copyright 2017 
The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Hyperparameters for the object detection model in TF.learn. + +This file consolidates and documents the hyperparameters used by the model. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import training as contrib_training +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + + +def create_hparams(hparams_overrides=None): + """Returns hyperparameters, including any flag value overrides. + + Args: + hparams_overrides: Optional hparams overrides, represented as a + string containing comma-separated hparam_name=value pairs. + + Returns: + The hyperparameters as a tf.HParams object. + """ + hparams = contrib_training.HParams( + # Whether a fine tuning checkpoint (provided in the pipeline config) + # should be loaded for training. + load_pretrained=True) + # Override any of the preceding hyperparameter values. + if hparams_overrides: + hparams = hparams.parse(hparams_overrides) + return hparams diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..a79b5bdba5a2466cfd1a13e3347b45670a3c6e8a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib.py @@ -0,0 +1,1097 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +r"""Constructs model, inputs, and training environment.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import functools +import os + +import tensorflow.compat.v1 as tf +import tensorflow.compat.v2 as tf2 +import tf_slim as slim + +from object_detection import eval_util +from object_detection import exporter as exporter_lib +from object_detection import inputs +from object_detection.builders import graph_rewriter_builder +from object_detection.builders import model_builder +from object_detection.builders import optimizer_builder +from object_detection.core import standard_fields as fields +from object_detection.utils import config_util +from object_detection.utils import label_map_util +from object_detection.utils import ops +from object_detection.utils import shape_utils +from object_detection.utils import variables_helper +from object_detection.utils import visualization_utils as vis_utils + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import learn as contrib_learn +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +# A map of names to methods that help build the model. +MODEL_BUILD_UTIL_MAP = { + 'get_configs_from_pipeline_file': + config_util.get_configs_from_pipeline_file, + 'create_pipeline_proto_from_configs': + config_util.create_pipeline_proto_from_configs, + 'merge_external_params_with_configs': + config_util.merge_external_params_with_configs, + 'create_train_input_fn': + inputs.create_train_input_fn, + 'create_eval_input_fn': + inputs.create_eval_input_fn, + 'create_predict_input_fn': + inputs.create_predict_input_fn, + 'detection_model_fn_base': model_builder.build, +} + + +def _prepare_groundtruth_for_eval(detection_model, class_agnostic, + max_number_of_boxes): + """Extracts groundtruth data from detection_model and prepares it for eval. + + Args: + detection_model: A `DetectionModel` object. + class_agnostic: Whether the detections are class_agnostic. + max_number_of_boxes: Max number of groundtruth boxes. + + Returns: + A tuple of: + groundtruth: Dictionary with the following fields: + 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, + in normalized coordinates. + 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed + classes. + 'groundtruth_masks': 4D float32 tensor of instance masks (if provided in + groundtruth) + 'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating + is_crowd annotations (if provided in groundtruth). + 'groundtruth_area': [batch_size, num_boxes] float32 tensor indicating + the area (in the original absolute coordinates) of annotations (if + provided in groundtruth). + 'num_groundtruth_boxes': [batch_size] tensor containing the maximum number + of groundtruth boxes per image.. + 'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32 + tensor of keypoints (if provided in groundtruth). + 'groundtruth_dp_num_points_list': [batch_size, num_boxes] int32 tensor + with the number of DensePose points for each instance (if provided in + groundtruth). + 'groundtruth_dp_part_ids_list': [batch_size, num_boxes, + max_sampled_points] int32 tensor with the part ids for each DensePose + sampled point (if provided in groundtruth). 
+ 'groundtruth_dp_surface_coords_list': [batch_size, num_boxes, + max_sampled_points, 4] containing the DensePose surface coordinates for + each sampled point (if provided in groundtruth). + 'groundtruth_track_ids_list': [batch_size, num_boxes] int32 tensor + with track ID for each instance (if provided in groundtruth). + 'groundtruth_group_of': [batch_size, num_boxes] bool tensor indicating + group_of annotations (if provided in groundtruth). + 'groundtruth_labeled_classes': [batch_size, num_classes] int64 + tensor of 1-indexed classes. + 'groundtruth_verified_neg_classes': [batch_size, num_classes] float32 + K-hot representation of 1-indexed classes which were verified as not + present in the image. + 'groundtruth_not_exhaustive_classes': [batch_size, num_classes] K-hot + representation of 1-indexed classes which don't have all of their + instances marked exhaustively. + class_agnostic: Boolean indicating whether detections are class agnostic. + """ + input_data_fields = fields.InputDataFields() + groundtruth_boxes = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.boxes)) + groundtruth_boxes_shape = tf.shape(groundtruth_boxes) + # For class-agnostic models, groundtruth one-hot encodings collapse to all + # ones. + if class_agnostic: + groundtruth_classes_one_hot = tf.ones( + [groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1]) + else: + groundtruth_classes_one_hot = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.classes)) + label_id_offset = 1 # Applying label id offset (b/63711816) + groundtruth_classes = ( + tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset) + groundtruth = { + input_data_fields.groundtruth_boxes: groundtruth_boxes, + input_data_fields.groundtruth_classes: groundtruth_classes + } + + if detection_model.groundtruth_has_field(fields.BoxListFields.masks): + groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.masks)) + + if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd): + groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.is_crowd)) + + if detection_model.groundtruth_has_field(input_data_fields.groundtruth_area): + groundtruth[input_data_fields.groundtruth_area] = tf.stack( + detection_model.groundtruth_lists(input_data_fields.groundtruth_area)) + + if detection_model.groundtruth_has_field(fields.BoxListFields.keypoints): + groundtruth[input_data_fields.groundtruth_keypoints] = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.keypoints)) + + if detection_model.groundtruth_has_field( + fields.BoxListFields.keypoint_visibilities): + groundtruth[input_data_fields.groundtruth_keypoint_visibilities] = tf.stack( + detection_model.groundtruth_lists( + fields.BoxListFields.keypoint_visibilities)) + + if detection_model.groundtruth_has_field(fields.BoxListFields.group_of): + groundtruth[input_data_fields.groundtruth_group_of] = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.group_of)) + + if detection_model.groundtruth_has_field( + input_data_fields.groundtruth_verified_neg_classes): + groundtruth[input_data_fields.groundtruth_verified_neg_classes] = tf.stack( + detection_model.groundtruth_lists( + input_data_fields.groundtruth_verified_neg_classes)) + + if detection_model.groundtruth_has_field( + input_data_fields.groundtruth_not_exhaustive_classes): + groundtruth[ + 
input_data_fields.groundtruth_not_exhaustive_classes] = tf.stack( + detection_model.groundtruth_lists( + input_data_fields.groundtruth_not_exhaustive_classes)) + + if detection_model.groundtruth_has_field( + fields.BoxListFields.densepose_num_points): + groundtruth[input_data_fields.groundtruth_dp_num_points] = tf.stack( + detection_model.groundtruth_lists( + fields.BoxListFields.densepose_num_points)) + if detection_model.groundtruth_has_field( + fields.BoxListFields.densepose_part_ids): + groundtruth[input_data_fields.groundtruth_dp_part_ids] = tf.stack( + detection_model.groundtruth_lists( + fields.BoxListFields.densepose_part_ids)) + if detection_model.groundtruth_has_field( + fields.BoxListFields.densepose_surface_coords): + groundtruth[input_data_fields.groundtruth_dp_surface_coords] = tf.stack( + detection_model.groundtruth_lists( + fields.BoxListFields.densepose_surface_coords)) + + if detection_model.groundtruth_has_field(fields.BoxListFields.track_ids): + groundtruth[input_data_fields.groundtruth_track_ids] = tf.stack( + detection_model.groundtruth_lists(fields.BoxListFields.track_ids)) + + if detection_model.groundtruth_has_field( + input_data_fields.groundtruth_labeled_classes): + labeled_classes_list = detection_model.groundtruth_lists( + input_data_fields.groundtruth_labeled_classes) + labeled_classes = [ + tf.where(x)[:, 0] + label_id_offset for x in labeled_classes_list + ] + if len(labeled_classes) > 1: + num_classes = labeled_classes_list[0].shape[0] + padded_labeled_classes = [] + for x in labeled_classes: + padding = num_classes - tf.shape(x)[0] + padded_labeled_classes.append(tf.pad(x, [[0, padding]])) + groundtruth[input_data_fields.groundtruth_labeled_classes] = tf.stack( + padded_labeled_classes) + else: + groundtruth[input_data_fields.groundtruth_labeled_classes] = tf.stack( + labeled_classes) + + groundtruth[input_data_fields.num_groundtruth_boxes] = ( + tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]])) + return groundtruth + + +def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True): + """Unstacks all tensors in `tensor_dict` along 0th dimension. + + Unstacks tensor from the tensor dict along 0th dimension and returns a + tensor_dict containing values that are lists of unstacked, unpadded tensors. + + Tensors in the `tensor_dict` are expected to be of one of the three shapes: + 1. [batch_size] + 2. [batch_size, height, width, channels] + 3. [batch_size, num_boxes, d1, d2, ... dn] + + When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3 + above are sliced along the `num_boxes` dimension using the value in tensor + field.InputDataFields.num_groundtruth_boxes. + + Note that this function has a static list of input data fields and has to be + kept in sync with the InputDataFields defined in core/standard_fields.py + + Args: + tensor_dict: A dictionary of batched groundtruth tensors. + unpad_groundtruth_tensors: Whether to remove padding along `num_boxes` + dimension of the groundtruth tensors. + + Returns: + A dictionary where the keys are from fields.InputDataFields and values are + a list of unstacked (optionally unpadded) tensors. + + Raises: + ValueError: If unpad_tensors is True and `tensor_dict` does not contain + `num_groundtruth_boxes` tensor. 
+ """ + unbatched_tensor_dict = { + key: tf.unstack(tensor) for key, tensor in tensor_dict.items() + } + if unpad_groundtruth_tensors: + if (fields.InputDataFields.num_groundtruth_boxes not in + unbatched_tensor_dict): + raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. ' + 'Keys available: {}'.format( + unbatched_tensor_dict.keys())) + unbatched_unpadded_tensor_dict = {} + unpad_keys = set([ + # List of input data fields that are padded along the num_boxes + # dimension. This list has to be kept in sync with InputDataFields in + # standard_fields.py. + fields.InputDataFields.groundtruth_instance_masks, + fields.InputDataFields.groundtruth_classes, + fields.InputDataFields.groundtruth_boxes, + fields.InputDataFields.groundtruth_keypoints, + fields.InputDataFields.groundtruth_keypoint_visibilities, + fields.InputDataFields.groundtruth_dp_num_points, + fields.InputDataFields.groundtruth_dp_part_ids, + fields.InputDataFields.groundtruth_dp_surface_coords, + fields.InputDataFields.groundtruth_track_ids, + fields.InputDataFields.groundtruth_group_of, + fields.InputDataFields.groundtruth_difficult, + fields.InputDataFields.groundtruth_is_crowd, + fields.InputDataFields.groundtruth_area, + fields.InputDataFields.groundtruth_weights + ]).intersection(set(unbatched_tensor_dict.keys())) + + for key in unpad_keys: + unpadded_tensor_list = [] + for num_gt, padded_tensor in zip( + unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes], + unbatched_tensor_dict[key]): + tensor_shape = shape_utils.combined_static_and_dynamic_shape( + padded_tensor) + slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32) + slice_size = tf.stack( + [num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]]) + unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size) + unpadded_tensor_list.append(unpadded_tensor) + unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list + + unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict) + + return unbatched_tensor_dict + + +def provide_groundtruth(model, labels): + """Provides the labels to a model as groundtruth. + + This helper function extracts the corresponding boxes, classes, + keypoints, weights, masks, etc. from the labels, and provides it + as groundtruth to the models. + + Args: + model: The detection model to provide groundtruth to. + labels: The labels for the training or evaluation inputs. 
+ """ + gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes] + gt_classes_list = labels[fields.InputDataFields.groundtruth_classes] + gt_masks_list = None + if fields.InputDataFields.groundtruth_instance_masks in labels: + gt_masks_list = labels[ + fields.InputDataFields.groundtruth_instance_masks] + gt_keypoints_list = None + if fields.InputDataFields.groundtruth_keypoints in labels: + gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints] + gt_keypoint_visibilities_list = None + if fields.InputDataFields.groundtruth_keypoint_visibilities in labels: + gt_keypoint_visibilities_list = labels[ + fields.InputDataFields.groundtruth_keypoint_visibilities] + gt_dp_num_points_list = None + if fields.InputDataFields.groundtruth_dp_num_points in labels: + gt_dp_num_points_list = labels[ + fields.InputDataFields.groundtruth_dp_num_points] + gt_dp_part_ids_list = None + if fields.InputDataFields.groundtruth_dp_part_ids in labels: + gt_dp_part_ids_list = labels[ + fields.InputDataFields.groundtruth_dp_part_ids] + gt_dp_surface_coords_list = None + if fields.InputDataFields.groundtruth_dp_surface_coords in labels: + gt_dp_surface_coords_list = labels[ + fields.InputDataFields.groundtruth_dp_surface_coords] + gt_track_ids_list = None + if fields.InputDataFields.groundtruth_track_ids in labels: + gt_track_ids_list = labels[ + fields.InputDataFields.groundtruth_track_ids] + gt_weights_list = None + if fields.InputDataFields.groundtruth_weights in labels: + gt_weights_list = labels[fields.InputDataFields.groundtruth_weights] + gt_confidences_list = None + if fields.InputDataFields.groundtruth_confidences in labels: + gt_confidences_list = labels[ + fields.InputDataFields.groundtruth_confidences] + gt_is_crowd_list = None + if fields.InputDataFields.groundtruth_is_crowd in labels: + gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd] + gt_group_of_list = None + if fields.InputDataFields.groundtruth_group_of in labels: + gt_group_of_list = labels[fields.InputDataFields.groundtruth_group_of] + gt_area_list = None + if fields.InputDataFields.groundtruth_area in labels: + gt_area_list = labels[fields.InputDataFields.groundtruth_area] + gt_labeled_classes = None + if fields.InputDataFields.groundtruth_labeled_classes in labels: + gt_labeled_classes = labels[ + fields.InputDataFields.groundtruth_labeled_classes] + gt_verified_neg_classes = None + if fields.InputDataFields.groundtruth_verified_neg_classes in labels: + gt_verified_neg_classes = labels[ + fields.InputDataFields.groundtruth_verified_neg_classes] + gt_not_exhaustive_classes = None + if fields.InputDataFields.groundtruth_not_exhaustive_classes in labels: + gt_not_exhaustive_classes = labels[ + fields.InputDataFields.groundtruth_not_exhaustive_classes] + model.provide_groundtruth( + groundtruth_boxes_list=gt_boxes_list, + groundtruth_classes_list=gt_classes_list, + groundtruth_confidences_list=gt_confidences_list, + groundtruth_labeled_classes=gt_labeled_classes, + groundtruth_masks_list=gt_masks_list, + groundtruth_keypoints_list=gt_keypoints_list, + groundtruth_keypoint_visibilities_list=gt_keypoint_visibilities_list, + groundtruth_dp_num_points_list=gt_dp_num_points_list, + groundtruth_dp_part_ids_list=gt_dp_part_ids_list, + groundtruth_dp_surface_coords_list=gt_dp_surface_coords_list, + groundtruth_weights_list=gt_weights_list, + groundtruth_is_crowd_list=gt_is_crowd_list, + groundtruth_group_of_list=gt_group_of_list, + groundtruth_area_list=gt_area_list, + 
groundtruth_track_ids_list=gt_track_ids_list, + groundtruth_verified_neg_classes=gt_verified_neg_classes, + groundtruth_not_exhaustive_classes=gt_not_exhaustive_classes) + + +def create_model_fn(detection_model_fn, configs, hparams=None, use_tpu=False, + postprocess_on_cpu=False): + """Creates a model function for `Estimator`. + + Args: + detection_model_fn: Function that returns a `DetectionModel` instance. + configs: Dictionary of pipeline config objects. + hparams: `HParams` object. + use_tpu: Boolean indicating whether model should be constructed for + use on TPU. + postprocess_on_cpu: When use_tpu and postprocess_on_cpu is true, postprocess + is scheduled on the host cpu. + + Returns: + `model_fn` for `Estimator`. + """ + train_config = configs['train_config'] + eval_input_config = configs['eval_input_config'] + eval_config = configs['eval_config'] + + def model_fn(features, labels, mode, params=None): + """Constructs the object detection model. + + Args: + features: Dictionary of feature tensors, returned from `input_fn`. + labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL, + otherwise None. + mode: Mode key from tf.estimator.ModeKeys. + params: Parameter dictionary passed from the estimator. + + Returns: + An `EstimatorSpec` that encapsulates the model and its serving + configurations. + """ + params = params or {} + total_loss, train_op, detections, export_outputs = None, None, None, None + is_training = mode == tf.estimator.ModeKeys.TRAIN + + # Make sure to set the Keras learning phase. True during training, + # False for inference. + tf.keras.backend.set_learning_phase(is_training) + # Set policy for mixed-precision training with Keras-based models. + if use_tpu and train_config.use_bfloat16: + from tensorflow.python.keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top + # Enable v2 behavior, as `mixed_bfloat16` is only supported in TF 2.0. + base_layer_utils.enable_v2_dtype_behavior() + tf2.keras.mixed_precision.experimental.set_policy( + 'mixed_bfloat16') + detection_model = detection_model_fn( + is_training=is_training, add_summaries=(not use_tpu)) + scaffold_fn = None + + if mode == tf.estimator.ModeKeys.TRAIN: + labels = unstack_batch( + labels, + unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors) + elif mode == tf.estimator.ModeKeys.EVAL: + # For evaling on train data, it is necessary to check whether groundtruth + # must be unpadded. 
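+      # Unpadding is only possible when the static `num_boxes` dimension is known,
+      # and it is skipped on TPU, where padded static shapes must be preserved.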
+ boxes_shape = ( + labels[fields.InputDataFields.groundtruth_boxes].get_shape() + .as_list()) + unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu + labels = unstack_batch( + labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) + + if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL): + provide_groundtruth(detection_model, labels) + + preprocessed_images = features[fields.InputDataFields.image] + + side_inputs = detection_model.get_side_inputs(features) + + if use_tpu and train_config.use_bfloat16: + with tf.tpu.bfloat16_scope(): + prediction_dict = detection_model.predict( + preprocessed_images, + features[fields.InputDataFields.true_image_shape], **side_inputs) + prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict) + else: + prediction_dict = detection_model.predict( + preprocessed_images, + features[fields.InputDataFields.true_image_shape], **side_inputs) + + def postprocess_wrapper(args): + return detection_model.postprocess(args[0], args[1]) + + if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT): + if use_tpu and postprocess_on_cpu: + detections = tf.tpu.outside_compilation( + postprocess_wrapper, + (prediction_dict, + features[fields.InputDataFields.true_image_shape])) + else: + detections = postprocess_wrapper(( + prediction_dict, + features[fields.InputDataFields.true_image_shape])) + + if mode == tf.estimator.ModeKeys.TRAIN: + load_pretrained = hparams.load_pretrained if hparams else False + if train_config.fine_tune_checkpoint and load_pretrained: + if not train_config.fine_tune_checkpoint_type: + # train_config.from_detection_checkpoint field is deprecated. For + # backward compatibility, set train_config.fine_tune_checkpoint_type + # based on train_config.from_detection_checkpoint. 
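+          # 'detection' checkpoints carry the full detector weights, whereas
+          # 'classification' checkpoints only provide feature-extractor weights.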
+ if train_config.from_detection_checkpoint: + train_config.fine_tune_checkpoint_type = 'detection' + else: + train_config.fine_tune_checkpoint_type = 'classification' + asg_map = detection_model.restore_map( + fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type, + load_all_detection_checkpoint_vars=( + train_config.load_all_detection_checkpoint_vars)) + available_var_map = ( + variables_helper.get_variables_available_in_checkpoint( + asg_map, + train_config.fine_tune_checkpoint, + include_global_step=False)) + if use_tpu: + + def tpu_scaffold(): + tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint, + available_var_map) + return tf.train.Scaffold() + + scaffold_fn = tpu_scaffold + else: + tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint, + available_var_map) + + if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL): + if (mode == tf.estimator.ModeKeys.EVAL and + eval_config.use_dummy_loss_in_eval): + total_loss = tf.constant(1.0) + losses_dict = {'Loss/total_loss': total_loss} + else: + losses_dict = detection_model.loss( + prediction_dict, features[fields.InputDataFields.true_image_shape]) + losses = [loss_tensor for loss_tensor in losses_dict.values()] + if train_config.add_regularization_loss: + regularization_losses = detection_model.regularization_losses() + if use_tpu and train_config.use_bfloat16: + regularization_losses = ops.bfloat16_to_float32_nested( + regularization_losses) + if regularization_losses: + regularization_loss = tf.add_n( + regularization_losses, name='regularization_loss') + losses.append(regularization_loss) + losses_dict['Loss/regularization_loss'] = regularization_loss + total_loss = tf.add_n(losses, name='total_loss') + losses_dict['Loss/total_loss'] = total_loss + + if 'graph_rewriter_config' in configs: + graph_rewriter_fn = graph_rewriter_builder.build( + configs['graph_rewriter_config'], is_training=is_training) + graph_rewriter_fn() + + # TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we + # can write learning rate summaries on TPU without host calls. + global_step = tf.train.get_or_create_global_step() + training_optimizer, optimizer_summary_vars = optimizer_builder.build( + train_config.optimizer) + + if mode == tf.estimator.ModeKeys.TRAIN: + if use_tpu: + training_optimizer = tf.tpu.CrossShardOptimizer(training_optimizer) + + # Optionally freeze some layers by setting their gradients to be zero. 
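+      # Variables matched by `freeze_variables` are filtered out of the list that
+      # is handed to the optimizer below, so they receive no gradient updates.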
+ trainable_variables = None + include_variables = ( + train_config.update_trainable_variables + if train_config.update_trainable_variables else None) + exclude_variables = ( + train_config.freeze_variables + if train_config.freeze_variables else None) + trainable_variables = slim.filter_variables( + tf.trainable_variables(), + include_patterns=include_variables, + exclude_patterns=exclude_variables) + + clip_gradients_value = None + if train_config.gradient_clipping_by_norm > 0: + clip_gradients_value = train_config.gradient_clipping_by_norm + + if not use_tpu: + for var in optimizer_summary_vars: + tf.summary.scalar(var.op.name, var) + summaries = [] if use_tpu else None + if train_config.summarize_gradients: + summaries = ['gradients', 'gradient_norm', 'global_gradient_norm'] + train_op = slim.optimizers.optimize_loss( + loss=total_loss, + global_step=global_step, + learning_rate=None, + clip_gradients=clip_gradients_value, + optimizer=training_optimizer, + update_ops=detection_model.updates(), + variables=trainable_variables, + summaries=summaries, + name='') # Preventing scope prefix on all variables. + + if mode == tf.estimator.ModeKeys.PREDICT: + exported_output = exporter_lib.add_output_tensor_nodes(detections) + export_outputs = { + tf.saved_model.signature_constants.PREDICT_METHOD_NAME: + tf.estimator.export.PredictOutput(exported_output) + } + + eval_metric_ops = None + scaffold = None + if mode == tf.estimator.ModeKeys.EVAL: + class_agnostic = ( + fields.DetectionResultFields.detection_classes not in detections) + groundtruth = _prepare_groundtruth_for_eval( + detection_model, class_agnostic, + eval_input_config.max_number_of_boxes) + use_original_images = fields.InputDataFields.original_image in features + if use_original_images: + eval_images = features[fields.InputDataFields.original_image] + true_image_shapes = tf.slice( + features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3]) + original_image_spatial_shapes = features[fields.InputDataFields + .original_image_spatial_shape] + else: + eval_images = features[fields.InputDataFields.image] + true_image_shapes = None + original_image_spatial_shapes = None + + eval_dict = eval_util.result_dict_for_batched_example( + eval_images, + features[inputs.HASH_KEY], + detections, + groundtruth, + class_agnostic=class_agnostic, + scale_to_absolute=True, + original_image_spatial_shapes=original_image_spatial_shapes, + true_image_shapes=true_image_shapes) + + if fields.InputDataFields.image_additional_channels in features: + eval_dict[fields.InputDataFields.image_additional_channels] = features[ + fields.InputDataFields.image_additional_channels] + + if class_agnostic: + category_index = label_map_util.create_class_agnostic_category_index() + else: + category_index = label_map_util.create_category_index_from_labelmap( + eval_input_config.label_map_path) + vis_metric_ops = None + if not use_tpu and use_original_images: + keypoint_edges = [ + (kp.start, kp.end) for kp in eval_config.keypoint_edge] + + eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections( + category_index, + max_examples_to_draw=eval_config.num_visualizations, + max_boxes_to_draw=eval_config.max_num_boxes_to_visualize, + min_score_thresh=eval_config.min_score_threshold, + use_normalized_coordinates=False, + keypoint_edges=keypoint_edges or None) + vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops( + eval_dict) + + # Eval metrics on a single example. 
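+      # Detection metrics, per-loss means, optimizer summary variables and any
+      # visualization ops are merged into a single eval_metric_ops dictionary.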
+ eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators( + eval_config, list(category_index.values()), eval_dict) + for loss_key, loss_tensor in iter(losses_dict.items()): + eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor) + for var in optimizer_summary_vars: + eval_metric_ops[var.op.name] = (var, tf.no_op()) + if vis_metric_ops is not None: + eval_metric_ops.update(vis_metric_ops) + eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()} + + if eval_config.use_moving_averages: + variable_averages = tf.train.ExponentialMovingAverage(0.0) + variables_to_restore = variable_averages.variables_to_restore() + keep_checkpoint_every_n_hours = ( + train_config.keep_checkpoint_every_n_hours) + saver = tf.train.Saver( + variables_to_restore, + keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours) + scaffold = tf.train.Scaffold(saver=saver) + + # EVAL executes on CPU, so use regular non-TPU EstimatorSpec. + if use_tpu and mode != tf.estimator.ModeKeys.EVAL: + return tf.estimator.tpu.TPUEstimatorSpec( + mode=mode, + scaffold_fn=scaffold_fn, + predictions=detections, + loss=total_loss, + train_op=train_op, + eval_metrics=eval_metric_ops, + export_outputs=export_outputs) + else: + if scaffold is None: + keep_checkpoint_every_n_hours = ( + train_config.keep_checkpoint_every_n_hours) + saver = tf.train.Saver( + sharded=True, + keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours, + save_relative_paths=True) + tf.add_to_collection(tf.GraphKeys.SAVERS, saver) + scaffold = tf.train.Scaffold(saver=saver) + return tf.estimator.EstimatorSpec( + mode=mode, + predictions=detections, + loss=total_loss, + train_op=train_op, + eval_metric_ops=eval_metric_ops, + export_outputs=export_outputs, + scaffold=scaffold) + + return model_fn + + +def create_estimator_and_inputs(run_config, + hparams=None, + pipeline_config_path=None, + config_override=None, + train_steps=None, + sample_1_of_n_eval_examples=1, + sample_1_of_n_eval_on_train_examples=1, + model_fn_creator=create_model_fn, + use_tpu_estimator=False, + use_tpu=False, + num_shards=1, + params=None, + override_eval_num_epochs=True, + save_final_config=False, + postprocess_on_cpu=False, + export_to_tpu=None, + **kwargs): + """Creates `Estimator`, input functions, and steps. + + Args: + run_config: A `RunConfig`. + hparams: (optional) A `HParams`. + pipeline_config_path: A path to a pipeline config file. + config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to + override the config from `pipeline_config_path`. + train_steps: Number of training steps. If None, the number of training steps + is set from the `TrainConfig` proto. + sample_1_of_n_eval_examples: Integer representing how often an eval example + should be sampled. If 1, will sample all examples. + sample_1_of_n_eval_on_train_examples: Similar to + `sample_1_of_n_eval_examples`, except controls the sampling of training + data for evaluation. + model_fn_creator: A function that creates a `model_fn` for `Estimator`. + Follows the signature: + + * Args: + * `detection_model_fn`: Function that returns `DetectionModel` instance. + * `configs`: Dictionary of pipeline config objects. + * `hparams`: `HParams` object. + * Returns: + `model_fn` for `Estimator`. + + use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False, + an `Estimator` will be returned. + use_tpu: Boolean, whether training and evaluation should run on TPU. Only + used if `use_tpu_estimator` is True. + num_shards: Number of shards (TPU cores). 
Only used if `use_tpu_estimator` + is True. + params: Parameter dictionary passed from the estimator. Only used if + `use_tpu_estimator` is True. + override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for + eval_input. + save_final_config: Whether to save final config (obtained after applying + overrides) to `estimator.model_dir`. + postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true, + postprocess is scheduled on the host cpu. + export_to_tpu: When use_tpu and export_to_tpu are true, + `export_savedmodel()` exports a metagraph for serving on TPU besides the + one on CPU. + **kwargs: Additional keyword arguments for configuration override. + + Returns: + A dictionary with the following fields: + 'estimator': An `Estimator` or `TPUEstimator`. + 'train_input_fn': A training input function. + 'eval_input_fns': A list of all evaluation input functions. + 'eval_input_names': A list of names for each evaluation input. + 'eval_on_train_input_fn': An evaluation-on-train input function. + 'predict_input_fn': A prediction input function. + 'train_steps': Number of training steps. Either directly from input or from + configuration. + """ + get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[ + 'get_configs_from_pipeline_file'] + merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[ + 'merge_external_params_with_configs'] + create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[ + 'create_pipeline_proto_from_configs'] + create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn'] + create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn'] + create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn'] + detection_model_fn_base = MODEL_BUILD_UTIL_MAP['detection_model_fn_base'] + + configs = get_configs_from_pipeline_file( + pipeline_config_path, config_override=config_override) + kwargs.update({ + 'train_steps': train_steps, + 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu + }) + if sample_1_of_n_eval_examples >= 1: + kwargs.update({ + 'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples + }) + if override_eval_num_epochs: + kwargs.update({'eval_num_epochs': 1}) + tf.logging.warning( + 'Forced number of epochs for all eval validations to be 1.') + configs = merge_external_params_with_configs( + configs, hparams, kwargs_dict=kwargs) + model_config = configs['model'] + train_config = configs['train_config'] + train_input_config = configs['train_input_config'] + eval_config = configs['eval_config'] + eval_input_configs = configs['eval_input_configs'] + eval_on_train_input_config = copy.deepcopy(train_input_config) + eval_on_train_input_config.sample_1_of_n_examples = ( + sample_1_of_n_eval_on_train_examples) + if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1: + tf.logging.warning('Expected number of evaluation epochs is 1, but ' + 'instead encountered `eval_on_train_input_config' + '.num_epochs` = ' + '{}. Overwriting `num_epochs` to 1.'.format( + eval_on_train_input_config.num_epochs)) + eval_on_train_input_config.num_epochs = 1 + + # update train_steps from config but only when non-zero value is provided + if train_steps is None and train_config.num_steps != 0: + train_steps = train_config.num_steps + + detection_model_fn = functools.partial( + detection_model_fn_base, model_config=model_config) + + # Create the input functions for TRAIN/EVAL/PREDICT. 
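+  # One eval input function is created per eval input config; the first eval
+  # input config is also reused for the predict input function.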
+ train_input_fn = create_train_input_fn( + train_config=train_config, + train_input_config=train_input_config, + model_config=model_config) + eval_input_fns = [ + create_eval_input_fn( + eval_config=eval_config, + eval_input_config=eval_input_config, + model_config=model_config) for eval_input_config in eval_input_configs + ] + eval_input_names = [ + eval_input_config.name for eval_input_config in eval_input_configs + ] + eval_on_train_input_fn = create_eval_input_fn( + eval_config=eval_config, + eval_input_config=eval_on_train_input_config, + model_config=model_config) + predict_input_fn = create_predict_input_fn( + model_config=model_config, predict_input_config=eval_input_configs[0]) + + # Read export_to_tpu from hparams if not passed. + if export_to_tpu is None and hparams is not None: + export_to_tpu = hparams.get('export_to_tpu', False) + tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s', + use_tpu, export_to_tpu) + model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu, + postprocess_on_cpu) + if use_tpu_estimator: + estimator = tf.estimator.tpu.TPUEstimator( + model_fn=model_fn, + train_batch_size=train_config.batch_size, + # For each core, only batch size 1 is supported for eval. + eval_batch_size=num_shards * 1 if use_tpu else 1, + use_tpu=use_tpu, + config=run_config, + export_to_tpu=export_to_tpu, + eval_on_tpu=False, # Eval runs on CPU, so disable eval on TPU + params=params if params else {}) + else: + estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config) + + # Write the as-run pipeline config to disk. + if run_config.is_chief and save_final_config: + pipeline_config_final = create_pipeline_proto_from_configs(configs) + config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir) + + return dict( + estimator=estimator, + train_input_fn=train_input_fn, + eval_input_fns=eval_input_fns, + eval_input_names=eval_input_names, + eval_on_train_input_fn=eval_on_train_input_fn, + predict_input_fn=predict_input_fn, + train_steps=train_steps) + + +def create_train_and_eval_specs(train_input_fn, + eval_input_fns, + eval_on_train_input_fn, + predict_input_fn, + train_steps, + eval_on_train_data=False, + final_exporter_name='Servo', + eval_spec_names=None): + """Creates a `TrainSpec` and `EvalSpec`s. + + Args: + train_input_fn: Function that produces features and labels on train data. + eval_input_fns: A list of functions that produce features and labels on eval + data. + eval_on_train_input_fn: Function that produces features and labels for + evaluation on train data. + predict_input_fn: Function that produces features for inference. + train_steps: Number of training steps. + eval_on_train_data: Whether to evaluate model on training data. Default is + False. + final_exporter_name: String name given to `FinalExporter`. + eval_spec_names: A list of string names for each `EvalSpec`. + + Returns: + Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is + True, the last `EvalSpec` in the list will correspond to training data. The + rest EvalSpecs in the list are evaluation datas. + """ + train_spec = tf.estimator.TrainSpec( + input_fn=train_input_fn, max_steps=train_steps) + + if eval_spec_names is None: + eval_spec_names = [str(i) for i in range(len(eval_input_fns))] + + eval_specs = [] + for index, (eval_spec_name, eval_input_fn) in enumerate( + zip(eval_spec_names, eval_input_fns)): + # Uses final_exporter_name as exporter_name for the first eval spec for + # backward compatibility. 
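+    # Every subsequent eval spec uses '<final_exporter_name>_<eval_spec_name>'
+    # as its exporter name.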
+ if index == 0: + exporter_name = final_exporter_name + else: + exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name) + exporter = tf.estimator.FinalExporter( + name=exporter_name, serving_input_receiver_fn=predict_input_fn) + eval_specs.append( + tf.estimator.EvalSpec( + name=eval_spec_name, + input_fn=eval_input_fn, + steps=None, + exporters=exporter)) + + if eval_on_train_data: + eval_specs.append( + tf.estimator.EvalSpec( + name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None)) + + return train_spec, eval_specs + + +def _evaluate_checkpoint(estimator, + input_fn, + checkpoint_path, + name, + max_retries=0): + """Evaluates a checkpoint. + + Args: + estimator: Estimator object to use for evaluation. + input_fn: Input function to use for evaluation. + checkpoint_path: Path of the checkpoint to evaluate. + name: Namescope for eval summary. + max_retries: Maximum number of times to retry the evaluation on encountering + a tf.errors.InvalidArgumentError. If negative, will always retry the + evaluation. + + Returns: + Estimator evaluation results. + """ + always_retry = True if max_retries < 0 else False + retries = 0 + while always_retry or retries <= max_retries: + try: + return estimator.evaluate( + input_fn=input_fn, + steps=None, + checkpoint_path=checkpoint_path, + name=name) + except tf.errors.InvalidArgumentError as e: + if always_retry or retries < max_retries: + tf.logging.info('Retrying checkpoint evaluation after exception: %s', e) + retries += 1 + else: + raise e + + +def continuous_eval(estimator, + model_dir, + input_fn, + train_steps, + name, + max_retries=0): + """Perform continuous evaluation on checkpoints written to a model directory. + + Args: + estimator: Estimator object to use for evaluation. + model_dir: Model directory to read checkpoints for continuous evaluation. + input_fn: Input function to use for evaluation. + train_steps: Number of training steps. This is used to infer the last + checkpoint and stop evaluation loop. + name: Namescope for eval summary. + max_retries: Maximum number of times to retry the evaluation on encountering + a tf.errors.InvalidArgumentError. If negative, will always retry the + evaluation. + """ + + def terminate_eval(): + tf.logging.info('Terminating eval after 180 seconds of no checkpoints') + return True + + for ckpt in tf.train.checkpoints_iterator( + model_dir, min_interval_secs=180, timeout=None, + timeout_fn=terminate_eval): + + tf.logging.info('Starting Evaluation.') + try: + eval_results = _evaluate_checkpoint( + estimator=estimator, + input_fn=input_fn, + checkpoint_path=ckpt, + name=name, + max_retries=max_retries) + tf.logging.info('Eval results: %s' % eval_results) + + # Terminate eval job when final checkpoint is reached + current_step = int(os.path.basename(ckpt).split('-')[1]) + if current_step >= train_steps: + tf.logging.info( + 'Evaluation finished after training step %d' % current_step) + break + + except tf.errors.NotFoundError: + tf.logging.info( + 'Checkpoint %s no longer exists, skipping checkpoint' % ckpt) + + +def populate_experiment(run_config, + hparams, + pipeline_config_path, + train_steps=None, + eval_steps=None, + model_fn_creator=create_model_fn, + **kwargs): + """Populates an `Experiment` object. + + EXPERIMENT CLASS IS DEPRECATED. Please switch to + tf.estimator.train_and_evaluate. As an example, see model_main.py. + + Args: + run_config: A `RunConfig`. + hparams: A `HParams`. + pipeline_config_path: A path to a pipeline config file. + train_steps: Number of training steps. 
If None, the number of training steps + is set from the `TrainConfig` proto. + eval_steps: Number of evaluation steps per evaluation cycle. If None, the + number of evaluation steps is set from the `EvalConfig` proto. + model_fn_creator: A function that creates a `model_fn` for `Estimator`. + Follows the signature: + + * Args: + * `detection_model_fn`: Function that returns `DetectionModel` instance. + * `configs`: Dictionary of pipeline config objects. + * `hparams`: `HParams` object. + * Returns: + `model_fn` for `Estimator`. + + **kwargs: Additional keyword arguments for configuration override. + + Returns: + An `Experiment` that defines all aspects of training, evaluation, and + export. + """ + tf.logging.warning('Experiment is being deprecated. Please use ' + 'tf.estimator.train_and_evaluate(). See model_main.py for ' + 'an example.') + train_and_eval_dict = create_estimator_and_inputs( + run_config, + hparams, + pipeline_config_path, + train_steps=train_steps, + eval_steps=eval_steps, + model_fn_creator=model_fn_creator, + save_final_config=True, + **kwargs) + estimator = train_and_eval_dict['estimator'] + train_input_fn = train_and_eval_dict['train_input_fn'] + eval_input_fns = train_and_eval_dict['eval_input_fns'] + predict_input_fn = train_and_eval_dict['predict_input_fn'] + train_steps = train_and_eval_dict['train_steps'] + + export_strategies = [ + contrib_learn.utils.saved_model_export_utils.make_export_strategy( + serving_input_fn=predict_input_fn) + ] + + return contrib_learn.Experiment( + estimator=estimator, + train_input_fn=train_input_fn, + eval_input_fn=eval_input_fns[0], + train_steps=train_steps, + eval_steps=None, + export_strategies=export_strategies, + eval_delay_secs=120, + ) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7d4d81b2cb43e0faa3d84f48df91c27d0da217bc --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib_tf1_test.py @@ -0,0 +1,505 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for object detection model library.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import os +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection import inputs +from object_detection import model_hparams +from object_detection import model_lib +from object_detection.builders import model_builder +from object_detection.core import standard_fields as fields +from object_detection.utils import config_util +from object_detection.utils import tf_version + + +# Model for test. 
Options are: +# 'ssd_inception_v2_pets', 'faster_rcnn_resnet50_pets' +MODEL_NAME_FOR_TEST = 'ssd_inception_v2_pets' + +# Model for testing keypoints. +MODEL_NAME_FOR_KEYPOINTS_TEST = 'ssd_mobilenet_v1_fpp' + +# Model for testing tfSequenceExample inputs. +MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST = 'context_rcnn_camera_trap' + + +def _get_data_path(model_name): + """Returns an absolute path to TFRecord file.""" + if model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST: + return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', + 'snapshot_serengeti_sequence_examples.record') + else: + return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', + 'pets_examples.record') + + +def get_pipeline_config_path(model_name): + """Returns path to the local pipeline config file.""" + if model_name == MODEL_NAME_FOR_KEYPOINTS_TEST: + return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', + model_name + '.config') + elif model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST: + return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', + model_name + '.config') + else: + return os.path.join(tf.resource_loader.get_data_files_path(), 'samples', + 'configs', model_name + '.config') + + +def _get_labelmap_path(): + """Returns an absolute path to label map file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'data', + 'pet_label_map.pbtxt') + + +def _get_keypoints_labelmap_path(): + """Returns an absolute path to label map file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'data', + 'face_person_with_keypoints_label_map.pbtxt') + + +def _get_sequence_example_labelmap_path(): + """Returns an absolute path to label map file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'data', + 'snapshot_serengeti_label_map.pbtxt') + + +def _get_configs_for_model(model_name): + """Returns configurations for model.""" + filename = get_pipeline_config_path(model_name) + data_path = _get_data_path(model_name) + if model_name == MODEL_NAME_FOR_KEYPOINTS_TEST: + label_map_path = _get_keypoints_labelmap_path() + elif model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST: + label_map_path = _get_sequence_example_labelmap_path() + else: + label_map_path = _get_labelmap_path() + configs = config_util.get_configs_from_pipeline_file(filename) + override_dict = { + 'train_input_path': data_path, + 'eval_input_path': data_path, + 'label_map_path': label_map_path + } + configs = config_util.merge_external_params_with_configs( + configs, kwargs_dict=override_dict) + return configs + + +def _make_initializable_iterator(dataset): + """Creates an iterator, and initializes tables. + + Args: + dataset: A `tf.data.Dataset` object. + + Returns: + A `tf.data.Iterator`. 
+ """ + iterator = tf.data.make_initializable_iterator(dataset) + tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) + return iterator + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ModelLibTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + tf.reset_default_graph() + + def _assert_model_fn_for_train_eval(self, configs, mode, + class_agnostic=False): + model_config = configs['model'] + train_config = configs['train_config'] + with tf.Graph().as_default(): + if mode == 'train': + features, labels = _make_initializable_iterator( + inputs.create_train_input_fn(configs['train_config'], + configs['train_input_config'], + configs['model'])()).get_next() + model_mode = tf.estimator.ModeKeys.TRAIN + batch_size = train_config.batch_size + elif mode == 'eval': + features, labels = _make_initializable_iterator( + inputs.create_eval_input_fn(configs['eval_config'], + configs['eval_input_config'], + configs['model'])()).get_next() + model_mode = tf.estimator.ModeKeys.EVAL + batch_size = 1 + elif mode == 'eval_on_train': + features, labels = _make_initializable_iterator( + inputs.create_eval_input_fn(configs['eval_config'], + configs['train_input_config'], + configs['model'])()).get_next() + model_mode = tf.estimator.ModeKeys.EVAL + batch_size = 1 + + detection_model_fn = functools.partial( + model_builder.build, model_config=model_config, is_training=True) + + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + + model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams) + estimator_spec = model_fn(features, labels, model_mode) + + self.assertIsNotNone(estimator_spec.loss) + self.assertIsNotNone(estimator_spec.predictions) + if mode == 'eval' or mode == 'eval_on_train': + if class_agnostic: + self.assertNotIn('detection_classes', estimator_spec.predictions) + else: + detection_classes = estimator_spec.predictions['detection_classes'] + self.assertEqual(batch_size, detection_classes.shape.as_list()[0]) + self.assertEqual(tf.float32, detection_classes.dtype) + detection_boxes = estimator_spec.predictions['detection_boxes'] + detection_scores = estimator_spec.predictions['detection_scores'] + num_detections = estimator_spec.predictions['num_detections'] + self.assertEqual(batch_size, detection_boxes.shape.as_list()[0]) + self.assertEqual(tf.float32, detection_boxes.dtype) + self.assertEqual(batch_size, detection_scores.shape.as_list()[0]) + self.assertEqual(tf.float32, detection_scores.dtype) + self.assertEqual(tf.float32, num_detections.dtype) + if mode == 'eval': + self.assertIn('Detections_Left_Groundtruth_Right/0', + estimator_spec.eval_metric_ops) + if model_mode == tf.estimator.ModeKeys.TRAIN: + self.assertIsNotNone(estimator_spec.train_op) + return estimator_spec + + def _assert_model_fn_for_predict(self, configs): + model_config = configs['model'] + + with tf.Graph().as_default(): + features, _ = _make_initializable_iterator( + inputs.create_eval_input_fn(configs['eval_config'], + configs['eval_input_config'], + configs['model'])()).get_next() + detection_model_fn = functools.partial( + model_builder.build, model_config=model_config, is_training=False) + + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + + model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams) + estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT) + + self.assertIsNone(estimator_spec.loss) + self.assertIsNone(estimator_spec.train_op) 
+ self.assertIsNotNone(estimator_spec.predictions) + self.assertIsNotNone(estimator_spec.export_outputs) + self.assertIn(tf.saved_model.signature_constants.PREDICT_METHOD_NAME, + estimator_spec.export_outputs) + + def test_model_fn_in_train_mode(self): + """Tests the model function in TRAIN mode.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + self._assert_model_fn_for_train_eval(configs, 'train') + + def test_model_fn_in_train_mode_sequences(self): + """Tests the model function in TRAIN mode.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST) + self._assert_model_fn_for_train_eval(configs, 'train') + + def test_model_fn_in_train_mode_freeze_all_variables(self): + """Tests model_fn TRAIN mode with all variables frozen.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + configs['train_config'].freeze_variables.append('.*') + with self.assertRaisesRegexp(ValueError, 'No variables to optimize'): + self._assert_model_fn_for_train_eval(configs, 'train') + + def test_model_fn_in_train_mode_freeze_all_included_variables(self): + """Tests model_fn TRAIN mode with all included variables frozen.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + train_config = configs['train_config'] + train_config.update_trainable_variables.append('FeatureExtractor') + train_config.freeze_variables.append('.*') + with self.assertRaisesRegexp(ValueError, 'No variables to optimize'): + self._assert_model_fn_for_train_eval(configs, 'train') + + def test_model_fn_in_train_mode_freeze_box_predictor(self): + """Tests model_fn TRAIN mode with FeatureExtractor variables frozen.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + train_config = configs['train_config'] + train_config.update_trainable_variables.append('FeatureExtractor') + train_config.update_trainable_variables.append('BoxPredictor') + train_config.freeze_variables.append('FeatureExtractor') + self._assert_model_fn_for_train_eval(configs, 'train') + + def test_model_fn_in_eval_mode(self): + """Tests the model function in EVAL mode.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + self._assert_model_fn_for_train_eval(configs, 'eval') + + def test_model_fn_in_eval_mode_sequences(self): + """Tests the model function in EVAL mode.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST) + self._assert_model_fn_for_train_eval(configs, 'eval') + + def test_model_fn_in_keypoints_eval_mode(self): + """Tests the model function in EVAL mode with keypoints config.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_KEYPOINTS_TEST) + estimator_spec = self._assert_model_fn_for_train_eval(configs, 'eval') + metric_ops = estimator_spec.eval_metric_ops + self.assertIn('Keypoints_Precision/mAP ByCategory/face', metric_ops) + self.assertIn('Keypoints_Precision/mAP ByCategory/PERSON', metric_ops) + detection_keypoints = estimator_spec.predictions['detection_keypoints'] + self.assertEqual(1, detection_keypoints.shape.as_list()[0]) + self.assertEqual(tf.float32, detection_keypoints.dtype) + + def test_model_fn_in_eval_on_train_mode(self): + """Tests the model function in EVAL mode with train data.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + self._assert_model_fn_for_train_eval(configs, 'eval_on_train') + + def test_model_fn_in_predict_mode(self): + """Tests the model function in PREDICT mode.""" + configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) + self._assert_model_fn_for_predict(configs) + + def test_create_estimator_and_inputs(self): + """Tests that 
Estimator and input function are constructed correctly.""" + run_config = tf.estimator.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + train_steps = 20 + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config, + hparams, + pipeline_config_path, + train_steps=train_steps) + estimator = train_and_eval_dict['estimator'] + train_steps = train_and_eval_dict['train_steps'] + self.assertIsInstance(estimator, tf.estimator.Estimator) + self.assertEqual(20, train_steps) + self.assertIn('train_input_fn', train_and_eval_dict) + self.assertIn('eval_input_fns', train_and_eval_dict) + self.assertIn('eval_on_train_input_fn', train_and_eval_dict) + + def test_create_estimator_and_inputs_sequence_example(self): + """Tests that Estimator and input function are constructed correctly.""" + run_config = tf.estimator.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path( + MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST) + train_steps = 20 + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config, + hparams, + pipeline_config_path, + train_steps=train_steps) + estimator = train_and_eval_dict['estimator'] + train_steps = train_and_eval_dict['train_steps'] + self.assertIsInstance(estimator, tf.estimator.Estimator) + self.assertEqual(20, train_steps) + self.assertIn('train_input_fn', train_and_eval_dict) + self.assertIn('eval_input_fns', train_and_eval_dict) + self.assertIn('eval_on_train_input_fn', train_and_eval_dict) + + def test_create_estimator_with_default_train_eval_steps(self): + """Tests that number of train/eval defaults to config values.""" + run_config = tf.estimator.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) + config_train_steps = configs['train_config'].num_steps + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config, hparams, pipeline_config_path) + estimator = train_and_eval_dict['estimator'] + train_steps = train_and_eval_dict['train_steps'] + + self.assertIsInstance(estimator, tf.estimator.Estimator) + self.assertEqual(config_train_steps, train_steps) + + def test_create_tpu_estimator_and_inputs(self): + """Tests that number of train/eval defaults to config values.""" + run_config = tf.estimator.tpu.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + train_steps = 20 + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config, + hparams, + pipeline_config_path, + train_steps=train_steps, + use_tpu_estimator=True) + estimator = train_and_eval_dict['estimator'] + train_steps = train_and_eval_dict['train_steps'] + + self.assertIsInstance(estimator, tf.estimator.tpu.TPUEstimator) + self.assertEqual(20, train_steps) + + def test_create_train_and_eval_specs(self): + """Tests that `TrainSpec` and `EvalSpec` is created correctly.""" + run_config = tf.estimator.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + train_steps = 20 + train_and_eval_dict = model_lib.create_estimator_and_inputs( + 
run_config, + hparams, + pipeline_config_path, + train_steps=train_steps) + train_input_fn = train_and_eval_dict['train_input_fn'] + eval_input_fns = train_and_eval_dict['eval_input_fns'] + eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn'] + predict_input_fn = train_and_eval_dict['predict_input_fn'] + train_steps = train_and_eval_dict['train_steps'] + + train_spec, eval_specs = model_lib.create_train_and_eval_specs( + train_input_fn, + eval_input_fns, + eval_on_train_input_fn, + predict_input_fn, + train_steps, + eval_on_train_data=True, + final_exporter_name='exporter', + eval_spec_names=['holdout']) + self.assertEqual(train_steps, train_spec.max_steps) + self.assertEqual(2, len(eval_specs)) + self.assertEqual(None, eval_specs[0].steps) + self.assertEqual('holdout', eval_specs[0].name) + self.assertEqual('exporter', eval_specs[0].exporters[0].name) + self.assertEqual(None, eval_specs[1].steps) + self.assertEqual('eval_on_train', eval_specs[1].name) + + def test_experiment(self): + """Tests that the `Experiment` object is constructed correctly.""" + run_config = tf.estimator.RunConfig() + hparams = model_hparams.create_hparams( + hparams_overrides='load_pretrained=false') + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + experiment = model_lib.populate_experiment( + run_config, + hparams, + pipeline_config_path, + train_steps=10, + eval_steps=20) + self.assertEqual(10, experiment.train_steps) + self.assertEqual(None, experiment.eval_steps) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class UnbatchTensorsTest(tf.test.TestCase): + + def test_unbatch_without_unpadding(self): + image_placeholder = tf.placeholder(tf.float32, [2, None, None, None]) + groundtruth_boxes_placeholder = tf.placeholder(tf.float32, [2, None, None]) + groundtruth_classes_placeholder = tf.placeholder(tf.float32, + [2, None, None]) + groundtruth_weights_placeholder = tf.placeholder(tf.float32, [2, None]) + + tensor_dict = { + fields.InputDataFields.image: + image_placeholder, + fields.InputDataFields.groundtruth_boxes: + groundtruth_boxes_placeholder, + fields.InputDataFields.groundtruth_classes: + groundtruth_classes_placeholder, + fields.InputDataFields.groundtruth_weights: + groundtruth_weights_placeholder + } + unbatched_tensor_dict = model_lib.unstack_batch( + tensor_dict, unpad_groundtruth_tensors=False) + + with self.test_session() as sess: + unbatched_tensor_dict_out = sess.run( + unbatched_tensor_dict, + feed_dict={ + image_placeholder: + np.random.rand(2, 4, 4, 3).astype(np.float32), + groundtruth_boxes_placeholder: + np.random.rand(2, 5, 4).astype(np.float32), + groundtruth_classes_placeholder: + np.random.rand(2, 5, 6).astype(np.float32), + groundtruth_weights_placeholder: + np.random.rand(2, 5).astype(np.float32) + }) + for image_out in unbatched_tensor_dict_out[fields.InputDataFields.image]: + self.assertAllEqual(image_out.shape, [4, 4, 3]) + for groundtruth_boxes_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_boxes]: + self.assertAllEqual(groundtruth_boxes_out.shape, [5, 4]) + for groundtruth_classes_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_classes]: + self.assertAllEqual(groundtruth_classes_out.shape, [5, 6]) + for groundtruth_weights_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_weights]: + self.assertAllEqual(groundtruth_weights_out.shape, [5]) + + def test_unbatch_and_unpad_groundtruth_tensors(self): + image_placeholder = tf.placeholder(tf.float32, [2, 
None, None, None]) + groundtruth_boxes_placeholder = tf.placeholder(tf.float32, [2, 5, None]) + groundtruth_classes_placeholder = tf.placeholder(tf.float32, [2, 5, None]) + groundtruth_weights_placeholder = tf.placeholder(tf.float32, [2, 5]) + num_groundtruth_placeholder = tf.placeholder(tf.int32, [2]) + + tensor_dict = { + fields.InputDataFields.image: + image_placeholder, + fields.InputDataFields.groundtruth_boxes: + groundtruth_boxes_placeholder, + fields.InputDataFields.groundtruth_classes: + groundtruth_classes_placeholder, + fields.InputDataFields.groundtruth_weights: + groundtruth_weights_placeholder, + fields.InputDataFields.num_groundtruth_boxes: + num_groundtruth_placeholder + } + unbatched_tensor_dict = model_lib.unstack_batch( + tensor_dict, unpad_groundtruth_tensors=True) + with self.test_session() as sess: + unbatched_tensor_dict_out = sess.run( + unbatched_tensor_dict, + feed_dict={ + image_placeholder: + np.random.rand(2, 4, 4, 3).astype(np.float32), + groundtruth_boxes_placeholder: + np.random.rand(2, 5, 4).astype(np.float32), + groundtruth_classes_placeholder: + np.random.rand(2, 5, 6).astype(np.float32), + groundtruth_weights_placeholder: + np.random.rand(2, 5).astype(np.float32), + num_groundtruth_placeholder: + np.array([3, 3], np.int32) + }) + for image_out in unbatched_tensor_dict_out[fields.InputDataFields.image]: + self.assertAllEqual(image_out.shape, [4, 4, 3]) + for groundtruth_boxes_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_boxes]: + self.assertAllEqual(groundtruth_boxes_out.shape, [3, 4]) + for groundtruth_classes_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_classes]: + self.assertAllEqual(groundtruth_classes_out.shape, [3, 6]) + for groundtruth_weights_out in unbatched_tensor_dict_out[ + fields.InputDataFields.groundtruth_weights]: + self.assertAllEqual(groundtruth_weights_out.shape, [3]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f65273660195752227b2bcc90dceb04184a6eb62 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib_tf2_test.py @@ -0,0 +1,230 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for object detection model library.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tempfile +import unittest +import numpy as np +import six +import tensorflow.compat.v1 as tf +import tensorflow.compat.v2 as tf2 + +from object_detection import inputs +from object_detection import model_lib_v2 +from object_detection.builders import model_builder +from object_detection.core import model +from object_detection.protos import train_pb2 +from object_detection.utils import config_util +from object_detection.utils import tf_version + +if six.PY2: + import mock # pylint: disable=g-importing-member,g-import-not-at-top +else: + from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top + +# Model for test. Current options are: +# 'ssd_mobilenet_v2_pets_keras' +MODEL_NAME_FOR_TEST = 'ssd_mobilenet_v2_pets_keras' + + +def _get_data_path(): + """Returns an absolute path to TFRecord file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', + 'pets_examples.record') + + +def get_pipeline_config_path(model_name): + """Returns path to the local pipeline config file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'samples', + 'configs', model_name + '.config') + + +def _get_labelmap_path(): + """Returns an absolute path to label map file.""" + return os.path.join(tf.resource_loader.get_data_files_path(), 'data', + 'pet_label_map.pbtxt') + + +def _get_config_kwarg_overrides(): + """Returns overrides to the configs that insert the correct local paths.""" + data_path = _get_data_path() + label_map_path = _get_labelmap_path() + return { + 'train_input_path': data_path, + 'eval_input_path': data_path, + 'label_map_path': label_map_path + } + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ModelLibTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): # pylint:disable=g-missing-super-call + tf.keras.backend.clear_session() + + def test_train_loop_then_eval_loop(self): + """Tests that Estimator and input function are constructed correctly.""" + model_dir = tf.test.get_temp_dir() + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + new_pipeline_config_path = os.path.join(model_dir, 'new_pipeline.config') + config_util.clear_fine_tune_checkpoint(pipeline_config_path, + new_pipeline_config_path) + config_kwarg_overrides = _get_config_kwarg_overrides() + + train_steps = 2 + strategy = tf2.distribute.OneDeviceStrategy(device='/cpu:0') + with strategy.scope(): + model_lib_v2.train_loop( + new_pipeline_config_path, + model_dir=model_dir, + train_steps=train_steps, + checkpoint_every_n=1, + **config_kwarg_overrides) + + model_lib_v2.eval_continuously( + new_pipeline_config_path, + model_dir=model_dir, + checkpoint_dir=model_dir, + train_steps=train_steps, + wait_interval=1, + timeout=10, + **config_kwarg_overrides) + + +class SimpleModel(model.DetectionModel): + """A model with a single weight vector.""" + + def __init__(self, num_classes=1): + super(SimpleModel, self).__init__(num_classes) + self.weight = tf.keras.backend.variable(np.ones(10), name='weight') + + def postprocess(self, prediction_dict, true_image_shapes): + return {} + + def updates(self): + return [] + + def restore_map(self, *args, **kwargs): + pass + + def restore_from_objects(self, fine_tune_checkpoint_type): + return {'model': self} + + def 
preprocess(self, _): + return tf.zeros((1, 128, 128, 3)), tf.constant([[128, 128, 3]]) + + def provide_groundtruth(self, *args, **kwargs): + pass + + def predict(self, pred_inputs, true_image_shapes): + return {'prediction': + tf.abs(tf.reduce_sum(self.weight) * tf.reduce_sum(pred_inputs))} + + def loss(self, prediction_dict, _): + return {'loss': tf.reduce_sum(prediction_dict['prediction'])} + + def regularization_losses(self): + return [] + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ModelCheckpointTest(tf.test.TestCase): + """Test for model checkpoint related functionality.""" + + def test_checkpoint_max_to_keep(self): + """Test that only the most recent checkpoints are kept.""" + + strategy = tf2.distribute.OneDeviceStrategy(device='/cpu:0') + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + with strategy.scope(): + mock_builder.return_value = SimpleModel() + model_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + new_pipeline_config_path = os.path.join(model_dir, 'new_pipeline.config') + config_util.clear_fine_tune_checkpoint(pipeline_config_path, + new_pipeline_config_path) + config_kwarg_overrides = _get_config_kwarg_overrides() + + with strategy.scope(): + model_lib_v2.train_loop( + new_pipeline_config_path, model_dir=model_dir, + train_steps=20, checkpoint_every_n=2, checkpoint_max_to_keep=3, + **config_kwarg_overrides + ) + ckpt_files = tf.io.gfile.glob(os.path.join(model_dir, 'ckpt-*.index')) + self.assertEqual(len(ckpt_files), 3, + '{} not of length 3.'.format(ckpt_files)) + + +class IncompatibleModel(SimpleModel): + + def restore_from_objects(self, *args, **kwargs): + return {'weight': self.weight} + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CheckpointV2Test(tf.test.TestCase): + + def setUp(self): + super(CheckpointV2Test, self).setUp() + + self._model = SimpleModel() + tf.keras.backend.set_value(self._model.weight, np.ones(10) * 42) + ckpt = tf.train.Checkpoint(model=self._model) + + self._test_dir = tf.test.get_temp_dir() + self._ckpt_path = ckpt.save(os.path.join(self._test_dir, 'ckpt')) + tf.keras.backend.set_value(self._model.weight, np.ones(10)) + + pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) + configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) + configs = config_util.merge_external_params_with_configs( + configs, kwargs_dict=_get_config_kwarg_overrides()) + self._train_input_fn = inputs.create_train_input_fn( + configs['train_config'], + configs['train_input_config'], + configs['model']) + + def test_restore_v2(self): + """Test that restoring a v2 style checkpoint works.""" + + model_lib_v2.load_fine_tune_checkpoint( + self._model, self._ckpt_path, checkpoint_type='', + checkpoint_version=train_pb2.CheckpointVersion.V2, + input_dataset=self._train_input_fn(), + unpad_groundtruth_tensors=True) + np.testing.assert_allclose(self._model.weight.numpy(), 42) + + def test_restore_map_incompatible_error(self): + """Test that restoring an incompatible restore map causes an error.""" + + with self.assertRaisesRegex(TypeError, + r'.*received a \(str -> ResourceVariable\).*'): + model_lib_v2.load_fine_tune_checkpoint( + IncompatibleModel(), self._ckpt_path, checkpoint_type='', + checkpoint_version=train_pb2.CheckpointVersion.V2, + input_dataset=self._train_input_fn(), + unpad_groundtruth_tensors=True) + + +if __name__ == '__main__': + tf.test.main() diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib_v2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..f6c8a9ea7be787e04aec3ece52ca6fbc197be6dd --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_lib_v2.py @@ -0,0 +1,986 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Constructs model, inputs, and training environment.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import os +import time + +import tensorflow.compat.v1 as tf +import tensorflow.compat.v2 as tf2 + +from object_detection import eval_util +from object_detection import inputs +from object_detection import model_lib +from object_detection.builders import optimizer_builder +from object_detection.core import standard_fields as fields +from object_detection.protos import train_pb2 +from object_detection.utils import config_util +from object_detection.utils import label_map_util +from object_detection.utils import ops +from object_detection.utils import visualization_utils as vutils + +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib import tpu as contrib_tpu +except ImportError: + # TF 2.0 doesn't ship with contrib. + pass +# pylint: enable=g-import-not-at-top + +MODEL_BUILD_UTIL_MAP = model_lib.MODEL_BUILD_UTIL_MAP + + +RESTORE_MAP_ERROR_TEMPLATE = ( + 'Since we are restoring a v2 style checkpoint' + ' restore_map was expected to return a (str -> Model) mapping,' + ' but we received a ({} -> {}) mapping instead.' +) + + +def _compute_losses_and_predictions_dicts( + model, features, labels, + add_regularization_loss=True): + """Computes the losses dict and predictions dict for a model on inputs. + + Args: + model: a DetectionModel (based on Keras). + features: Dictionary of feature tensors from the input dataset. + Should be in the format output by `inputs.train_input` and + `inputs.eval_input`. + features[fields.InputDataFields.image] is a [batch_size, H, W, C] + float32 tensor with preprocessed images. + features[HASH_KEY] is a [batch_size] int32 tensor representing unique + identifiers for the images. + features[fields.InputDataFields.true_image_shape] is a [batch_size, 3] + int32 tensor representing the true image shapes, as preprocessed + images could be padded. + features[fields.InputDataFields.original_image] (optional) is a + [batch_size, H, W, C] float32 tensor with original images. + labels: A dictionary of groundtruth tensors post-unstacking. The original + labels are of the form returned by `inputs.train_input` and + `inputs.eval_input`. The shapes may have been modified by unstacking with + `model_lib.unstack_batch`. However, the dictionary includes the following + fields. 
+ labels[fields.InputDataFields.num_groundtruth_boxes] is a + int32 tensor indicating the number of valid groundtruth boxes + per image. + labels[fields.InputDataFields.groundtruth_boxes] is a float32 tensor + containing the corners of the groundtruth boxes. + labels[fields.InputDataFields.groundtruth_classes] is a float32 + one-hot tensor of classes. + labels[fields.InputDataFields.groundtruth_weights] is a float32 tensor + containing groundtruth weights for the boxes. + -- Optional -- + labels[fields.InputDataFields.groundtruth_instance_masks] is a + float32 tensor containing only binary values, which represent + instance masks for objects. + labels[fields.InputDataFields.groundtruth_keypoints] is a + float32 tensor containing keypoints for each box. + labels[fields.InputDataFields.groundtruth_dp_num_points] is an int32 + tensor with the number of sampled DensePose points per object. + labels[fields.InputDataFields.groundtruth_dp_part_ids] is an int32 + tensor with the DensePose part ids (0-indexed) per object. + labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a + float32 tensor with the DensePose surface coordinates. + labels[fields.InputDataFields.groundtruth_group_of] is a tf.bool tensor + containing group_of annotations. + labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32 + k-hot tensor of classes. + labels[fields.InputDataFields.groundtruth_track_ids] is a int32 + tensor of track IDs. + add_regularization_loss: Whether or not to include the model's + regularization loss in the losses dictionary. + + Returns: + A tuple containing the losses dictionary (with the total loss under + the key 'Loss/total_loss'), and the predictions dictionary produced by + `model.predict`. + + """ + model_lib.provide_groundtruth(model, labels) + preprocessed_images = features[fields.InputDataFields.image] + + prediction_dict = model.predict( + preprocessed_images, + features[fields.InputDataFields.true_image_shape], + **model.get_side_inputs(features)) + prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict) + + losses_dict = model.loss( + prediction_dict, features[fields.InputDataFields.true_image_shape]) + losses = [loss_tensor for loss_tensor in losses_dict.values()] + if add_regularization_loss: + # TODO(kaftan): As we figure out mixed precision & bfloat 16, we may + ## need to convert these regularization losses from bfloat16 to float32 + ## as well. + regularization_losses = model.regularization_losses() + if regularization_losses: + regularization_losses = ops.bfloat16_to_float32_nested( + regularization_losses) + regularization_loss = tf.add_n( + regularization_losses, name='regularization_loss') + losses.append(regularization_loss) + losses_dict['Loss/regularization_loss'] = regularization_loss + + total_loss = tf.add_n(losses, name='total_loss') + losses_dict['Loss/total_loss'] = total_loss + + return losses_dict, prediction_dict + + +# TODO(kaftan): Explore removing learning_rate from this method & returning +## The full losses dict instead of just total_loss, then doing all summaries +## saving in a utility method called by the outer training loop. +# TODO(kaftan): Explore adding gradient summaries +def eager_train_step(detection_model, + features, + labels, + unpad_groundtruth_tensors, + optimizer, + learning_rate, + add_regularization_loss=True, + clip_gradients_value=None, + global_step=None, + num_replicas=1.0): + """Process a single training batch. 
+ + This method computes the loss for the model on a single training batch, + while tracking the gradients with a gradient tape. It then updates the + model variables with the optimizer, clipping the gradients if + clip_gradients_value is present. + + This method can run eagerly or inside a tf.function. + + Args: + detection_model: A DetectionModel (based on Keras) to train. + features: Dictionary of feature tensors from the input dataset. + Should be in the format output by `inputs.train_input. + features[fields.InputDataFields.image] is a [batch_size, H, W, C] + float32 tensor with preprocessed images. + features[HASH_KEY] is a [batch_size] int32 tensor representing unique + identifiers for the images. + features[fields.InputDataFields.true_image_shape] is a [batch_size, 3] + int32 tensor representing the true image shapes, as preprocessed + images could be padded. + features[fields.InputDataFields.original_image] (optional, not used + during training) is a + [batch_size, H, W, C] float32 tensor with original images. + labels: A dictionary of groundtruth tensors. This method unstacks + these labels using model_lib.unstack_batch. The stacked labels are of + the form returned by `inputs.train_input` and `inputs.eval_input`. + labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size] + int32 tensor indicating the number of valid groundtruth boxes + per image. + labels[fields.InputDataFields.groundtruth_boxes] is a + [batch_size, num_boxes, 4] float32 tensor containing the corners of + the groundtruth boxes. + labels[fields.InputDataFields.groundtruth_classes] is a + [batch_size, num_boxes, num_classes] float32 one-hot tensor of + classes. num_classes includes the background class. + labels[fields.InputDataFields.groundtruth_weights] is a + [batch_size, num_boxes] float32 tensor containing groundtruth weights + for the boxes. + -- Optional -- + labels[fields.InputDataFields.groundtruth_instance_masks] is a + [batch_size, num_boxes, H, W] float32 tensor containing only binary + values, which represent instance masks for objects. + labels[fields.InputDataFields.groundtruth_keypoints] is a + [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing + keypoints for each box. + labels[fields.InputDataFields.groundtruth_dp_num_points] is a + [batch_size, num_boxes] int32 tensor with the number of DensePose + sampled points per instance. + labels[fields.InputDataFields.groundtruth_dp_part_ids] is a + [batch_size, num_boxes, max_sampled_points] int32 tensor with the + part ids (0-indexed) for each instance. + labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a + [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the + surface coordinates for each point. Each surface coordinate is of the + form (y, x, v, u) where (y, x) are normalized image locations and + (v, u) are part-relative normalized surface coordinates. + labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32 + k-hot tensor of classes. + labels[fields.InputDataFields.groundtruth_track_ids] is a int32 + tensor of track IDs. + unpad_groundtruth_tensors: A parameter passed to unstack_batch. + optimizer: The training optimizer that will update the variables. + learning_rate: The learning rate tensor for the current training step. + This is used only for TensorBoard logging purposes, it does not affect + model training. + add_regularization_loss: Whether or not to include the model's + regularization loss in the losses dictionary. 
+ clip_gradients_value: If this is present, clip the gradients global norm + at this value using `tf.clip_by_global_norm`. + global_step: The current training step. Used for TensorBoard logging + purposes. This step is not updated by this function and must be + incremented separately. + num_replicas: The number of replicas in the current distribution strategy. + This is used to scale the total loss so that training in a distribution + strategy works correctly. + + Returns: + The total loss observed at this training step + """ + # """Execute a single training step in the TF v2 style loop.""" + is_training = True + + detection_model._is_training = is_training # pylint: disable=protected-access + tf.keras.backend.set_learning_phase(is_training) + + labels = model_lib.unstack_batch( + labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) + + with tf.GradientTape() as tape: + losses_dict, _ = _compute_losses_and_predictions_dicts( + detection_model, features, labels, add_regularization_loss) + + total_loss = losses_dict['Loss/total_loss'] + + # Normalize loss for num replicas + total_loss = tf.math.divide(total_loss, + tf.constant(num_replicas, dtype=tf.float32)) + losses_dict['Loss/normalized_total_loss'] = total_loss + + for loss_type in losses_dict: + tf.compat.v2.summary.scalar( + loss_type, losses_dict[loss_type], step=global_step) + + trainable_variables = detection_model.trainable_variables + + gradients = tape.gradient(total_loss, trainable_variables) + + if clip_gradients_value: + gradients, _ = tf.clip_by_global_norm(gradients, clip_gradients_value) + optimizer.apply_gradients(zip(gradients, trainable_variables)) + tf.compat.v2.summary.scalar('learning_rate', learning_rate, step=global_step) + tf.compat.v2.summary.image( + name='train_input_images', + step=global_step, + data=features[fields.InputDataFields.image], + max_outputs=3) + return total_loss + + +def validate_tf_v2_checkpoint_restore_map(checkpoint_restore_map): + """Ensure that the given dict is a valid TF v2 style restore map. + + Args: + checkpoint_restore_map: A nested dict mapping strings to + tf.Module or tf.train.Checkpoint objects. + + Raises: + TypeError: If the keys in checkpoint_restore_map are not strings or if + the values are not tf.Module or tf.train.Checkpoint objects. + + """ + + for key, value in checkpoint_restore_map.items(): + if not (isinstance(key, str) and + (isinstance(value, tf.Module) + or isinstance(value, tf.train.Checkpoint))): + if isinstance(key, str) and isinstance(value, dict): + validate_tf_v2_checkpoint_restore_map(value) + else: + raise TypeError( + RESTORE_MAP_ERROR_TEMPLATE.format(key.__class__.__name__, + value.__class__.__name__)) + + +def is_object_based_checkpoint(checkpoint_path): + """Returns true if `checkpoint_path` points to an object-based checkpoint.""" + var_names = [var[0] for var in tf.train.list_variables(checkpoint_path)] + return '_CHECKPOINTABLE_OBJECT_GRAPH' in var_names + + +def load_fine_tune_checkpoint( + model, checkpoint_path, checkpoint_type, checkpoint_version, input_dataset, + unpad_groundtruth_tensors): + """Load a fine tuning classification or detection checkpoint. + + To make sure the model variables are all built, this method first executes + the model by computing a dummy loss. (Models might not have built their + variables before their first execution) + + It then loads an object-based classification or detection checkpoint. + + This method updates the model in-place and does not return a value. + + Args: + model: A DetectionModel (based on Keras) to load a fine-tuning + checkpoint for.
+ checkpoint_path: Directory with checkpoints file or path to checkpoint. + checkpoint_type: Whether to restore from a full detection + checkpoint (with compatible variable names) or to restore from a + classification checkpoint for initialization prior to training. + Valid values: `detection`, `classification`. + checkpoint_version: train_pb2.CheckpointVersion.V1 or V2 enum indicating + whether to load checkpoints in V1 style or V2 style. In this binary + we only support V2 style (object-based) checkpoints. + input_dataset: The tf.data Dataset the model is being trained on. Needed + to get the shapes for the dummy loss computation. + unpad_groundtruth_tensors: A parameter passed to unstack_batch. + + Raises: + IOError: if `checkpoint_path` does not point at a valid object-based + checkpoint + ValueError: if `checkpoint_version` is not train_pb2.CheckpointVersion.V2 + """ + if not is_object_based_checkpoint(checkpoint_path): + raise IOError('Checkpoint is expected to be an object-based checkpoint.') + if checkpoint_version == train_pb2.CheckpointVersion.V1: + raise ValueError('Checkpoint version should be V2') + + features, labels = iter(input_dataset).next() + + @tf.function + def _dummy_computation_fn(features, labels): + model._is_training = False # pylint: disable=protected-access + tf.keras.backend.set_learning_phase(False) + + labels = model_lib.unstack_batch( + labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) + + return _compute_losses_and_predictions_dicts( + model, + features, + labels) + + strategy = tf.compat.v2.distribute.get_strategy() + if hasattr(tf.distribute.Strategy, 'run'): + strategy.run( + _dummy_computation_fn, args=( + features, + labels, + )) + else: + strategy.experimental_run_v2( + _dummy_computation_fn, args=( + features, + labels, + )) + + restore_from_objects_dict = model.restore_from_objects( + fine_tune_checkpoint_type=checkpoint_type) + validate_tf_v2_checkpoint_restore_map(restore_from_objects_dict) + ckpt = tf.train.Checkpoint(**restore_from_objects_dict) + ckpt.restore(checkpoint_path).assert_existing_objects_matched() + + +def get_filepath(strategy, filepath): + """Get appropriate filepath for worker. + + Args: + strategy: A tf.distribute.Strategy object. + filepath: A path to where the Checkpoint object is stored. + + Returns: + A temporary filepath for non-chief workers to use or the original filepath + for the chief. + """ + if strategy.extended.should_checkpoint: + return filepath + else: + # TODO(vighneshb) Replace with the public API when TF exposes it. + task_id = strategy.extended._task_id # pylint:disable=protected-access + return os.path.join(filepath, 'temp_worker_{:03d}'.format(task_id)) + + +def clean_temporary_directories(strategy, filepath): + """Temporary directory clean up for MultiWorker Mirrored Strategy. + + This is needed for all non-chief workers. + + Args: + strategy: A tf.distribute.Strategy object. + filepath: The filepath for the temporary directory. + """ + if not strategy.extended.should_checkpoint: + if tf.io.gfile.exists(filepath) and tf.io.gfile.isdir(filepath): + tf.io.gfile.rmtree(filepath) + + +def train_loop( + pipeline_config_path, + model_dir, + config_override=None, + train_steps=None, + use_tpu=False, + save_final_config=False, + checkpoint_every_n=1000, + checkpoint_max_to_keep=7, + record_summaries=True, + **kwargs): + """Trains a model using eager + functions. + + This method: + 1. Processes the pipeline configs + 2. (Optionally) saves the as-run config + 3. Builds the model & optimizer + 4. 
Gets the training input data + 5. Loads a fine-tuning detection or classification checkpoint if requested + 6. Loops over the train data, executing distributed training steps inside + tf.functions. + 7. Checkpoints the model every `checkpoint_every_n` training steps. + 8. Logs the training metrics as TensorBoard summaries. + + Args: + pipeline_config_path: A path to a pipeline config file. + model_dir: + The directory to save checkpoints and summaries to. + config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to + override the config from `pipeline_config_path`. + train_steps: Number of training steps. If None, the number of training steps + is set from the `TrainConfig` proto. + use_tpu: Boolean, whether training and evaluation should run on TPU. + save_final_config: Whether to save final config (obtained after applying + overrides) to `model_dir`. + checkpoint_every_n: + Checkpoint every n training steps. + checkpoint_max_to_keep: + int, the number of most recent checkpoints to keep in the model directory. + record_summaries: Boolean, whether or not to record summaries. + **kwargs: Additional keyword arguments for configuration override. + """ + ## Parse the configs + get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[ + 'get_configs_from_pipeline_file'] + merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[ + 'merge_external_params_with_configs'] + create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[ + 'create_pipeline_proto_from_configs'] + + configs = get_configs_from_pipeline_file( + pipeline_config_path, config_override=config_override) + kwargs.update({ + 'train_steps': train_steps, + 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu + }) + configs = merge_external_params_with_configs( + configs, None, kwargs_dict=kwargs) + model_config = configs['model'] + train_config = configs['train_config'] + train_input_config = configs['train_input_config'] + + unpad_groundtruth_tensors = train_config.unpad_groundtruth_tensors + add_regularization_loss = train_config.add_regularization_loss + clip_gradients_value = None + if train_config.gradient_clipping_by_norm > 0: + clip_gradients_value = train_config.gradient_clipping_by_norm + + # update train_steps from config but only when non-zero value is provided + if train_steps is None and train_config.num_steps != 0: + train_steps = train_config.num_steps + + if kwargs['use_bfloat16']: + tf.compat.v2.keras.mixed_precision.experimental.set_policy('mixed_bfloat16') + + if train_config.load_all_detection_checkpoint_vars: + raise ValueError('train_pb2.load_all_detection_checkpoint_vars ' + 'unsupported in TF2') + + config_util.update_fine_tune_checkpoint_type(train_config) + fine_tune_checkpoint_type = train_config.fine_tune_checkpoint_type + fine_tune_checkpoint_version = train_config.fine_tune_checkpoint_version + + # Write the as-run pipeline config to disk. + if save_final_config: + pipeline_config_final = create_pipeline_proto_from_configs(configs) + config_util.save_pipeline_config(pipeline_config_final, model_dir) + + # Build the model, optimizer, and training input + strategy = tf.compat.v2.distribute.get_strategy() + with strategy.scope(): + detection_model = MODEL_BUILD_UTIL_MAP['detection_model_fn_base']( + model_config=model_config, is_training=True) + + def train_dataset_fn(input_context): + """Callable to create train input.""" + # Create the inputs. 
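+ # Note: the strategy below calls this function once per input pipeline and + # passes an InputContext so the dataset can be sharded across workers; + # repeat() keeps the iterator alive for the whole training loop.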
+ train_input = inputs.train_input( + train_config=train_config, + train_input_config=train_input_config, + model_config=model_config, + model=detection_model, + input_context=input_context) + train_input = train_input.repeat() + return train_input + + train_input = strategy.experimental_distribute_datasets_from_function( + train_dataset_fn) + + + global_step = tf.Variable( + 0, trainable=False, dtype=tf.compat.v2.dtypes.int64, name='global_step', + aggregation=tf.compat.v2.VariableAggregation.ONLY_FIRST_REPLICA) + optimizer, (learning_rate,) = optimizer_builder.build( + train_config.optimizer, global_step=global_step) + + if callable(learning_rate): + learning_rate_fn = learning_rate + else: + learning_rate_fn = lambda: learning_rate + + ## Train the model + # Get the appropriate filepath (temporary or not) based on whether the worker + # is the chief. + summary_writer_filepath = get_filepath(strategy, + os.path.join(model_dir, 'train')) + if record_summaries: + summary_writer = tf.compat.v2.summary.create_file_writer( + summary_writer_filepath) + else: + summary_writer = tf2.summary.create_noop_writer() + + if use_tpu: + num_steps_per_iteration = 100 + else: + # TODO(b/135933080) Explore setting to 100 when GPU performance issues + # are fixed. + num_steps_per_iteration = 1 + + with summary_writer.as_default(): + with strategy.scope(): + with tf.compat.v2.summary.record_if( + lambda: global_step % num_steps_per_iteration == 0): + # Load a fine-tuning checkpoint. + if train_config.fine_tune_checkpoint: + load_fine_tune_checkpoint(detection_model, + train_config.fine_tune_checkpoint, + fine_tune_checkpoint_type, + fine_tune_checkpoint_version, + train_input, + unpad_groundtruth_tensors) + + ckpt = tf.compat.v2.train.Checkpoint( + step=global_step, model=detection_model, optimizer=optimizer) + + manager_dir = get_filepath(strategy, model_dir) + if not strategy.extended.should_checkpoint: + checkpoint_max_to_keep = 1 + manager = tf.compat.v2.train.CheckpointManager( + ckpt, manager_dir, max_to_keep=checkpoint_max_to_keep) + + # We use the following instead of manager.latest_checkpoint because + # manager_dir does not point to the model directory when we are running + # in a worker. 
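+ # tf.train.latest_checkpoint returns None when model_dir holds no checkpoint + # yet; restoring a None path leaves the freshly built variables untouched, so + # a new run starts from the initial (or fine-tuned) weights.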
+ latest_checkpoint = tf.train.latest_checkpoint(model_dir) + ckpt.restore(latest_checkpoint) + + def train_step_fn(features, labels): + """Single train step.""" + loss = eager_train_step( + detection_model, + features, + labels, + unpad_groundtruth_tensors, + optimizer, + learning_rate=learning_rate_fn(), + add_regularization_loss=add_regularization_loss, + clip_gradients_value=clip_gradients_value, + global_step=global_step, + num_replicas=strategy.num_replicas_in_sync) + global_step.assign_add(1) + return loss + + def _sample_and_train(strategy, train_step_fn, data_iterator): + features, labels = data_iterator.next() + if hasattr(tf.distribute.Strategy, 'run'): + per_replica_losses = strategy.run( + train_step_fn, args=(features, labels)) + else: + per_replica_losses = strategy.experimental_run_v2( + train_step_fn, args=(features, labels)) + # TODO(anjalisridhar): explore if it is safe to remove the + ## num_replicas scaling of the loss and switch this to a ReduceOp.Mean + return strategy.reduce(tf.distribute.ReduceOp.SUM, + per_replica_losses, axis=None) + + @tf.function + def _dist_train_step(data_iterator): + """A distributed train step.""" + + if num_steps_per_iteration > 1: + for _ in tf.range(num_steps_per_iteration - 1): + # Following suggestion on yaqs/5402607292645376 + with tf.name_scope(''): + _sample_and_train(strategy, train_step_fn, data_iterator) + + return _sample_and_train(strategy, train_step_fn, data_iterator) + + train_input_iter = iter(train_input) + + if int(global_step.value()) == 0: + manager.save() + + checkpointed_step = int(global_step.value()) + logged_step = global_step.value() + + last_step_time = time.time() + for _ in range(global_step.value(), train_steps, + num_steps_per_iteration): + + loss = _dist_train_step(train_input_iter) + + time_taken = time.time() - last_step_time + last_step_time = time.time() + + tf.compat.v2.summary.scalar( + 'steps_per_sec', num_steps_per_iteration * 1.0 / time_taken, + step=global_step) + + if global_step.value() - logged_step >= 100: + tf.logging.info( + 'Step {} per-step time {:.3f}s loss={:.3f}'.format( + global_step.value(), time_taken / num_steps_per_iteration, + loss)) + logged_step = global_step.value() + + if ((int(global_step.value()) - checkpointed_step) >= + checkpoint_every_n): + manager.save() + checkpointed_step = int(global_step.value()) + + # Remove the checkpoint directories of the non-chief workers that + # MultiWorkerMirroredStrategy forces us to save during sync distributed + # training. + clean_temporary_directories(strategy, manager_dir) + clean_temporary_directories(strategy, summary_writer_filepath) + + +def eager_eval_loop( + detection_model, + configs, + eval_dataset, + use_tpu=False, + postprocess_on_cpu=False, + global_step=None): + """Evaluate the model eagerly on the evaluation dataset. + + This method will compute the evaluation metrics specified in the configs on + the entire evaluation dataset, then return the metrics. It will also log + the metrics to TensorBoard. + + Args: + detection_model: A DetectionModel (based on Keras) to evaluate. + configs: Object detection configs that specify the evaluators that should + be used, as well as whether regularization loss should be included and + if bfloat16 should be used on TPUs. + eval_dataset: Dataset containing evaluation data. + use_tpu: Whether a TPU is being used to execute the model for evaluation. + postprocess_on_cpu: Whether model postprocessing should happen on + the CPU when using a TPU to execute the model. 
+ global_step: A variable containing the training step this model was trained + to. Used for logging purposes. + + Returns: + A dict of evaluation metrics representing the results of this evaluation. + """ + train_config = configs['train_config'] + eval_input_config = configs['eval_input_config'] + eval_config = configs['eval_config'] + add_regularization_loss = train_config.add_regularization_loss + + is_training = False + detection_model._is_training = is_training # pylint: disable=protected-access + tf.keras.backend.set_learning_phase(is_training) + + evaluator_options = eval_util.evaluator_options_from_eval_config( + eval_config) + batch_size = eval_config.batch_size + + class_agnostic_category_index = ( + label_map_util.create_class_agnostic_category_index()) + class_agnostic_evaluators = eval_util.get_evaluators( + eval_config, + list(class_agnostic_category_index.values()), + evaluator_options) + + class_aware_evaluators = None + if eval_input_config.label_map_path: + class_aware_category_index = ( + label_map_util.create_category_index_from_labelmap( + eval_input_config.label_map_path)) + class_aware_evaluators = eval_util.get_evaluators( + eval_config, + list(class_aware_category_index.values()), + evaluator_options) + + evaluators = None + loss_metrics = {} + + @tf.function + def compute_eval_dict(features, labels): + """Compute the evaluation result on an image.""" + # For evaling on train data, it is necessary to check whether groundtruth + # must be unpadded. + boxes_shape = ( + labels[fields.InputDataFields.groundtruth_boxes].get_shape().as_list()) + unpad_groundtruth_tensors = (boxes_shape[1] is not None + and not use_tpu + and batch_size == 1) + labels = model_lib.unstack_batch( + labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) + + losses_dict, prediction_dict = _compute_losses_and_predictions_dicts( + detection_model, features, labels, add_regularization_loss) + + def postprocess_wrapper(args): + return detection_model.postprocess(args[0], args[1]) + + # TODO(kaftan): Depending on how postprocessing will work for TPUS w/ + ## TPUStrategy, may be good to move wrapping to a utility method + if use_tpu and postprocess_on_cpu: + detections = contrib_tpu.outside_compilation( + postprocess_wrapper, + (prediction_dict, features[fields.InputDataFields.true_image_shape])) + else: + detections = postprocess_wrapper( + (prediction_dict, features[fields.InputDataFields.true_image_shape])) + + class_agnostic = ( + fields.DetectionResultFields.detection_classes not in detections) + # TODO(kaftan) (or anyone): move `_prepare_groundtruth_for_eval to eval_util + ## and call this from there. 
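+ # _prepare_groundtruth_for_eval re-assembles the groundtruth that was + # provided to the model into the dictionary format expected by + # eval_util.result_dict_for_batched_example below.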
+ groundtruth = model_lib._prepare_groundtruth_for_eval( # pylint: disable=protected-access + detection_model, class_agnostic, eval_input_config.max_number_of_boxes) + use_original_images = fields.InputDataFields.original_image in features + if use_original_images: + eval_images = features[fields.InputDataFields.original_image] + true_image_shapes = tf.slice( + features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3]) + original_image_spatial_shapes = features[ + fields.InputDataFields.original_image_spatial_shape] + else: + eval_images = features[fields.InputDataFields.image] + true_image_shapes = None + original_image_spatial_shapes = None + + eval_dict = eval_util.result_dict_for_batched_example( + eval_images, + features[inputs.HASH_KEY], + detections, + groundtruth, + class_agnostic=class_agnostic, + scale_to_absolute=True, + original_image_spatial_shapes=original_image_spatial_shapes, + true_image_shapes=true_image_shapes) + + return eval_dict, losses_dict, class_agnostic + + agnostic_categories = label_map_util.create_class_agnostic_category_index() + per_class_categories = label_map_util.create_category_index_from_labelmap( + eval_input_config.label_map_path) + keypoint_edges = [ + (kp.start, kp.end) for kp in eval_config.keypoint_edge] + + for i, (features, labels) in enumerate(eval_dataset): + eval_dict, losses_dict, class_agnostic = compute_eval_dict(features, labels) + + if class_agnostic: + category_index = agnostic_categories + else: + category_index = per_class_categories + + if i % 100 == 0: + tf.logging.info('Finished eval step %d', i) + + use_original_images = fields.InputDataFields.original_image in features + if (use_original_images and i < eval_config.num_visualizations + and batch_size == 1): + sbys_image_list = vutils.draw_side_by_side_evaluation_image( + eval_dict, + category_index=category_index, + max_boxes_to_draw=eval_config.max_num_boxes_to_visualize, + min_score_thresh=eval_config.min_score_threshold, + use_normalized_coordinates=False, + keypoint_edges=keypoint_edges or None) + sbys_images = tf.concat(sbys_image_list, axis=0) + tf.compat.v2.summary.image( + name='eval_side_by_side_' + str(i), + step=global_step, + data=sbys_images, + max_outputs=eval_config.num_visualizations) + if eval_util.has_densepose(eval_dict): + dp_image_list = vutils.draw_densepose_visualizations( + eval_dict) + dp_images = tf.concat(dp_image_list, axis=0) + tf.compat.v2.summary.image( + name='densepose_detections_' + str(i), + step=global_step, + data=dp_images, + max_outputs=eval_config.num_visualizations) + + if evaluators is None: + if class_agnostic: + evaluators = class_agnostic_evaluators + else: + evaluators = class_aware_evaluators + + for evaluator in evaluators: + evaluator.add_eval_dict(eval_dict) + + for loss_key, loss_tensor in iter(losses_dict.items()): + if loss_key not in loss_metrics: + loss_metrics[loss_key] = tf.keras.metrics.Mean() + # Skip the loss with value equal or lower than 0.0 when calculating the + # average loss since they don't usually reflect the normal loss values + # causing spurious average loss value. 
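+ # Each remaining loss value is accumulated in a tf.keras.metrics.Mean, so the + # number reported at the end is the running average over all eval batches.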
+ if loss_tensor <= 0.0: + continue + loss_metrics[loss_key].update_state(loss_tensor) + + eval_metrics = {} + + for evaluator in evaluators: + eval_metrics.update(evaluator.evaluate()) + for loss_key in loss_metrics: + eval_metrics[loss_key] = loss_metrics[loss_key].result() + + eval_metrics = {str(k): v for k, v in eval_metrics.items()} + tf.logging.info('Eval metrics at step %d', global_step) + for k in eval_metrics: + tf.compat.v2.summary.scalar(k, eval_metrics[k], step=global_step) + tf.logging.info('\t+ %s: %f', k, eval_metrics[k]) + + return eval_metrics + + +def eval_continuously( + pipeline_config_path, + config_override=None, + train_steps=None, + sample_1_of_n_eval_examples=1, + sample_1_of_n_eval_on_train_examples=1, + use_tpu=False, + override_eval_num_epochs=True, + postprocess_on_cpu=False, + model_dir=None, + checkpoint_dir=None, + wait_interval=180, + timeout=3600, + eval_index=None, + **kwargs): + """Run continuous evaluation of a detection model eagerly. + + This method builds the model, and continuously restores it from the most + recent training checkpoint in the checkpoint directory & evaluates it + on the evaluation data. + + Args: + pipeline_config_path: A path to a pipeline config file. + config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to + override the config from `pipeline_config_path`. + train_steps: Number of training steps. If None, the number of training steps + is set from the `TrainConfig` proto. + sample_1_of_n_eval_examples: Integer representing how often an eval example + should be sampled. If 1, will sample all examples. + sample_1_of_n_eval_on_train_examples: Similar to + `sample_1_of_n_eval_examples`, except controls the sampling of training + data for evaluation. + use_tpu: Boolean, whether training and evaluation should run on TPU. + override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for + eval_input. + postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true, + postprocess is scheduled on the host cpu. + model_dir: Directory to output resulting evaluation summaries to. + checkpoint_dir: Directory that contains the training checkpoints. + wait_interval: The minimum number of seconds to wait before checking for a + new checkpoint. + timeout: The maximum number of seconds to wait for a checkpoint. Execution + will terminate if no new checkpoints are found after that many seconds. + eval_index: int, optional. If given, only evaluate the dataset at the given + index. + + **kwargs: Additional keyword arguments for configuration override.
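+ + Example (illustrative; paths are placeholders): + eval_continuously('pipeline.config', checkpoint_dir='/path/to/train_dir', + model_dir='/path/to/eval_dir', wait_interval=300, timeout=3600)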
+ """ + get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[ + 'get_configs_from_pipeline_file'] + merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[ + 'merge_external_params_with_configs'] + + configs = get_configs_from_pipeline_file( + pipeline_config_path, config_override=config_override) + kwargs.update({ + 'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples, + 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu + }) + if train_steps is not None: + kwargs['train_steps'] = train_steps + if override_eval_num_epochs: + kwargs.update({'eval_num_epochs': 1}) + tf.logging.warning( + 'Forced number of epochs for all eval validations to be 1.') + configs = merge_external_params_with_configs( + configs, None, kwargs_dict=kwargs) + model_config = configs['model'] + train_input_config = configs['train_input_config'] + eval_config = configs['eval_config'] + eval_input_configs = configs['eval_input_configs'] + eval_on_train_input_config = copy.deepcopy(train_input_config) + eval_on_train_input_config.sample_1_of_n_examples = ( + sample_1_of_n_eval_on_train_examples) + if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1: + tf.logging.warning('Expected number of evaluation epochs is 1, but ' + 'instead encountered `eval_on_train_input_config' + '.num_epochs` = ' + '{}. Overwriting `num_epochs` to 1.'.format( + eval_on_train_input_config.num_epochs)) + eval_on_train_input_config.num_epochs = 1 + + if kwargs['use_bfloat16']: + tf.compat.v2.keras.mixed_precision.experimental.set_policy('mixed_bfloat16') + + detection_model = MODEL_BUILD_UTIL_MAP['detection_model_fn_base']( + model_config=model_config, is_training=True) + + # Create the inputs. + eval_inputs = [] + for eval_input_config in eval_input_configs: + next_eval_input = inputs.eval_input( + eval_config=eval_config, + eval_input_config=eval_input_config, + model_config=model_config, + model=detection_model) + eval_inputs.append((eval_input_config.name, next_eval_input)) + + if eval_index is not None: + eval_inputs = [eval_inputs[eval_index]] + + global_step = tf.compat.v2.Variable( + 0, trainable=False, dtype=tf.compat.v2.dtypes.int64) + + for latest_checkpoint in tf.train.checkpoints_iterator( + checkpoint_dir, timeout=timeout, min_interval_secs=wait_interval): + ckpt = tf.compat.v2.train.Checkpoint( + step=global_step, model=detection_model) + + ckpt.restore(latest_checkpoint).expect_partial() + + for eval_name, eval_input in eval_inputs: + summary_writer = tf.compat.v2.summary.create_file_writer( + os.path.join(model_dir, 'eval', eval_name)) + with summary_writer.as_default(): + eager_eval_loop( + detection_model, + configs, + eval_input, + use_tpu=use_tpu, + postprocess_on_cpu=postprocess_on_cpu, + global_step=global_step) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_main.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_main.py new file mode 100644 index 0000000000000000000000000000000000000000..e8afb555e1e4981cfc8be376b5863d4178cf088b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_main.py @@ -0,0 +1,108 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Binary to run train and evaluation on object detection model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags + +import tensorflow.compat.v1 as tf + +from object_detection import model_lib + +flags.DEFINE_string( + 'model_dir', None, 'Path to output model directory ' + 'where event and checkpoint files will be written.') +flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config ' + 'file.') +flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.') +flags.DEFINE_boolean('eval_training_data', False, + 'If training data should be evaluated for this job. Note ' + 'that one call only use this in eval-only mode, and ' + '`checkpoint_dir` must be supplied.') +flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of ' + 'every n eval input examples, where n is provided.') +flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample ' + 'one of every n train input examples for evaluation, ' + 'where n is provided. This is only used if ' + '`eval_training_data` is True.') +flags.DEFINE_string( + 'checkpoint_dir', None, 'Path to directory holding a checkpoint. If ' + '`checkpoint_dir` is provided, this binary operates in eval-only mode, ' + 'writing resulting metrics to `model_dir`.') +flags.DEFINE_boolean( + 'run_once', False, 'If running in eval-only mode, whether to run just ' + 'one round of eval vs running continuously (default).' +) +flags.DEFINE_integer( + 'max_eval_retries', 0, 'If running continuous eval, the maximum number of ' + 'retries upon encountering tf.errors.InvalidArgumentError. If negative, ' + 'will always retry the evaluation.' +) +FLAGS = flags.FLAGS + + +def main(unused_argv): + flags.mark_flag_as_required('model_dir') + flags.mark_flag_as_required('pipeline_config_path') + config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir) + + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config=config, + pipeline_config_path=FLAGS.pipeline_config_path, + train_steps=FLAGS.num_train_steps, + sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples, + sample_1_of_n_eval_on_train_examples=( + FLAGS.sample_1_of_n_eval_on_train_examples)) + estimator = train_and_eval_dict['estimator'] + train_input_fn = train_and_eval_dict['train_input_fn'] + eval_input_fns = train_and_eval_dict['eval_input_fns'] + eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn'] + predict_input_fn = train_and_eval_dict['predict_input_fn'] + train_steps = train_and_eval_dict['train_steps'] + + if FLAGS.checkpoint_dir: + if FLAGS.eval_training_data: + name = 'training_data' + input_fn = eval_on_train_input_fn + else: + name = 'validation_data' + # The first eval input will be evaluated. 
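+ # With --run_once the latest checkpoint in `checkpoint_dir` is evaluated a + # single time; otherwise evaluation keeps following new checkpoints as they + # appear.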
+ input_fn = eval_input_fns[0] + if FLAGS.run_once: + estimator.evaluate(input_fn, + steps=None, + checkpoint_path=tf.train.latest_checkpoint( + FLAGS.checkpoint_dir)) + else: + model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir, input_fn, + train_steps, name, FLAGS.max_eval_retries) + else: + train_spec, eval_specs = model_lib.create_train_and_eval_specs( + train_input_fn, + eval_input_fns, + eval_on_train_input_fn, + predict_input_fn, + train_steps, + eval_on_train_data=False) + + # Currently only a single Eval Spec is allowed. + tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0]) + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_main_tf2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_main_tf2.py new file mode 100644 index 0000000000000000000000000000000000000000..0cf053039ec16461fef0c1eb2f94df66fad2b70c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_main_tf2.py @@ -0,0 +1,113 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Creates and runs TF2 object detection models. + +For local training/evaluation run: +PIPELINE_CONFIG_PATH=path/to/pipeline.config +MODEL_DIR=/tmp/model_outputs +NUM_TRAIN_STEPS=10000 +SAMPLE_1_OF_N_EVAL_EXAMPLES=1 +python model_main_tf2.py -- \ + --model_dir=$MODEL_DIR --num_train_steps=$NUM_TRAIN_STEPS \ + --sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \ + --pipeline_config_path=$PIPELINE_CONFIG_PATH \ + --alsologtostderr +""" +from absl import flags +import tensorflow.compat.v2 as tf +from object_detection import model_lib_v2 + +flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config ' + 'file.') +flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.') +flags.DEFINE_bool('eval_on_train_data', False, 'Enable evaluating on train ' + 'data (only supported in distributed training).') +flags.DEFINE_integer('sample_1_of_n_eval_examples', None, 'Will sample one of ' + 'every n eval input examples, where n is provided.') +flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample ' + 'one of every n train input examples for evaluation, ' + 'where n is provided. This is only used if ' + '`eval_training_data` is True.') +flags.DEFINE_string( + 'model_dir', None, 'Path to output model directory ' + 'where event and checkpoint files will be written.') +flags.DEFINE_string( + 'checkpoint_dir', None, 'Path to directory holding a checkpoint. 
If ' + '`checkpoint_dir` is provided, this binary operates in eval-only mode, ' + 'writing resulting metrics to `model_dir`.') + +flags.DEFINE_integer('eval_timeout', 3600, 'Number of seconds to wait for an' + 'evaluation checkpoint before exiting.') + +flags.DEFINE_bool('use_tpu', False, 'Whether the job is executing on a TPU.') +flags.DEFINE_string( + 'tpu_name', + default=None, + help='Name of the Cloud TPU for Cluster Resolvers.') +flags.DEFINE_integer( + 'num_workers', 1, 'When num_workers > 1, training uses ' + 'MultiWorkerMirroredStrategy. When num_workers = 1 it uses ' + 'MirroredStrategy.') +flags.DEFINE_integer( + 'checkpoint_every_n', 1000, 'Integer defining how often we checkpoint.') +flags.DEFINE_boolean('record_summaries', True, + ('Whether or not to record summaries during' + ' training.')) + +FLAGS = flags.FLAGS + + +def main(unused_argv): + flags.mark_flag_as_required('model_dir') + flags.mark_flag_as_required('pipeline_config_path') + tf.config.set_soft_device_placement(True) + + if FLAGS.checkpoint_dir: + model_lib_v2.eval_continuously( + pipeline_config_path=FLAGS.pipeline_config_path, + model_dir=FLAGS.model_dir, + train_steps=FLAGS.num_train_steps, + sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples, + sample_1_of_n_eval_on_train_examples=( + FLAGS.sample_1_of_n_eval_on_train_examples), + checkpoint_dir=FLAGS.checkpoint_dir, + wait_interval=300, timeout=FLAGS.eval_timeout) + else: + if FLAGS.use_tpu: + # TPU is automatically inferred if tpu_name is None and + # we are running under cloud ai-platform. + resolver = tf.distribute.cluster_resolver.TPUClusterResolver( + FLAGS.tpu_name) + tf.config.experimental_connect_to_cluster(resolver) + tf.tpu.experimental.initialize_tpu_system(resolver) + strategy = tf.distribute.experimental.TPUStrategy(resolver) + elif FLAGS.num_workers > 1: + strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() + else: + strategy = tf.compat.v2.distribute.MirroredStrategy() + + with strategy.scope(): + model_lib_v2.train_loop( + pipeline_config_path=FLAGS.pipeline_config_path, + model_dir=FLAGS.model_dir, + train_steps=FLAGS.num_train_steps, + use_tpu=FLAGS.use_tpu, + checkpoint_every_n=FLAGS.checkpoint_every_n, + record_summaries=FLAGS.record_summaries) + +if __name__ == '__main__': + tf.compat.v1.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_tpu_main.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_tpu_main.py new file mode 100644 index 0000000000000000000000000000000000000000..d72cc01c6ef5e18d99c74f417c2a428e220c019e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/model_tpu_main.py @@ -0,0 +1,139 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Creates and runs `Estimator` for object detection model on TPUs. 
+ +This uses the TPUEstimator API to define and run a model in TRAIN/EVAL modes. +""" +# pylint: enable=line-too-long + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +import tensorflow.compat.v1 as tf + + +from object_detection import model_lib + +tf.flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than plain CPUs') + +# Cloud TPU Cluster Resolvers +flags.DEFINE_string( + 'gcp_project', + default=None, + help='Project name for the Cloud TPU-enabled project. If not specified, we ' + 'will attempt to automatically detect the GCE project from metadata.') +flags.DEFINE_string( + 'tpu_zone', + default=None, + help='GCE zone where the Cloud TPU is located in. If not specified, we ' + 'will attempt to automatically detect the GCE project from metadata.') +flags.DEFINE_string( + 'tpu_name', + default=None, + help='Name of the Cloud TPU for Cluster Resolvers.') + +flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU cores).') +flags.DEFINE_integer('iterations_per_loop', 100, + 'Number of iterations per TPU training loop.') +# For mode=train_and_eval, evaluation occurs after training is finished. +# Note: independently of steps_per_checkpoint, estimator will save the most +# recent checkpoint every 10 minutes by default for train_and_eval +flags.DEFINE_string('mode', 'train', + 'Mode to run: train, eval') +flags.DEFINE_integer('train_batch_size', None, 'Batch size for training. If ' + 'this is not provided, batch size is read from training ' + 'config.') +flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.') +flags.DEFINE_boolean('eval_training_data', False, + 'If training data should be evaluated for this job.') +flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of ' + 'every n eval input examples, where n is provided.') +flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample ' + 'one of every n train input examples for evaluation, ' + 'where n is provided. This is only used if ' + '`eval_training_data` is True.') +flags.DEFINE_string( + 'model_dir', None, 'Path to output model directory ' + 'where event and checkpoint files will be written.') +flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config ' + 'file.') +flags.DEFINE_integer( + 'max_eval_retries', 0, 'If running continuous eval, the maximum number of ' + 'retries upon encountering tf.errors.InvalidArgumentError. If negative, ' + 'will always retry the evaluation.' 
+) + +FLAGS = tf.flags.FLAGS + + +def main(unused_argv): + flags.mark_flag_as_required('model_dir') + flags.mark_flag_as_required('pipeline_config_path') + + tpu_cluster_resolver = ( + tf.distribute.cluster_resolver.TPUClusterResolver( + tpu=[FLAGS.tpu_name], zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)) + tpu_grpc_url = tpu_cluster_resolver.get_master() + + config = tf.estimator.tpu.RunConfig( + master=tpu_grpc_url, + evaluation_master=tpu_grpc_url, + model_dir=FLAGS.model_dir, + tpu_config=tf.estimator.tpu.TPUConfig( + iterations_per_loop=FLAGS.iterations_per_loop, + num_shards=FLAGS.num_shards)) + + kwargs = {} + if FLAGS.train_batch_size: + kwargs['batch_size'] = FLAGS.train_batch_size + + train_and_eval_dict = model_lib.create_estimator_and_inputs( + run_config=config, + pipeline_config_path=FLAGS.pipeline_config_path, + train_steps=FLAGS.num_train_steps, + sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples, + sample_1_of_n_eval_on_train_examples=( + FLAGS.sample_1_of_n_eval_on_train_examples), + use_tpu_estimator=True, + use_tpu=FLAGS.use_tpu, + num_shards=FLAGS.num_shards, + save_final_config=FLAGS.mode == 'train', + **kwargs) + estimator = train_and_eval_dict['estimator'] + train_input_fn = train_and_eval_dict['train_input_fn'] + eval_input_fns = train_and_eval_dict['eval_input_fns'] + eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn'] + train_steps = train_and_eval_dict['train_steps'] + + if FLAGS.mode == 'train': + estimator.train(input_fn=train_input_fn, max_steps=train_steps) + + # Continuously evaluating. + if FLAGS.mode == 'eval': + if FLAGS.eval_training_data: + name = 'training_data' + input_fn = eval_on_train_input_fn + else: + name = 'validation_data' + # Currently only a single eval input is allowed. 
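(Editor's aside, not part of the diffed file.) The max_eval_retries flag defined above bounds how many times continuous evaluation is retried after a tf.errors.InvalidArgumentError; a negative value retries forever. The sketch below is only an illustration of that policy, with the helper name run_eval_with_retries and the generic exception being the editor's own stand-ins rather than anything from model_lib:

def run_eval_with_retries(eval_fn, max_eval_retries):
    """Calls eval_fn, retrying on failure at most max_eval_retries times."""
    retries = 0
    while True:
        try:
            return eval_fn()
        except ValueError:  # stand-in for tf.errors.InvalidArgumentError
            if 0 <= max_eval_retries <= retries:
                raise
            retries += 1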
+ input_fn = eval_input_fns[0] + model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn, train_steps, + name, FLAGS.max_eval_retries) + + +if __name__ == '__main__': + tf.app.run() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db5f748b9444058bbdadc6d2a3c6ab84e7e38cad Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1931ff0185cf409122779fc3100dbe6b7329ff0 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/embedded_ssd_mobilenet_v1_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/embedded_ssd_mobilenet_v1_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cd29ae0ef9969d93e40765f5bac0ca095c4b86e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/embedded_ssd_mobilenet_v1_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_inception_resnet_v2_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_inception_resnet_v2_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84d6065f2eaa202a9d650fe0cf93f09627c5a075 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_inception_resnet_v2_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_inception_v2_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_inception_v2_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b66d4f9ddd1c5ea7b49c576af06a53e48043c4b4 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_inception_v2_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_nas_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_nas_feature_extractor.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1031be3e516f47955acf3688b71c923dd93aec36 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_nas_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_pnas_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_pnas_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b68dbd9b7ab9f6d793bd1fcbf46e3a81c52f05d8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_pnas_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_resnet_v1_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_resnet_v1_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81b1431a988d653d20c031e172aab92857b95fa8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/faster_rcnn_resnet_v1_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/feature_map_generators.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/feature_map_generators.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c831ffbf5dd305d2911bd53a8078be5833d895fe Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/feature_map_generators.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_inception_v2_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_inception_v2_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1717ebacd90380a88ee8649c044d9554b52da7aa Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_inception_v2_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_inception_v3_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_inception_v3_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5243a18094c91f31547af456ae4a629d5eabc428 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_inception_v3_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobiledet_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobiledet_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfedf7671164287f23e7043ef5bc22fc2a0067da Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobiledet_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_edgetpu_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_edgetpu_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c0df46375135243ab620e8c58f6812d700b31fc Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_edgetpu_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v1_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v1_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa18a456fecdeac3e4e9c2ffe548c7d78ece1134 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v1_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v1_fpn_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v1_fpn_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b507317232b62aab2732795c6ce851ac48d9e38 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v1_fpn_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v1_ppn_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v1_ppn_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b013b41490fb46477b01d01b4b51faee3de156d7 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v1_ppn_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v2_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v2_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65b3e16fb4d1bb467d5d861da6945960a4fec6fd Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v2_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v2_fpn_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v2_fpn_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9394e43b824ae801ff96f158ad72c6d50d78304c Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v2_fpn_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v2_mnasfpn_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v2_mnasfpn_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf94e015144e28d658259afd1a921a44e80252be Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v2_mnasfpn_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v3_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v3_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efe1351118e5c22b3171a257599f83683e870bf1 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_mobilenet_v3_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_pnasnet_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_pnasnet_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ef0421218dfce243e9379e50fdd50884b051b9c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_pnasnet_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_resnet_v1_fpn_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_resnet_v1_fpn_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..517f46857b410b5176a0b64958fa3790e313ad32 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_resnet_v1_fpn_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_resnet_v1_ppn_feature_extractor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_resnet_v1_ppn_feature_extractor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ceaf8ebec03863948ddd35ccbefc76ffbd3df467 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/__pycache__/ssd_resnet_v1_ppn_feature_extractor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/bidirectional_feature_pyramid_generators.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/bidirectional_feature_pyramid_generators.py new file mode 100644 index 0000000000000000000000000000000000000000..b53dc60ef6465c408900800216cbe066e6d18259 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/bidirectional_feature_pyramid_generators.py @@ -0,0 +1,486 @@ +# Copyright 2020 The 
TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions to generate bidirectional feature pyramids based on image features. + +Provides bidirectional feature pyramid network (BiFPN) generators that can be +used to build object detection feature extractors, as proposed by Tan et al. +See https://arxiv.org/abs/1911.09070 for more details. +""" +import collections +import functools +from six.moves import range +from six.moves import zip +import tensorflow as tf + +from object_detection.utils import bifpn_utils + + +def _create_bifpn_input_config(fpn_min_level, + fpn_max_level, + input_max_level, + level_scales=None): + """Creates a BiFPN input config for the input levels from a backbone network. + + Args: + fpn_min_level: the minimum pyramid level (highest feature map resolution) to + use in the BiFPN. + fpn_max_level: the maximum pyramid level (lowest feature map resolution) to + use in the BiFPN. + input_max_level: the maximum pyramid level that will be provided as input to + the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels + from input_max_level, up to the desired fpn_max_level. + level_scales: a list of pyramid level scale factors. If 'None', each level's + scale is set to 2^level by default, which corresponds to each successive + feature map scaling by a factor of 2. + + Returns: + A list of dictionaries for each feature map expected as input to the BiFPN, + where each has entries for the feature map 'name' and 'scale'. + """ + if not level_scales: + level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)] + + bifpn_input_params = [] + for i in range(fpn_min_level, min(fpn_max_level, input_max_level) + 1): + bifpn_input_params.append({ + 'name': '0_up_lvl_{}'.format(i), + 'scale': level_scales[i - fpn_min_level] + }) + + return bifpn_input_params + + +def _get_bifpn_output_node_names(fpn_min_level, fpn_max_level, node_config): + """Returns a list of BiFPN output node names, given a BiFPN node config. + + Args: + fpn_min_level: the minimum pyramid level (highest feature map resolution) + used by the BiFPN. + fpn_max_level: the maximum pyramid level (lowest feature map resolution) + used by the BiFPN. + node_config: the BiFPN node_config, a list of dictionaries corresponding to + each node in the BiFPN computation graph, where each entry should have an + associated 'name'. + + Returns: + A list of strings corresponding to the names of the output BiFPN nodes. + """ + num_output_nodes = fpn_max_level - fpn_min_level + 1 + return [node['name'] for node in node_config[-num_output_nodes:]] + + +def _create_bifpn_node_config(bifpn_num_iterations, + bifpn_num_filters, + fpn_min_level, + fpn_max_level, + input_max_level, + bifpn_node_params=None, + level_scales=None): + """Creates a config specifying a bidirectional feature pyramid network. 
+ + Args: + bifpn_num_iterations: the number of top-down bottom-up feature computations + to repeat in the BiFPN. + bifpn_num_filters: the number of filters (channels) for every feature map + used in the BiFPN. + fpn_min_level: the minimum pyramid level (highest feature map resolution) to + use in the BiFPN. + fpn_max_level: the maximum pyramid level (lowest feature map resolution) to + use in the BiFPN. + input_max_level: the maximum pyramid level that will be provided as input to + the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels + from input_max_level, up to the desired fpn_max_level. + bifpn_node_params: If not 'None', a dictionary of additional default BiFPN + node parameters that will be applied to all BiFPN nodes. + level_scales: a list of pyramid level scale factors. If 'None', each level's + scale is set to 2^level by default, which corresponds to each successive + feature map scaling by a factor of 2. + + Returns: + A list of dictionaries used to define nodes in the BiFPN computation graph, + as proposed by EfficientDet, Tan et al (https://arxiv.org/abs/1911.09070). + Each node's entry has the corresponding keys: + name: String. The name of this node in the BiFPN. The node name follows + the format '{bifpn_iteration}_{dn|up}_lvl_{pyramid_level}', where 'dn' + or 'up' refers to whether the node is in the top-down or bottom-up + portion of a single BiFPN iteration. + scale: the scale factor for this node, by default 2^level. + inputs: A list of names of nodes which are inputs to this node. + num_channels: The number of channels for this node. + combine_method: String. Name of the method used to combine input + node feature maps, 'fast_attention' by default for nodes which have more + than one input. Otherwise, 'None' for nodes with only one input node. + input_op: A (partial) function which is called to construct the layers + that will be applied to this BiFPN node's inputs. This function is + called with the arguments: + input_op(name, input_scale, input_num_channels, output_scale, + output_num_channels, conv_hyperparams, is_training, + freeze_batchnorm) + post_combine_op: A (partial) function which is called to construct the + layers that will be applied to the result of the combine operation for + this BiFPN node. This function will be called with the arguments: + post_combine_op(name, conv_hyperparams, is_training, freeze_batchnorm) + If 'None', then no layers will be applied after the combine operation + for this node. + """ + if not level_scales: + level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)] + + default_node_params = { + 'num_channels': + bifpn_num_filters, + 'combine_method': + 'fast_attention', + 'input_op': + functools.partial( + _create_bifpn_resample_block, downsample_method='max_pooling'), + 'post_combine_op': + functools.partial( + bifpn_utils.create_conv_block, + num_filters=bifpn_num_filters, + kernel_size=3, + strides=1, + padding='SAME', + use_separable=True, + apply_batchnorm=True, + apply_activation=True, + conv_bn_act_pattern=False), + } + if bifpn_node_params: + default_node_params.update(bifpn_node_params) + + bifpn_node_params = [] + # Create additional base pyramid levels not provided as input to the BiFPN. + # Note, combine_method and post_combine_op are set to None for additional + # base pyramid levels because they do not combine multiple input BiFPN nodes. 
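(Editor's aside, not part of the diffed file.) The node-naming convention documented above, '{bifpn_iteration}_{dn|up}_lvl_{pyramid_level}', is easiest to see on a small example. The helper below is the editor's own sketch: it only enumerates the names that the top-down and bottom-up loops that follow would produce for one BiFPN iteration with fpn_min_level=3 and fpn_max_level=5, and it ignores the extra '0_up_lvl_*' base levels created just above as well as every other config key.

def bifpn_node_names(num_iterations, min_level, max_level):
    # Names only; the real node config also carries scale, inputs,
    # num_channels, combine_method, input_op and post_combine_op.
    names = []
    for it in range(1, num_iterations + 1):
        # Top-down pass: every level except the top one.
        names += ['{}_dn_lvl_{}'.format(it, lvl)
                  for lvl in reversed(range(min_level, max_level))]
        # Bottom-up pass: every level except the bottom one.
        names += ['{}_up_lvl_{}'.format(it, lvl)
                  for lvl in range(min_level + 1, max_level + 1)]
    return names

print(bifpn_node_names(1, 3, 5))
# ['1_dn_lvl_4', '1_dn_lvl_3', '1_up_lvl_4', '1_up_lvl_5']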
+ for i in range(input_max_level + 1, fpn_max_level + 1): + node_params = dict(default_node_params) + node_params.update({ + 'name': '0_up_lvl_{}'.format(i), + 'scale': level_scales[i - fpn_min_level], + 'inputs': ['0_up_lvl_{}'.format(i - 1)], + 'combine_method': None, + 'post_combine_op': None, + }) + bifpn_node_params.append(node_params) + + for i in range(bifpn_num_iterations): + # The first bottom-up feature pyramid (which includes the input pyramid + # levels from the backbone network and the additional base pyramid levels) + # is indexed at 0. So, the first top-down bottom-up pass of the BiFPN is + # indexed from 1, and repeated for bifpn_num_iterations iterations. + bifpn_i = i + 1 + + # Create top-down nodes. + for level_i in reversed(range(fpn_min_level, fpn_max_level)): + inputs = [] + # BiFPN nodes in the top-down pass receive input from the corresponding + # level from the previous BiFPN iteration's bottom-up pass, except for the + # bottom-most (min) level node, which is computed once in the initial + # bottom-up pass, and is afterwards only computed in each top-down pass. + if level_i > fpn_min_level or bifpn_i == 1: + inputs.append('{}_up_lvl_{}'.format(bifpn_i - 1, level_i)) + else: + inputs.append('{}_dn_lvl_{}'.format(bifpn_i - 1, level_i)) + inputs.append(bifpn_node_params[-1]['name']) + node_params = dict(default_node_params) + node_params.update({ + 'name': '{}_dn_lvl_{}'.format(bifpn_i, level_i), + 'scale': level_scales[level_i - fpn_min_level], + 'inputs': inputs + }) + bifpn_node_params.append(node_params) + + # Create bottom-up nodes. + for level_i in range(fpn_min_level + 1, fpn_max_level + 1): + # BiFPN nodes in the bottom-up pass receive input from the corresponding + # level from the preceding top-down pass, except for the top (max) level + # which does not have a corresponding node in the top-down pass. + inputs = ['{}_up_lvl_{}'.format(bifpn_i - 1, level_i)] + if level_i < fpn_max_level: + inputs.append('{}_dn_lvl_{}'.format(bifpn_i, level_i)) + inputs.append(bifpn_node_params[-1]['name']) + node_params = dict(default_node_params) + node_params.update({ + 'name': '{}_up_lvl_{}'.format(bifpn_i, level_i), + 'scale': level_scales[level_i - fpn_min_level], + 'inputs': inputs + }) + bifpn_node_params.append(node_params) + + return bifpn_node_params + + +def _create_bifpn_resample_block(name, + input_scale, + input_num_channels, + output_scale, + output_num_channels, + conv_hyperparams, + is_training, + freeze_batchnorm, + downsample_method=None, + use_native_resize_op=False, + maybe_apply_1x1_conv=True, + apply_1x1_pre_sampling=True, + apply_1x1_post_sampling=False): + """Creates resample block layers for input feature maps to BiFPN nodes. + + Args: + name: String. Name used for this block of layers. + input_scale: Scale factor of the input feature map. + input_num_channels: Number of channels in the input feature map. + output_scale: Scale factor of the output feature map. + output_num_channels: Number of channels in the output feature map. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + is_training: Indicates whether the feature generator is in training mode. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + downsample_method: String. Method to use when downsampling feature maps. + use_native_resize_op: Bool. 
Whether to use the native resize up when + upsampling feature maps. + maybe_apply_1x1_conv: Bool. If 'True', a 1x1 convolution will only be + applied if the input_num_channels differs from the output_num_channels. + apply_1x1_pre_sampling: Bool. Whether a 1x1 convolution will be applied to + the input feature map before the up/down-sampling operation. + apply_1x1_post_sampling: Bool. Whether a 1x1 convolution will be applied to + the input feature map after the up/down-sampling operation. + + Returns: + A list of layers which may be applied to the input feature maps in order to + compute feature maps with the specified scale and number of channels. + """ + # By default, 1x1 convolutions are only applied before sampling when the + # number of input and output channels differ. + if maybe_apply_1x1_conv and output_num_channels == input_num_channels: + apply_1x1_pre_sampling = False + apply_1x1_post_sampling = False + + apply_bn_for_resampling = True + layers = [] + if apply_1x1_pre_sampling: + layers.extend( + bifpn_utils.create_conv_block( + name=name + '1x1_pre_sample/', + num_filters=output_num_channels, + kernel_size=1, + strides=1, + padding='SAME', + use_separable=False, + apply_batchnorm=apply_bn_for_resampling, + apply_activation=False, + conv_hyperparams=conv_hyperparams, + is_training=is_training, + freeze_batchnorm=freeze_batchnorm)) + + layers.extend( + bifpn_utils.create_resample_feature_map_ops(input_scale, output_scale, + downsample_method, + use_native_resize_op, + conv_hyperparams, is_training, + freeze_batchnorm, name)) + + if apply_1x1_post_sampling: + layers.extend( + bifpn_utils.create_conv_block( + name=name + '1x1_post_sample/', + num_filters=output_num_channels, + kernel_size=1, + strides=1, + padding='SAME', + use_separable=False, + apply_batchnorm=apply_bn_for_resampling, + apply_activation=False, + conv_hyperparams=conv_hyperparams, + is_training=is_training, + freeze_batchnorm=freeze_batchnorm)) + + return layers + + +def _create_bifpn_combine_op(num_inputs, name, combine_method): + """Creates a BiFPN output config, a list of the output BiFPN node names. + + Args: + num_inputs: The number of inputs to this combine operation. + name: String. The name of this combine operation. + combine_method: String. The method used to combine input feature maps. + + Returns: + A function which may be called with a list of num_inputs feature maps + and which will return a single feature map. + """ + + combine_op = None + if num_inputs < 1: + raise ValueError('Expected at least 1 input for BiFPN combine.') + elif num_inputs == 1: + combine_op = lambda x: x[0] + else: + combine_op = bifpn_utils.BiFPNCombineLayer( + combine_method=combine_method, name=name) + return combine_op + + +class KerasBiFpnFeatureMaps(tf.keras.Model): + """Generates Keras based BiFPN feature maps from an input feature map pyramid. + + A Keras model that generates multi-scale feature maps for detection by + iteratively computing top-down and bottom-up feature pyramids, as in the + EfficientDet paper by Tan et al, see arxiv.org/abs/1911.09070 for details. + """ + + def __init__(self, + bifpn_num_iterations, + bifpn_num_filters, + fpn_min_level, + fpn_max_level, + input_max_level, + is_training, + conv_hyperparams, + freeze_batchnorm, + bifpn_node_params=None, + name=None): + """Constructor. + + Args: + bifpn_num_iterations: The number of top-down bottom-up iterations. + bifpn_num_filters: The number of filters (channels) to be used for all + feature maps in this BiFPN. 
+ fpn_min_level: The minimum pyramid level (highest feature map resolution) + to use in the BiFPN. + fpn_max_level: The maximum pyramid level (lowest feature map resolution) + to use in the BiFPN. + input_max_level: The maximum pyramid level that will be provided as input + to the BiFPN. Accordingly, the BiFPN will compute any additional pyramid + levels from input_max_level up to the desired fpn_max_level, with each + successivel level downsampling by a scale factor of 2 by default. + is_training: Indicates whether the feature generator is in training mode. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + bifpn_node_params: An optional dictionary that may be used to specify + default parameters for BiFPN nodes, without the need to provide a custom + bifpn_node_config. For example, if '{ combine_method: 'sum' }', then all + BiFPN nodes will combine input feature maps by summation, rather than + by the default fast attention method. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(KerasBiFpnFeatureMaps, self).__init__(name=name) + bifpn_node_config = _create_bifpn_node_config( + bifpn_num_iterations, bifpn_num_filters, fpn_min_level, fpn_max_level, + input_max_level, bifpn_node_params) + bifpn_input_config = _create_bifpn_input_config( + fpn_min_level, fpn_max_level, input_max_level) + bifpn_output_node_names = _get_bifpn_output_node_names( + fpn_min_level, fpn_max_level, bifpn_node_config) + + self.bifpn_node_config = bifpn_node_config + self.bifpn_output_node_names = bifpn_output_node_names + self.node_input_blocks = [] + self.node_combine_op = [] + self.node_post_combine_block = [] + + all_node_params = bifpn_input_config + all_node_names = [node['name'] for node in all_node_params] + for node_config in bifpn_node_config: + # Maybe transform and/or resample input feature maps. + input_blocks = [] + for input_name in node_config['inputs']: + if input_name not in all_node_names: + raise ValueError( + 'Input feature map ({}) does not exist:'.format(input_name)) + input_index = all_node_names.index(input_name) + input_params = all_node_params[input_index] + input_block = node_config['input_op']( + name='{}/input_{}/'.format(node_config['name'], input_name), + input_scale=input_params['scale'], + input_num_channels=input_params.get('num_channels', None), + output_scale=node_config['scale'], + output_num_channels=node_config['num_channels'], + conv_hyperparams=conv_hyperparams, + is_training=is_training, + freeze_batchnorm=freeze_batchnorm) + input_blocks.append((input_index, input_block)) + + # Combine input feature maps. + combine_op = _create_bifpn_combine_op( + num_inputs=len(input_blocks), + name=(node_config['name'] + '/combine'), + combine_method=node_config['combine_method']) + + # Post-combine layers. 
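(Editor's aside, not part of the diffed file.) The default 'fast_attention' combine_method used for multi-input nodes is the fast normalized fusion from the EfficientDet paper cited in the module docstring: each input feature map gets a learned non-negative scalar weight, and the weighted sum is normalized by the weight total. The real implementation lives in bifpn_utils.BiFPNCombineLayer; the NumPy function below is only the editor's sketch of that formula.

import numpy as np

def fast_attention_combine(inputs, weights, eps=1e-4):
    # out = sum_i relu(w_i) * x_i / (eps + sum_j relu(w_j))
    weights = np.maximum(np.asarray(weights, dtype=np.float32), 0.0)
    return sum(w * x for w, x in zip(weights, inputs)) / (weights.sum() + eps)

a = np.ones((1, 8, 8, 64), np.float32)
b = 2.0 * np.ones((1, 8, 8, 64), np.float32)
print(fast_attention_combine([a, b], [1.0, 3.0])[0, 0, 0, 0])  # ~1.75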
+ post_combine_block = [] + if node_config['post_combine_op']: + post_combine_block.extend(node_config['post_combine_op']( + name=node_config['name'] + '/post_combine/', + conv_hyperparams=conv_hyperparams, + is_training=is_training, + freeze_batchnorm=freeze_batchnorm)) + + self.node_input_blocks.append(input_blocks) + self.node_combine_op.append(combine_op) + self.node_post_combine_block.append(post_combine_block) + all_node_params.append(node_config) + all_node_names.append(node_config['name']) + + def call(self, feature_pyramid): + """Compute BiFPN feature maps from input feature pyramid. + + Executed when calling the `.__call__` method on input. + + Args: + feature_pyramid: list of tuples of (tensor_name, image_feature_tensor). + + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. + """ + feature_maps = [el[1] for el in feature_pyramid] + output_feature_maps = [None for node in self.bifpn_output_node_names] + + for index, node in enumerate(self.bifpn_node_config): + node_scope = 'node_{:02d}'.format(index) + with tf.name_scope(node_scope): + # Apply layer blocks to this node's input feature maps. + input_block_results = [] + for input_index, input_block in self.node_input_blocks[index]: + block_result = feature_maps[input_index] + for layer in input_block: + block_result = layer(block_result) + input_block_results.append(block_result) + + # Combine the resulting feature maps. + node_result = self.node_combine_op[index](input_block_results) + + # Apply post-combine layer block if applicable. + for layer in self.node_post_combine_block[index]: + node_result = layer(node_result) + + feature_maps.append(node_result) + + if node['name'] in self.bifpn_output_node_names: + index = self.bifpn_output_node_names.index(node['name']) + output_feature_maps[index] = node_result + + return collections.OrderedDict( + zip(self.bifpn_output_node_names, output_feature_maps)) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/bidirectional_feature_pyramid_generators_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/bidirectional_feature_pyramid_generators_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..cbc815cc446add205a5b307cd56cf81ee60a1041 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/bidirectional_feature_pyramid_generators_tf2_test.py @@ -0,0 +1,167 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for bidirectional feature pyramid generators.""" +import unittest +from absl.testing import parameterized + +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.models import bidirectional_feature_pyramid_generators as bifpn_generators +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + + +@parameterized.parameters({'bifpn_num_iterations': 2}, + {'bifpn_num_iterations': 8}) +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class BiFPNFeaturePyramidGeneratorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + force_use_bias: true + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_get_expected_feature_map_shapes(self, bifpn_num_iterations): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block3', tf.random_uniform([4, 16, 16, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)) + ] + bifpn_generator = bifpn_generators.KerasBiFpnFeatureMaps( + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=128, + fpn_min_level=3, + fpn_max_level=7, + input_max_level=5, + is_training=True, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False) + def graph_fn(): + feature_maps = bifpn_generator(image_features) + return feature_maps + + expected_feature_map_shapes = { + '{}_dn_lvl_3'.format(bifpn_num_iterations): (4, 16, 16, 128), + '{}_up_lvl_4'.format(bifpn_num_iterations): (4, 8, 8, 128), + '{}_up_lvl_5'.format(bifpn_num_iterations): (4, 4, 4, 128), + '{}_up_lvl_6'.format(bifpn_num_iterations): (4, 2, 2, 128), + '{}_up_lvl_7'.format(bifpn_num_iterations): (4, 1, 1, 128)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_variable_names(self, bifpn_num_iterations): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block3', tf.random_uniform([4, 16, 16, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)) + ] + bifpn_generator = bifpn_generators.KerasBiFpnFeatureMaps( + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=128, + fpn_min_level=3, + fpn_max_level=7, + input_max_level=5, + is_training=True, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + name='bifpn') + def graph_fn(): + return bifpn_generator(image_features) + + self.execute(graph_fn, [], g) + expected_variables = [ + 'bifpn/node_00/0_up_lvl_6/input_0_up_lvl_5/1x1_pre_sample/conv/bias', + 'bifpn/node_00/0_up_lvl_6/input_0_up_lvl_5/1x1_pre_sample/conv/kernel', + 'bifpn/node_03/1_dn_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/bias', + 
'bifpn/node_03/1_dn_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/kernel', + 'bifpn/node_04/1_dn_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/bias', + 'bifpn/node_04/1_dn_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/kernel', + 'bifpn/node_05/1_dn_lvl_3/input_0_up_lvl_3/1x1_pre_sample/conv/bias', + 'bifpn/node_05/1_dn_lvl_3/input_0_up_lvl_3/1x1_pre_sample/conv/kernel', + 'bifpn/node_06/1_up_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/bias', + 'bifpn/node_06/1_up_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/kernel', + 'bifpn/node_07/1_up_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/bias', + 'bifpn/node_07/1_up_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/kernel'] + expected_node_variable_patterns = [ + ['bifpn/node_{:02}/{}_dn_lvl_6/combine/bifpn_combine_weights', + 'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/bias', + 'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/depthwise_kernel', + 'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/pointwise_kernel'], + ['bifpn/node_{:02}/{}_dn_lvl_5/combine/bifpn_combine_weights', + 'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/bias', + 'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/depthwise_kernel', + 'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/pointwise_kernel'], + ['bifpn/node_{:02}/{}_dn_lvl_4/combine/bifpn_combine_weights', + 'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/bias', + 'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/depthwise_kernel', + 'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/pointwise_kernel'], + ['bifpn/node_{:02}/{}_dn_lvl_3/combine/bifpn_combine_weights', + 'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/bias', + 'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/depthwise_kernel', + 'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/pointwise_kernel'], + ['bifpn/node_{:02}/{}_up_lvl_4/combine/bifpn_combine_weights', + 'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/bias', + 'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/depthwise_kernel', + 'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/pointwise_kernel'], + ['bifpn/node_{:02}/{}_up_lvl_5/combine/bifpn_combine_weights', + 'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/bias', + 'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/depthwise_kernel', + 'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/pointwise_kernel'], + ['bifpn/node_{:02}/{}_up_lvl_6/combine/bifpn_combine_weights', + 'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/bias', + 'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/depthwise_kernel', + 'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/pointwise_kernel'], + ['bifpn/node_{:02}/{}_up_lvl_7/combine/bifpn_combine_weights', + 'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/bias', + 'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/depthwise_kernel', + 'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/pointwise_kernel']] + + node_i = 2 + for iter_i in range(1, bifpn_num_iterations+1): + for node_variable_patterns in expected_node_variable_patterns: + for pattern in node_variable_patterns: + expected_variables.append(pattern.format(node_i, iter_i)) + node_i += 1 + + expected_variables = set(expected_variables) + actual_variable_set = set( + [var.name.split(':')[0] for var in bifpn_generator.variables]) + self.assertSetEqual(expected_variables, actual_variable_set) + +# TODO(aom): Tests for create_bifpn_combine_op. 
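(Editor's aside, not part of the diffed test.) The expected feature-map shapes asserted above follow from the default per-level scale of 2**level: the 16x16 level-3 input corresponds to a 128-pixel image side (an assumption made only for this illustration), and levels 6 and 7 are synthesized by the BiFPN at strides 64 and 128.

# Spatial side length at pyramid level l for a 128-pixel input, assuming the
# default 2**l scale used by _create_bifpn_input_config.
size = 128
print({level: size // 2 ** level for level in range(3, 8)})
# {3: 16, 4: 8, 5: 4, 6: 2, 7: 1} -- matching the (4, H, W, 128) shapes above.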
+ +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_hourglass_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_hourglass_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..ecf84abf6a13566d43d102698d6546a11d99180e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_hourglass_feature_extractor.py @@ -0,0 +1,82 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Hourglass[1] feature extractor for CenterNet[2] meta architecture. + +[1]: https://arxiv.org/abs/1603.06937 +[2]: https://arxiv.org/abs/1904.07850 +""" + +from object_detection.meta_architectures import center_net_meta_arch +from object_detection.models.keras_models import hourglass_network + + +class CenterNetHourglassFeatureExtractor( + center_net_meta_arch.CenterNetFeatureExtractor): + """The hourglass feature extractor for CenterNet. + + This class is a thin wrapper around the HourglassFeatureExtractor class + along with some preprocessing methods inherited from the base class. + """ + + def __init__(self, hourglass_net, channel_means=(0., 0., 0.), + channel_stds=(1., 1., 1.), bgr_ordering=False): + """Intializes the feature extractor. + + Args: + hourglass_net: The underlying hourglass network to use. + channel_means: A tuple of floats, denoting the mean of each channel + which will be subtracted from it. + channel_stds: A tuple of floats, denoting the standard deviation of each + channel. Each channel will be divided by its standard deviation value. + bgr_ordering: bool, if set will change the channel ordering to be in the + [blue, red, green] order. 
+ """ + + super(CenterNetHourglassFeatureExtractor, self).__init__( + channel_means=channel_means, channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + self._network = hourglass_net + + def call(self, inputs): + return self._network(inputs) + + @property + def out_stride(self): + """The stride in the output image of the network.""" + return 4 + + @property + def num_feature_outputs(self): + """Ther number of feature outputs returned by the feature extractor.""" + return self._network.num_hourglasses + + @property + def supported_sub_model_types(self): + return ['detection'] + + def get_sub_model(self, sub_model_type): + if sub_model_type == 'detection': + return self._network + else: + ValueError('Sub model type "{}" not supported.'.format(sub_model_type)) + + +def hourglass_104(channel_means, channel_stds, bgr_ordering): + """The Hourglass-104 backbone for CenterNet.""" + + network = hourglass_network.hourglass_104() + return CenterNetHourglassFeatureExtractor( + network, channel_means=channel_means, channel_stds=channel_stds, + bgr_ordering=bgr_ordering) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_hourglass_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_hourglass_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..31c26c5ab9efddc99518e92a2320ed409d737ea3 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_hourglass_feature_extractor_tf2_test.py @@ -0,0 +1,45 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Testing hourglass feature extractor for CenterNet.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import center_net_hourglass_feature_extractor as hourglass +from object_detection.models.keras_models import hourglass_network +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetHourglassFeatureExtractorTest(test_case.TestCase): + + def test_center_net_hourglass_feature_extractor(self): + + net = hourglass_network.HourglassNetwork( + num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], + input_channel_dims=4, channel_dims_per_stage=[6, 8, 10, 12, 14], + num_hourglasses=2) + + model = hourglass.CenterNetHourglassFeatureExtractor(net) + def graph_fn(): + return model(tf.zeros((2, 64, 64, 3), dtype=np.float32)) + outputs = self.execute(graph_fn, []) + self.assertEqual(outputs[0].shape, (2, 16, 16, 6)) + self.assertEqual(outputs[1].shape, (2, 16, 16, 6)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..09cf4ec7d1333e752df43997f93a7b57b66b9527 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_feature_extractor.py @@ -0,0 +1,122 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""MobileNet V2[1] feature extractor for CenterNet[2] meta architecture. + +[1]: https://arxiv.org/abs/1801.04381 +[2]: https://arxiv.org/abs/1904.07850 +""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import center_net_meta_arch +from object_detection.models.keras_models import mobilenet_v2 as mobilenetv2 + + +class CenterNetMobileNetV2FeatureExtractor( + center_net_meta_arch.CenterNetFeatureExtractor): + """The MobileNet V2 feature extractor for CenterNet.""" + + def __init__(self, + mobilenet_v2_net, + channel_means=(0., 0., 0.), + channel_stds=(1., 1., 1.), + bgr_ordering=False): + """Intializes the feature extractor. + + Args: + mobilenet_v2_net: The underlying mobilenet_v2 network to use. + channel_means: A tuple of floats, denoting the mean of each channel + which will be subtracted from it. + channel_stds: A tuple of floats, denoting the standard deviation of each + channel. Each channel will be divided by its standard deviation value. + bgr_ordering: bool, if set will change the channel ordering to be in the + [blue, red, green] order. 
+ """ + + super(CenterNetMobileNetV2FeatureExtractor, self).__init__( + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + self._network = mobilenet_v2_net + + output = self._network(self._network.input) + + # MobileNet by itself transforms a 224x224x3 volume into a 7x7x1280, which + # leads to a stride of 32. We perform upsampling to get it to a target + # stride of 4. + for num_filters in [256, 128, 64]: + # 1. We use a simple convolution instead of a deformable convolution + conv = tf.keras.layers.Conv2D( + filters=num_filters, kernel_size=1, strides=1, padding='same') + output = conv(output) + output = tf.keras.layers.BatchNormalization()(output) + output = tf.keras.layers.ReLU()(output) + + # 2. We use the default initialization for the convolution layers + # instead of initializing it to do bilinear upsampling. + conv_transpose = tf.keras.layers.Conv2DTranspose( + filters=num_filters, kernel_size=3, strides=2, padding='same') + output = conv_transpose(output) + output = tf.keras.layers.BatchNormalization()(output) + output = tf.keras.layers.ReLU()(output) + + self._network = tf.keras.models.Model( + inputs=self._network.input, outputs=output) + + def preprocess(self, resized_inputs): + resized_inputs = super(CenterNetMobileNetV2FeatureExtractor, + self).preprocess(resized_inputs) + return tf.keras.applications.mobilenet_v2.preprocess_input(resized_inputs) + + def load_feature_extractor_weights(self, path): + self._network.load_weights(path) + + def get_base_model(self): + return self._network + + def call(self, inputs): + return [self._network(inputs)] + + @property + def out_stride(self): + """The stride in the output image of the network.""" + return 4 + + @property + def num_feature_outputs(self): + """The number of feature outputs returned by the feature extractor.""" + return 1 + + @property + def supported_sub_model_types(self): + return ['detection'] + + def get_sub_model(self, sub_model_type): + if sub_model_type == 'detection': + return self._network + else: + ValueError('Sub model type "{}" not supported.'.format(sub_model_type)) + + +def mobilenet_v2(channel_means, channel_stds, bgr_ordering): + """The MobileNetV2 backbone for CenterNet.""" + + # We set 'is_training' to True for now. + network = mobilenetv2.mobilenet_v2(True, include_top=False) + return CenterNetMobileNetV2FeatureExtractor( + network, + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5211701138d8e134bba7c2ff6b247cf19d156691 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_feature_extractor_tf2_test.py @@ -0,0 +1,46 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Testing mobilenet_v2 feature extractor for CenterNet.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import center_net_mobilenet_v2_feature_extractor +from object_detection.models.keras_models import mobilenet_v2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMobileNetV2FeatureExtractorTest(test_case.TestCase): + + def test_center_net_mobilenet_v2_feature_extractor(self): + + net = mobilenet_v2.mobilenet_v2(True, include_top=False) + + model = center_net_mobilenet_v2_feature_extractor.CenterNetMobileNetV2FeatureExtractor( + net) + + def graph_fn(): + img = np.zeros((8, 224, 224, 3), dtype=np.float32) + processed_img = model.preprocess(img) + return model(processed_img) + + outputs = self.execute(graph_fn, []) + self.assertEqual(outputs.shape, (8, 56, 56, 64)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..b6d4b6ebc2bbd7965f2545e6338e6271de8e4896 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor.py @@ -0,0 +1,142 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""MobileNet V2[1] + FPN[2] feature extractor for CenterNet[3] meta architecture. + +[1]: https://arxiv.org/abs/1801.04381 +[2]: https://arxiv.org/abs/1612.03144. +[3]: https://arxiv.org/abs/1904.07850 +""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import center_net_meta_arch +from object_detection.models.keras_models import mobilenet_v2 as mobilenetv2 + + +_MOBILENET_V2_FPN_SKIP_LAYERS = [ + 'block_2_add', 'block_5_add', 'block_9_add', 'out_relu' +] + + +class CenterNetMobileNetV2FPNFeatureExtractor( + center_net_meta_arch.CenterNetFeatureExtractor): + """The MobileNet V2 with FPN skip layers feature extractor for CenterNet.""" + + def __init__(self, + mobilenet_v2_net, + channel_means=(0., 0., 0.), + channel_stds=(1., 1., 1.), + bgr_ordering=False): + """Intializes the feature extractor. + + Args: + mobilenet_v2_net: The underlying mobilenet_v2 network to use. + channel_means: A tuple of floats, denoting the mean of each channel + which will be subtracted from it. + channel_stds: A tuple of floats, denoting the standard deviation of each + channel. 
Each channel will be divided by its standard deviation value. + bgr_ordering: bool, if set will change the channel ordering to be in the + [blue, red, green] order. + """ + + super(CenterNetMobileNetV2FPNFeatureExtractor, self).__init__( + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + self._network = mobilenet_v2_net + + output = self._network(self._network.input) + + # Add pyramid feature network on every layer that has stride 2. + skip_outputs = [ + self._network.get_layer(skip_layer_name).output + for skip_layer_name in _MOBILENET_V2_FPN_SKIP_LAYERS + ] + self._fpn_model = tf.keras.models.Model( + inputs=self._network.input, outputs=skip_outputs) + fpn_outputs = self._fpn_model(self._network.input) + + # Construct the top-down feature maps -- we start with an output of + # 7x7x1280, which we continually upsample, apply a residual on and merge. + # This results in a 56x56x24 output volume. + top_layer = fpn_outputs[-1] + residual_op = tf.keras.layers.Conv2D( + filters=64, kernel_size=1, strides=1, padding='same') + top_down = residual_op(top_layer) + + num_filters_list = [64, 32, 24] + for i, num_filters in enumerate(num_filters_list): + level_ind = len(num_filters_list) - 1 - i + # Upsample. + upsample_op = tf.keras.layers.UpSampling2D(2, interpolation='nearest') + top_down = upsample_op(top_down) + + # Residual (skip-connection) from bottom-up pathway. + residual_op = tf.keras.layers.Conv2D( + filters=num_filters, kernel_size=1, strides=1, padding='same') + residual = residual_op(fpn_outputs[level_ind]) + + # Merge. + top_down = top_down + residual + next_num_filters = num_filters_list[i + 1] if i + 1 <= 2 else 24 + conv = tf.keras.layers.Conv2D( + filters=next_num_filters, kernel_size=3, strides=1, padding='same') + top_down = conv(top_down) + top_down = tf.keras.layers.BatchNormalization()(top_down) + top_down = tf.keras.layers.ReLU()(top_down) + + output = top_down + + self._network = tf.keras.models.Model( + inputs=self._network.input, outputs=output) + + def preprocess(self, resized_inputs): + resized_inputs = super(CenterNetMobileNetV2FPNFeatureExtractor, + self).preprocess(resized_inputs) + return tf.keras.applications.mobilenet_v2.preprocess_input(resized_inputs) + + def load_feature_extractor_weights(self, path): + self._network.load_weights(path) + + def get_base_model(self): + return self._network + + def call(self, inputs): + return [self._network(inputs)] + + @property + def out_stride(self): + """The stride in the output image of the network.""" + return 4 + + @property + def num_feature_outputs(self): + """The number of feature outputs returned by the feature extractor.""" + return 1 + + def get_model(self): + return self._network + + +def mobilenet_v2_fpn(channel_means, channel_stds, bgr_ordering): + """The MobileNetV2+FPN backbone for CenterNet.""" + + # Set to is_training to True for now. 
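Editor's note: the loop above implements a small FPN-style top-down pathway: start from the deepest (stride-32) map, repeatedly upsample by 2, project the matching skip connection with a 1x1 conv, add, then smooth with a 3x3 conv + BatchNorm + ReLU. Below is a self-contained sketch of that same pattern on dummy inputs (the function name fpn_top_down and the skip shapes are illustrative stand-ins for the MobileNetV2 skip outputs), ending at the expected 56x56x24.

import tensorflow as tf

def fpn_top_down(skip_shapes, num_filters_list=(64, 32, 24)):
    """Top-down merge over skip maps ordered shallow -> deep (strides 4, 8, 16, 32)."""
    skips = [tf.keras.Input(shape=s) for s in skip_shapes]
    top_down = tf.keras.layers.Conv2D(num_filters_list[0], 1, padding='same')(skips[-1])
    for i, num_filters in enumerate(num_filters_list):
        # Upsample, project the skip connection to the same depth, then merge.
        top_down = tf.keras.layers.UpSampling2D(2, interpolation='nearest')(top_down)
        residual = tf.keras.layers.Conv2D(num_filters, 1, padding='same')(
            skips[len(num_filters_list) - 1 - i])
        top_down = tf.keras.layers.Add()([top_down, residual])
        # Smooth and step the channel count down for the next level.
        next_filters = num_filters_list[min(i + 1, len(num_filters_list) - 1)]
        top_down = tf.keras.layers.Conv2D(next_filters, 3, padding='same')(top_down)
        top_down = tf.keras.layers.BatchNormalization()(top_down)
        top_down = tf.keras.layers.ReLU()(top_down)
    return tf.keras.Model(skips, top_down)

model = fpn_top_down([(56, 56, 24), (28, 28, 32), (14, 14, 64), (7, 7, 1280)])
print(model.output_shape)  # (None, 56, 56, 24)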
+ network = mobilenetv2.mobilenet_v2(True, include_top=False) + return CenterNetMobileNetV2FPNFeatureExtractor( + network, + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d4b570b9643a9a9e142fca22ecf7ea71259fdd2b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor_tf2_test.py @@ -0,0 +1,46 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Testing mobilenet_v2+FPN feature extractor for CenterNet.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import center_net_mobilenet_v2_fpn_feature_extractor +from object_detection.models.keras_models import mobilenet_v2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetMobileNetV2FPNFeatureExtractorTest(test_case.TestCase): + + def test_center_net_mobilenet_v2_fpn_feature_extractor(self): + + net = mobilenet_v2.mobilenet_v2(True, include_top=False) + + model = center_net_mobilenet_v2_fpn_feature_extractor.CenterNetMobileNetV2FPNFeatureExtractor( + net) + + def graph_fn(): + img = np.zeros((8, 224, 224, 3), dtype=np.float32) + processed_img = model.preprocess(img) + return model(processed_img) + + outputs = self.execute(graph_fn, []) + self.assertEqual(outputs.shape, (8, 56, 56, 24)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..793aadcad2b55c83b6fdfab7dd75eb75afb1138a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_feature_extractor.py @@ -0,0 +1,157 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Resnetv2 based feature extractors for CenterNet[1] meta architecture. + +[1]: https://arxiv.org/abs/1904.07850 +""" + + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor + + +class CenterNetResnetFeatureExtractor(CenterNetFeatureExtractor): + """Resnet v2 base feature extractor for the CenterNet model.""" + + def __init__(self, resnet_type, channel_means=(0., 0., 0.), + channel_stds=(1., 1., 1.), bgr_ordering=False): + """Initializes the feature extractor with a specific ResNet architecture. + + Args: + resnet_type: A string specifying which kind of ResNet to use. Currently + only `resnet_v2_50` and `resnet_v2_101` are supported. + channel_means: A tuple of floats, denoting the mean of each channel + which will be subtracted from it. + channel_stds: A tuple of floats, denoting the standard deviation of each + channel. Each channel will be divided by its standard deviation value. + bgr_ordering: bool, if set will change the channel ordering to be in the + [blue, red, green] order. + + """ + + super(CenterNetResnetFeatureExtractor, self).__init__( + channel_means=channel_means, channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + if resnet_type == 'resnet_v2_101': + self._base_model = tf.keras.applications.ResNet101V2(weights=None, + include_top=False) + output_layer = 'conv5_block3_out' + elif resnet_type == 'resnet_v2_50': + self._base_model = tf.keras.applications.ResNet50V2(weights=None, + include_top=False) + output_layer = 'conv5_block3_out' + else: + raise ValueError('Unknown Resnet Model {}'.format(resnet_type)) + output_layer = self._base_model.get_layer(output_layer) + + self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input, + outputs=output_layer.output) + resnet_output = self._resnet_model(self._base_model.input) + + for num_filters in [256, 128, 64]: + # TODO(vighneshb) This section has a few differences from the paper + # Figure out how much of a performance impact they have. + + # 1. We use a simple convolution instead of a deformable convolution + conv = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=3, + strides=1, padding='same') + resnet_output = conv(resnet_output) + resnet_output = tf.keras.layers.BatchNormalization()(resnet_output) + resnet_output = tf.keras.layers.ReLU()(resnet_output) + + # 2. We use the default initialization for the convolution layers + # instead of initializing it to do bilinear upsampling. + conv_transpose = tf.keras.layers.Conv2DTranspose(filters=num_filters, + kernel_size=3, strides=2, + padding='same') + resnet_output = conv_transpose(resnet_output) + resnet_output = tf.keras.layers.BatchNormalization()(resnet_output) + resnet_output = tf.keras.layers.ReLU()(resnet_output) + + self._feature_extractor_model = tf.keras.models.Model( + inputs=self._base_model.input, outputs=resnet_output) + + def preprocess(self, resized_inputs): + """Preprocess input images for the ResNet model. + + This scales images in the range [0, 255] to the range [-1, 1] + + Args: + resized_inputs: a [batch, height, width, channels] float32 tensor. + + Returns: + outputs: a [batch, height, width, channels] float32 tensor. 
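Editor's note: for the ResNet v2 variants above, the Keras backbone is cut at 'conv5_block3_out', its stride-32 feature map, before the same three-step deconvolution head is attached. A quick check of that tap point (illustrative only, using tf.keras.applications directly as the extractor itself does):

import tensorflow as tf

base = tf.keras.applications.ResNet50V2(
    input_shape=(512, 512, 3), include_top=False, weights=None)
print(base.get_layer('conv5_block3_out').output.shape)
# (None, 16, 16, 2048) -> stride 32; the deconv head then brings this to 128x128x64.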
+ + """ + resized_inputs = super(CenterNetResnetFeatureExtractor, self).preprocess( + resized_inputs) + return tf.keras.applications.resnet_v2.preprocess_input(resized_inputs) + + def load_feature_extractor_weights(self, path): + self._base_model.load_weights(path) + + def call(self, inputs): + """Returns image features extracted by the backbone. + + Args: + inputs: An image tensor of shape [batch_size, input_height, + input_width, 3] + + Returns: + features_list: A list of length 1 containing a tensor of shape + [batch_size, input_height // 4, input_width // 4, 64] containing + the features extracted by the ResNet. + """ + return [self._feature_extractor_model(inputs)] + + @property + def num_feature_outputs(self): + return 1 + + @property + def out_stride(self): + return 4 + + @property + def supported_sub_model_types(self): + return ['classification'] + + def get_sub_model(self, sub_model_type): + if sub_model_type == 'classification': + return self._base_model + else: + ValueError('Sub model type "{}" not supported.'.format(sub_model_type)) + + +def resnet_v2_101(channel_means, channel_stds, bgr_ordering): + """The ResNet v2 101 feature extractor.""" + + return CenterNetResnetFeatureExtractor( + resnet_type='resnet_v2_101', + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering + ) + + +def resnet_v2_50(channel_means, channel_stds, bgr_ordering): + """The ResNet v2 50 feature extractor.""" + + return CenterNetResnetFeatureExtractor( + resnet_type='resnet_v2_50', + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d8f9b22a746cbd6da862f9a37f4ef2e57f10b451 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_feature_extractor_tf2_test.py @@ -0,0 +1,54 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Testing ResNet v2 models for the CenterNet meta architecture.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import center_net_resnet_feature_extractor +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetResnetFeatureExtractorTest(test_case.TestCase): + + def test_output_size(self): + """Verify that shape of features returned by the backbone is correct.""" + + model = center_net_resnet_feature_extractor.\ + CenterNetResnetFeatureExtractor('resnet_v2_101') + def graph_fn(): + img = np.zeros((8, 512, 512, 3), dtype=np.float32) + processed_img = model.preprocess(img) + return model(processed_img) + outputs = self.execute(graph_fn, []) + self.assertEqual(outputs.shape, (8, 128, 128, 64)) + + def test_output_size_resnet50(self): + """Verify that shape of features returned by the backbone is correct.""" + + model = center_net_resnet_feature_extractor.\ + CenterNetResnetFeatureExtractor('resnet_v2_50') + def graph_fn(): + img = np.zeros((8, 224, 224, 3), dtype=np.float32) + processed_img = model.preprocess(img) + return model(processed_img) + outputs = self.execute(graph_fn, []) + self.assertEqual(outputs.shape, (8, 56, 56, 64)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..300d9ebe411db6e1b0e7d0b47f22c35f21f00906 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py @@ -0,0 +1,214 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Resnetv1 FPN [1] based feature extractors for CenterNet[2] meta architecture. + + +[1]: https://arxiv.org/abs/1612.03144. +[2]: https://arxiv.org/abs/1904.07850. 
+""" +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor +from object_detection.models.keras_models import resnet_v1 + + +_RESNET_MODEL_OUTPUT_LAYERS = { + 'resnet_v1_18': ['conv2_block2_out', 'conv3_block2_out', + 'conv4_block2_out', 'conv5_block2_out'], + 'resnet_v1_34': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block6_out', 'conv5_block3_out'], + 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block6_out', 'conv5_block3_out'], + 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block23_out', 'conv5_block3_out'], +} + + +class CenterNetResnetV1FpnFeatureExtractor(CenterNetFeatureExtractor): + """Resnet v1 FPN base feature extractor for the CenterNet model. + + This feature extractor uses residual skip connections and nearest neighbor + upsampling to produce an output feature map of stride 4, which has precise + localization information along with strong semantic information from the top + of the net. This design does not exactly follow the original FPN design, + specifically: + - Since only one output map is necessary for heatmap prediction (stride 4 + output), the top-down feature maps can have different numbers of channels. + Specifically, the top down feature maps have the following sizes: + [h/4, w/4, 64], [h/8, w/8, 128], [h/16, w/16, 256], [h/32, w/32, 256]. + - No additional coarse features are used after conv5_x. + """ + + def __init__(self, resnet_type, channel_means=(0., 0., 0.), + channel_stds=(1., 1., 1.), bgr_ordering=False): + """Initializes the feature extractor with a specific ResNet architecture. + + Args: + resnet_type: A string specifying which kind of ResNet to use. Currently + only `resnet_v1_50` and `resnet_v1_101` are supported. + channel_means: A tuple of floats, denoting the mean of each channel + which will be subtracted from it. + channel_stds: A tuple of floats, denoting the standard deviation of each + channel. Each channel will be divided by its standard deviation value. + bgr_ordering: bool, if set will change the channel ordering to be in the + [blue, red, green] order. + + """ + + super(CenterNetResnetV1FpnFeatureExtractor, self).__init__( + channel_means=channel_means, channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + if resnet_type == 'resnet_v1_50': + self._base_model = tf.keras.applications.ResNet50(weights=None, + include_top=False) + elif resnet_type == 'resnet_v1_101': + self._base_model = tf.keras.applications.ResNet101(weights=None, + include_top=False) + elif resnet_type == 'resnet_v1_18': + self._base_model = resnet_v1.resnet_v1_18(weights=None, include_top=False) + elif resnet_type == 'resnet_v1_34': + self._base_model = resnet_v1.resnet_v1_34(weights=None, include_top=False) + else: + raise ValueError('Unknown Resnet Model {}'.format(resnet_type)) + output_layers = _RESNET_MODEL_OUTPUT_LAYERS[resnet_type] + outputs = [self._base_model.get_layer(output_layer_name).output + for output_layer_name in output_layers] + + self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input, + outputs=outputs) + resnet_outputs = self._resnet_model(self._base_model.input) + + # Construct the top-down feature maps. + top_layer = resnet_outputs[-1] + residual_op = tf.keras.layers.Conv2D(filters=256, kernel_size=1, + strides=1, padding='same') + top_down = residual_op(top_layer) + + num_filters_list = [256, 128, 64] + for i, num_filters in enumerate(num_filters_list): + level_ind = 2 - i + # Upsample. 
+ upsample_op = tf.keras.layers.UpSampling2D(2, interpolation='nearest') + top_down = upsample_op(top_down) + + # Residual (skip-connection) from bottom-up pathway. + residual_op = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=1, + strides=1, padding='same') + residual = residual_op(resnet_outputs[level_ind]) + + # Merge. + top_down = top_down + residual + next_num_filters = num_filters_list[i+1] if i + 1 <= 2 else 64 + conv = tf.keras.layers.Conv2D(filters=next_num_filters, + kernel_size=3, strides=1, padding='same') + top_down = conv(top_down) + top_down = tf.keras.layers.BatchNormalization()(top_down) + top_down = tf.keras.layers.ReLU()(top_down) + + self._feature_extractor_model = tf.keras.models.Model( + inputs=self._base_model.input, outputs=top_down) + + def preprocess(self, resized_inputs): + """Preprocess input images for the ResNet model. + + This scales images in the range [0, 255] to the range [-1, 1] + + Args: + resized_inputs: a [batch, height, width, channels] float32 tensor. + + Returns: + outputs: a [batch, height, width, channels] float32 tensor. + + """ + resized_inputs = super( + CenterNetResnetV1FpnFeatureExtractor, self).preprocess(resized_inputs) + return tf.keras.applications.resnet.preprocess_input(resized_inputs) + + def load_feature_extractor_weights(self, path): + self._base_model.load_weights(path) + + def call(self, inputs): + """Returns image features extracted by the backbone. + + Args: + inputs: An image tensor of shape [batch_size, input_height, + input_width, 3] + + Returns: + features_list: A list of length 1 containing a tensor of shape + [batch_size, input_height // 4, input_width // 4, 64] containing + the features extracted by the ResNet. + """ + return [self._feature_extractor_model(inputs)] + + @property + def num_feature_outputs(self): + return 1 + + @property + def out_stride(self): + return 4 + + @property + def supported_sub_model_types(self): + return ['classification'] + + def get_sub_model(self, sub_model_type): + if sub_model_type == 'classification': + return self._base_model + else: + ValueError('Sub model type "{}" not supported.'.format(sub_model_type)) + + +def resnet_v1_101_fpn(channel_means, channel_stds, bgr_ordering): + """The ResNet v1 101 FPN feature extractor.""" + + return CenterNetResnetV1FpnFeatureExtractor( + resnet_type='resnet_v1_101', + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering + ) + + +def resnet_v1_50_fpn(channel_means, channel_stds, bgr_ordering): + """The ResNet v1 50 FPN feature extractor.""" + + return CenterNetResnetV1FpnFeatureExtractor( + resnet_type='resnet_v1_50', + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering) + + +def resnet_v1_34_fpn(channel_means, channel_stds, bgr_ordering): + """The ResNet v1 34 FPN feature extractor.""" + + return CenterNetResnetV1FpnFeatureExtractor( + resnet_type='resnet_v1_34', + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering + ) + + +def resnet_v1_18_fpn(channel_means, channel_stds, bgr_ordering): + """The ResNet v1 18 FPN feature extractor.""" + + return CenterNetResnetV1FpnFeatureExtractor( + resnet_type='resnet_v1_18', + channel_means=channel_means, + channel_stds=channel_stds, + bgr_ordering=bgr_ordering) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py 
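Editor's note: the ResNet v1 FPN extractor above taps the four stride-4/8/16/32 block outputs listed in _RESNET_MODEL_OUTPUT_LAYERS and runs the same top-down merge as the MobileNetV2 FPN, ending at a stride-4, 64-channel map. The tapped strides for resnet_v1_50 can be confirmed directly from tf.keras.applications (illustrative check only):

import tensorflow as tf

base = tf.keras.applications.ResNet50(
    input_shape=(512, 512, 3), include_top=False, weights=None)
for name in ['conv2_block3_out', 'conv3_block4_out',
             'conv4_block6_out', 'conv5_block3_out']:
    print(name, base.get_layer(name).output.shape)
# conv2_block3_out (None, 128, 128, 256)    -> stride 4
# conv3_block4_out (None, 64, 64, 512)      -> stride 8
# conv4_block6_out (None, 32, 32, 1024)     -> stride 16
# conv5_block3_out (None, 16, 16, 2048)     -> stride 32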
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2508e52f793157c9bf3b644601e7772f38511534 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py @@ -0,0 +1,51 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Testing ResNet v1 FPN models for the CenterNet meta architecture.""" +import unittest +from absl.testing import parameterized + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import center_net_resnet_v1_fpn_feature_extractor +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class CenterNetResnetV1FpnFeatureExtractorTest(test_case.TestCase, + parameterized.TestCase): + + @parameterized.parameters( + {'resnet_type': 'resnet_v1_50'}, + {'resnet_type': 'resnet_v1_101'}, + {'resnet_type': 'resnet_v1_18'}, + {'resnet_type': 'resnet_v1_34'}, + ) + def test_correct_output_size(self, resnet_type): + """Verify that shape of features returned by the backbone is correct.""" + + model = center_net_resnet_v1_fpn_feature_extractor.\ + CenterNetResnetV1FpnFeatureExtractor(resnet_type) + def graph_fn(): + img = np.zeros((8, 512, 512, 3), dtype=np.float32) + processed_img = model.preprocess(img) + return model(processed_img) + + self.assertEqual(self.execute(graph_fn, []).shape, (8, 128, 128, 64)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..ac1886e025280165dfde4c6d4158fe8964cdc0eb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.py @@ -0,0 +1,165 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Embedded-friendly SSDFeatureExtractor for MobilenetV1 features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from nets import mobilenet_v1 + + +class EmbeddedSSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """Embedded-friendly SSD Feature Extractor using MobilenetV1 features. + + This feature extractor is similar to SSD MobileNetV1 feature extractor, and + it fixes input resolution to be 256x256, reduces the number of feature maps + used for box prediction and ensures convolution kernel to be no larger + than input tensor in spatial dimensions. + + This feature extractor requires support of the following ops if used in + embedded devices: + - Conv + - DepthwiseConv + - Relu6 + + All conv/depthwiseconv use SAME padding, and no additional spatial padding is + needed. + """ + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False): + """MobileNetV1 Feature Extractor for Embedded-friendly SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. For EmbeddedSSD it must be set to 1. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + + Raises: + ValueError: upon invalid `pad_to_multiple` values. + """ + if pad_to_multiple != 1: + raise ValueError('Embedded-specific SSD only supports `pad_to_multiple` ' + 'of 1.') + + super(EmbeddedSSDMobileNetV1FeatureExtractor, self).__init__( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams) + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + + Raises: + ValueError: if image height or width are not 256 pixels. 
+ """ + image_shape = preprocessed_inputs.get_shape() + image_shape.assert_has_rank(4) + image_height = image_shape[1].value + image_width = image_shape[2].value + + if image_height is None or image_width is None: + shape_assert = tf.Assert( + tf.logical_and(tf.equal(tf.shape(preprocessed_inputs)[1], 256), + tf.equal(tf.shape(preprocessed_inputs)[2], 256)), + ['image size must be 256 in both height and width.']) + with tf.control_dependencies([shape_assert]): + preprocessed_inputs = tf.identity(preprocessed_inputs) + elif image_height != 256 or image_width != 256: + raise ValueError('image size must be = 256 in both height and width;' + ' image dim = %d,%d' % (image_height, image_width)) + + feature_map_layout = { + 'from_layer': [ + 'Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '' + ], + 'layer_depth': [-1, -1, 512, 256, 256], + 'conv_kernel_size': [-1, -1, 3, 3, 2], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + + with tf.variable_scope('MobilenetV1', + reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams + else context_manager.IdentityContextManager()): + _, image_features = mobilenet_v1.mobilenet_v1_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Conv2d_13_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3b94e4f58d43260e14c9039f0097370dee9447a Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4a27e8c8d649c4cb9ae961bffafc7ad824b63b25 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py @@ -0,0 +1,132 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for embedded_ssd_mobilenet_v1_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import embedded_ssd_mobilenet_v1_feature_extractor +from object_detection.models import ssd_feature_extractor_test +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class EmbeddedSSDMobileNetV1FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + is_training=True): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return (embedded_ssd_mobilenet_v1_feature_extractor. + EmbeddedSSDMobileNetV1FeatureExtractor( + is_training, depth_multiplier, min_depth, pad_to_multiple, + self.conv_hyperparams_fn, + override_base_feature_extractor_hyperparams=True)) + + def test_extract_features_returns_correct_shapes_256(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024), + (2, 4, 4, 512), (2, 2, 2, 256), + (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024), + (2, 4, 4, 512), (2, 2, 2, 256), + (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 16, 16, 32), (2, 8, 8, 32), (2, 4, 4, 32), + (2, 2, 2, 32), (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple_of_1( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024), + (2, 4, 4, 512), (2, 2, 2, 256), + (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_raises_error_with_pad_to_multiple_not_1(self): + depth_multiplier = 1.0 + pad_to_multiple = 2 + with self.assertRaises(ValueError): + _ = self._create_feature_extractor(depth_multiplier, pad_to_multiple) + + def test_extract_features_raises_error_with_invalid_image_size(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + 
self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV1' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..a94aa207b3a6fdbddc440ea4bac64a9ba9e5d8de --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py @@ -0,0 +1,212 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Inception Resnet v2 Faster R-CNN implementation. + +See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on +Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261) +as well as +"Speed/accuracy trade-offs for modern convolutional object detectors" by +Huang et al. (https://arxiv.org/abs/1611.10012) +""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import variables_helper +from nets import inception_resnet_v2 + + +class FasterRCNNInceptionResnetV2FeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. 
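Editor's note: the depth_multiplier=0.5**12 test a few lines up works because SSD feature-map depths are clamped from below: each requested depth d becomes roughly max(int(d * depth_multiplier), min_depth), so with min_depth=32 every layer collapses to 32 channels, exactly as the expected shapes assert. A one-off illustration (scaled_depth is a hypothetical helper mirroring that convention):

def scaled_depth(depth, depth_multiplier, min_depth=32):
    # Mirrors the depth clamping convention used by the SSD feature extractors.
    return max(int(depth * depth_multiplier), min_depth)

print([scaled_depth(d, 0.5 ** 12) for d in [512, 1024, 512, 256, 256]])
# [32, 32, 32, 32, 32]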
+ """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN with Inception Resnet v2 preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Extracts features using the first half of the Inception Resnet v2 network. + We construct the network in `align_feature_maps=True` mode, which means + that all VALID paddings in the network are changed to SAME padding so that + the feature maps are aligned. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + Raises: + InvalidArgumentError: If the spatial size of `preprocessed_inputs` + (height or width) is less than 33. + ValueError: If the created network is missing the required activation. + """ + if len(preprocessed_inputs.get_shape().as_list()) != 4: + raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' + 'tensor of shape %s' % preprocessed_inputs.get_shape()) + + with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( + weight_decay=self._weight_decay)): + # Forces is_training to False to disable batch norm update. + with slim.arg_scope([slim.batch_norm], + is_training=self._train_batch_norm): + with tf.variable_scope('InceptionResnetV2', + reuse=self._reuse_weights) as scope: + return inception_resnet_v2.inception_resnet_v2_base( + preprocessed_inputs, final_endpoint='PreAuxLogits', + scope=scope, output_stride=self._first_stage_features_stride, + align_feature_maps=True) + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + This function reconstructs the "second half" of the Inception ResNet v2 + network after the part defined in `_extract_proposal_features`. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name. + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights): + with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( + weight_decay=self._weight_decay)): + # Forces is_training to False to disable batch norm update. 
+ with slim.arg_scope([slim.batch_norm], + is_training=self._train_batch_norm): + with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], + stride=1, padding='SAME'): + with tf.variable_scope('Mixed_7a'): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d(proposal_feature_maps, + 256, 1, scope='Conv2d_0a_1x1') + tower_conv_1 = slim.conv2d( + tower_conv, 384, 3, stride=2, + padding='VALID', scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_1'): + tower_conv1 = slim.conv2d( + proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') + tower_conv1_1 = slim.conv2d( + tower_conv1, 288, 3, stride=2, + padding='VALID', scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_2'): + tower_conv2 = slim.conv2d( + proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') + tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3, + scope='Conv2d_0b_3x3') + tower_conv2_2 = slim.conv2d( + tower_conv2_1, 320, 3, stride=2, + padding='VALID', scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_3'): + tower_pool = slim.max_pool2d( + proposal_feature_maps, 3, stride=2, padding='VALID', + scope='MaxPool_1a_3x3') + net = tf.concat( + [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3) + net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20) + net = inception_resnet_v2.block8(net, activation_fn=None) + proposal_classifier_features = slim.conv2d( + net, 1536, 1, scope='Conv2d_7b_1x1') + return proposal_classifier_features + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Note that this overrides the default implementation in + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for + InceptionResnetV2 checkpoints. + + TODO(jonathanhuang,rathodv): revisit whether it's possible to force the + `Repeat` namescope as created in `_extract_box_classifier_features` to + start counting at 2 (e.g. `Repeat_2`) so that the default restore_fn can + be used. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor. + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. 
+ """ + + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + if variable.op.name.startswith( + first_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + first_stage_feature_extractor_scope + '/', '') + variables_to_restore[var_name] = variable + if variable.op.name.startswith( + second_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + second_stage_feature_extractor_scope + + '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2') + var_name = var_name.replace( + second_stage_feature_extractor_scope + '/', '') + variables_to_restore[var_name] = variable + return variables_to_restore diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2dcd655a1441d850e786226921e7f6ae4bda90f Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2505fbfb3ad6e8621a3b2d05caba506b350f0f49 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py @@ -0,0 +1,111 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for models.faster_rcnn_inception_resnet_v2_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnInceptionResnetV2FeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 299, 299, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 19, 19, 1088]) + + def test_extract_proposal_features_stride_eight(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=8) + preprocessed_inputs = tf.random_uniform( + [1, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 28, 28, 1088]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 1088]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [2, 17, 17, 1088], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, 
scope='TestScope')) + features_shape = tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [2, 8, 8, 1536]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..f185aa01dd377c66b94ca37cc244350b2071f21c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor.py @@ -0,0 +1,159 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Inception Resnet v2 Faster R-CNN implementation in Keras. + +See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on +Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261) +as well as +"Speed/accuracy trade-offs for modern convolutional object detectors" by +Huang et al. (https://arxiv.org/abs/1611.10012) +""" + +# Skip pylint for this file because it times out +# pylint: skip-file + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.models.keras_models import inception_resnet_v2 +from object_detection.utils import model_util +from object_detection.utils import variables_helper + + +class FasterRCNNInceptionResnetV2KerasFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): + """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. + """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + super(FasterRCNNInceptionResnetV2KerasFeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + weight_decay) + self._variable_dict = {} + self.classification_backbone = None + + def preprocess(self, resized_inputs): + """Faster R-CNN with Inception Resnet v2 preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. 
+ + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def get_proposal_feature_extractor_model(self, name=None): + """Returns a model that extracts first stage RPN features. + + Extracts features using the first half of the Inception Resnet v2 network. + We construct the network in `align_feature_maps=True` mode, which means + that all VALID paddings in the network are changed to SAME padding so that + the feature maps are aligned. + + Args: + name: A scope name to construct all variables within. + + Returns: + A Keras model that takes preprocessed_inputs: + A [batch, height, width, channels] float32 tensor + representing a batch of images. + + And returns rpn_feature_map: + A tensor with shape [batch, height, width, depth] + """ + if not self.classification_backbone: + self.classification_backbone = inception_resnet_v2.inception_resnet_v2( + self._train_batch_norm, + output_stride=self._first_stage_features_stride, + align_feature_maps=True, + weight_decay=self._weight_decay, + weights=None, + include_top=False) + with tf.name_scope(name): + with tf.name_scope('InceptionResnetV2'): + proposal_features = self.classification_backbone.get_layer( + name='block17_20_ac').output + keras_model = tf.keras.Model( + inputs=self.classification_backbone.inputs, + outputs=proposal_features) + for variable in keras_model.variables: + self._variable_dict[variable.name[:-2]] = variable + return keras_model + + def get_box_classifier_feature_extractor_model(self, name=None): + """Returns a model that extracts second stage box classifier features. + + This function reconstructs the "second half" of the Inception ResNet v2 + network after the part defined in `get_proposal_feature_extractor_model`. + + Args: + name: A scope name to construct all variables within. + + Returns: + A Keras model that takes proposal_feature_maps: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + And returns proposal_classifier_features: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. 
+ """ + if not self.classification_backbone: + self.classification_backbone = inception_resnet_v2.inception_resnet_v2( + self._train_batch_norm, + output_stride=self._first_stage_features_stride, + align_feature_maps=True, + weight_decay=self._weight_decay, + weights=None, + include_top=False) + with tf.name_scope(name): + with tf.name_scope('InceptionResnetV2'): + proposal_feature_maps = self.classification_backbone.get_layer( + name='block17_20_ac').output + proposal_classifier_features = self.classification_backbone.get_layer( + name='conv_7b_ac').output + + keras_model = model_util.extract_submodel( + model=self.classification_backbone, + inputs=proposal_feature_maps, + outputs=proposal_classifier_features) + for variable in keras_model.variables: + self._variable_dict[variable.name[:-2]] = variable + return keras_model + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..20bb50ef836aaf71448f9711f430b532d5a01b5b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py @@ -0,0 +1,80 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for models.faster_rcnn_inception_resnet_v2_keras_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return frcnn_inc_res.FasterRCNNInceptionResnetV2KerasFeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 299, 299, 3], maxval=255, dtype=tf.float32) + rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shape = tf.shape(rpn_feature_map) + + self.assertAllEqual(features_shape.numpy(), [1, 19, 19, 1088]) + + def test_extract_proposal_features_stride_eight(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=8) + preprocessed_inputs = tf.random_uniform( + [1, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shape = tf.shape(rpn_feature_map) + + self.assertAllEqual(features_shape.numpy(), [1, 28, 28, 1088]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shape = tf.shape(rpn_feature_map) + self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1088]) + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [2, 17, 17, 1088], maxval=255, dtype=tf.float32) + model = feature_extractor.get_box_classifier_feature_extractor_model( + name='TestScope') + proposal_classifier_features = ( + model(proposal_feature_maps)) + features_shape = tf.shape(proposal_classifier_features) + self.assertAllEqual(features_shape.numpy(), [2, 9, 9, 1536]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..549ad6bb2f42ccf834c4dcea6834f8b4b9d10ee7 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py @@ -0,0 +1,253 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Inception V2 Faster R-CNN implementation. + +See "Rethinking the Inception Architecture for Computer Vision" +https://arxiv.org/abs/1512.00567 +""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from nets import inception_v2 + + +def _batch_norm_arg_scope(list_ops, + use_batch_norm=True, + batch_norm_decay=0.9997, + batch_norm_epsilon=0.001, + batch_norm_scale=False, + train_batch_norm=False): + """Slim arg scope for InceptionV2 batch norm.""" + if use_batch_norm: + batch_norm_params = { + 'is_training': train_batch_norm, + 'scale': batch_norm_scale, + 'decay': batch_norm_decay, + 'epsilon': batch_norm_epsilon + } + normalizer_fn = slim.batch_norm + else: + normalizer_fn = None + batch_norm_params = None + + return slim.arg_scope(list_ops, + normalizer_fn=normalizer_fn, + normalizer_params=batch_norm_params) + + +class FasterRCNNInceptionV2FeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN Inception V2 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + depth_multiplier=1.0, + min_depth=16): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. + """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + self._depth_multiplier = depth_multiplier + self._min_depth = min_depth + super(FasterRCNNInceptionV2FeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN Inception V2 preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + activations: A dictionary mapping feature extractor tensor names to + tensors + + Raises: + InvalidArgumentError: If the spatial size of `preprocessed_inputs` + (height or width) is less than 33. 
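# A minimal eager-mode stand-in for the spatial-size guard described above:
# _extract_proposal_features requires inputs of at least 33x33 pixels and
# enforces this with tf.Assert in graph mode. Pure NumPy sketch, assuming a
# [batch, height, width, channels] array; the helper name is hypothetical.
import numpy as np

def check_min_image_dim(images, min_dim=33):
    if images.ndim != 4:
        raise ValueError('Expected a rank-4 [batch, height, width, channels] array.')
    _, height, width, _ = images.shape
    if height < min_dim or width < min_dim:
        raise ValueError(
            'image size must at least be %d in both height and width.' % min_dim)
    return images

check_min_image_dim(np.zeros((4, 224, 224, 3), dtype=np.float32))   # passes
# check_min_image_dim(np.zeros((4, 32, 32, 3)))  # would raise ValueError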
+ ValueError: If the created network is missing the required activation. + """ + + preprocessed_inputs.get_shape().assert_has_rank(4) + shape_assert = tf.Assert( + tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), + tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), + ['image size must at least be 33 in both height and width.']) + + with tf.control_dependencies([shape_assert]): + with tf.variable_scope('InceptionV2', + reuse=self._reuse_weights) as scope: + with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d], + batch_norm_scale=True, + train_batch_norm=self._train_batch_norm): + _, activations = inception_v2.inception_v2_base( + preprocessed_inputs, + final_endpoint='Mixed_4e', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope) + + return activations['Mixed_4e'], activations + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name (unused). + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + net = proposal_feature_maps + + depth = lambda d: max(int(d * self._depth_multiplier), self._min_depth) + trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev) + + data_format = 'NHWC' + concat_dim = 3 if data_format == 'NHWC' else 1 + + with tf.variable_scope('InceptionV2', reuse=self._reuse_weights): + with slim.arg_scope( + [slim.conv2d, slim.max_pool2d, slim.avg_pool2d], + stride=1, + padding='SAME', + data_format=data_format): + with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d], + batch_norm_scale=True, + train_batch_norm=self._train_batch_norm): + + with tf.variable_scope('Mixed_5a'): + with tf.variable_scope('Branch_0'): + branch_0 = slim.conv2d( + net, depth(128), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, + scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_1'): + branch_1 = slim.conv2d( + net, depth(192), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], + scope='Conv2d_0b_3x3') + branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, + scope='Conv2d_1a_3x3') + with tf.variable_scope('Branch_2'): + branch_2 = slim.max_pool2d(net, [3, 3], stride=2, + scope='MaxPool_1a_3x3') + net = tf.concat([branch_0, branch_1, branch_2], concat_dim) + + with tf.variable_scope('Mixed_5b'): + with tf.variable_scope('Branch_0'): + branch_0 = slim.conv2d(net, depth(352), [1, 1], + scope='Conv2d_0a_1x1') + with tf.variable_scope('Branch_1'): + branch_1 = slim.conv2d( + net, depth(192), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], + scope='Conv2d_0b_3x3') + with tf.variable_scope('Branch_2'): + branch_2 = slim.conv2d( + net, depth(160), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], + scope='Conv2d_0b_3x3') + branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], + scope='Conv2d_0c_3x3') + with tf.variable_scope('Branch_3'): 
+ branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') + branch_3 = slim.conv2d( + branch_3, depth(128), [1, 1], + weights_initializer=trunc_normal(0.1), + scope='Conv2d_0b_1x1') + net = tf.concat([branch_0, branch_1, branch_2, branch_3], + concat_dim) + + with tf.variable_scope('Mixed_5c'): + with tf.variable_scope('Branch_0'): + branch_0 = slim.conv2d(net, depth(352), [1, 1], + scope='Conv2d_0a_1x1') + with tf.variable_scope('Branch_1'): + branch_1 = slim.conv2d( + net, depth(192), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], + scope='Conv2d_0b_3x3') + with tf.variable_scope('Branch_2'): + branch_2 = slim.conv2d( + net, depth(192), [1, 1], + weights_initializer=trunc_normal(0.09), + scope='Conv2d_0a_1x1') + branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], + scope='Conv2d_0b_3x3') + branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], + scope='Conv2d_0c_3x3') + with tf.variable_scope('Branch_3'): + branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') + branch_3 = slim.conv2d( + branch_3, depth(128), [1, 1], + weights_initializer=trunc_normal(0.1), + scope='Conv2d_0b_1x1') + proposal_classifier_features = tf.concat( + [branch_0, branch_1, branch_2, branch_3], concat_dim) + + return proposal_classifier_features diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_v2_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_v2_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59dd7c189d911252a10d4604605e3a71d9ba439b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_v2_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_v2_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_v2_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d01145f291f7b795a917e5a96632d52b42bac5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_inception_v2_feature_extractor_tf1_test.py @@ -0,0 +1,128 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
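# Worked numbers for the depth() helper used in the Mixed_5a/5b/5c blocks above
# (depth = lambda d: max(int(d * depth_multiplier), min_depth)); the multiplier
# value below is only an example.
depth_multiplier, min_depth = 0.5, 16
depth = lambda d: max(int(d * depth_multiplier), min_depth)
print(depth(352), depth(128), depth(16))   # 176 64 16  (int(16*0.5)=8 is clamped to min_depth)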
+# ============================================================================== + +"""Tests for faster_rcnn_inception_v2_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_inception_v2_feature_extractor as faster_rcnn_inception_v2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnInceptionV2FeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return faster_rcnn_inception_v2.FasterRCNNInceptionV2FeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 14, 14, 576]) + + def test_extract_proposal_features_stride_eight(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=8) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 14, 14, 576]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 576]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_on_very_small_images(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + with self.assertRaises(tf.errors.InvalidArgumentError): + sess.run( + features_shape, + feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)}) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = 
self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [3, 14, 14, 576], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [3, 7, 7, 1024]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..aa37848bb844dc58037cc815783b156b03b928a0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor.py @@ -0,0 +1,193 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Mobilenet v1 Faster R-CNN implementation.""" +import numpy as np + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import shape_utils +from nets import mobilenet_v1 + + +def _get_mobilenet_conv_no_last_stride_defs(conv_depth_ratio_in_percentage): + if conv_depth_ratio_in_percentage not in [25, 50, 75, 100]: + raise ValueError( + 'Only the following ratio percentages are supported: 25, 50, 75, 100') + conv_depth_ratio_in_percentage = float(conv_depth_ratio_in_percentage) / 100.0 + channels = np.array([ + 32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024 + ], dtype=np.float32) + channels = (channels * conv_depth_ratio_in_percentage).astype(np.int32) + return [ + mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=channels[0]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[1]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[2]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[3]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[4]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[5]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[6]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[7]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[8]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[9]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[10]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[11]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[12]), + mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[13]) + ] + + +class FasterRCNNMobilenetV1FeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN Mobilenet V1 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + depth_multiplier=1.0, + min_depth=16, + skip_last_stride=False, + conv_depth_ratio_in_percentage=100): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + skip_last_stride: Skip the last stride if True. + conv_depth_ratio_in_percentage: Conv depth ratio in percentage. Only + applied if skip_last_stride is True. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. + """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + self._depth_multiplier = depth_multiplier + self._min_depth = min_depth + self._skip_last_stride = skip_last_stride + self._conv_depth_ratio_in_percentage = conv_depth_ratio_in_percentage + super(FasterRCNNMobilenetV1FeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN Mobilenet V1 preprocessing. + + Maps pixel values to the range [-1, 1]. 
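# Illustrative check of the channel scaling performed in
# _get_mobilenet_conv_no_last_stride_defs above; 50 is just an example ratio
# from the supported set {25, 50, 75, 100}.
import numpy as np

channels = np.array(
    [32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024],
    dtype=np.float32)
print((channels * (50 / 100.0)).astype(np.int32))
# [ 16  32  64  64 128 128 256 256 256 256 256 256 512 512]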
+ + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + activations: A dictionary mapping feature extractor tensor names to + tensors + + Raises: + InvalidArgumentError: If the spatial size of `preprocessed_inputs` + (height or width) is less than 33. + ValueError: If the created network is missing the required activation. + """ + + preprocessed_inputs.get_shape().assert_has_rank(4) + preprocessed_inputs = shape_utils.check_min_image_dim( + min_dim=33, image_tensor=preprocessed_inputs) + + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope( + is_training=self._train_batch_norm, + weight_decay=self._weight_decay)): + with tf.variable_scope('MobilenetV1', + reuse=self._reuse_weights) as scope: + params = {} + if self._skip_last_stride: + params['conv_defs'] = _get_mobilenet_conv_no_last_stride_defs( + conv_depth_ratio_in_percentage=self. + _conv_depth_ratio_in_percentage) + _, activations = mobilenet_v1.mobilenet_v1_base( + preprocessed_inputs, + final_endpoint='Conv2d_11_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope, + **params) + return activations['Conv2d_11_pointwise'], activations + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name (unused). + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. 
+ """ + net = proposal_feature_maps + + conv_depth = 1024 + if self._skip_last_stride: + conv_depth_ratio = float(self._conv_depth_ratio_in_percentage) / 100.0 + conv_depth = int(float(conv_depth) * conv_depth_ratio) + + depth = lambda d: max(int(d * 1.0), 16) + with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights): + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope( + is_training=self._train_batch_norm, + weight_decay=self._weight_decay)): + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], padding='SAME'): + net = slim.separable_conv2d( + net, + depth(conv_depth), [3, 3], + depth_multiplier=1, + stride=2, + scope='Conv2d_12_pointwise') + return slim.separable_conv2d( + net, + depth(conv_depth), [3, 3], + depth_multiplier=1, + stride=1, + scope='Conv2d_13_pointwise') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..65a4958e4c20964b2857f95f7bc2b83d05d3cc02 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py @@ -0,0 +1,128 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for faster_rcnn_mobilenet_v1_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_mobilenet_v1_feature_extractor as faster_rcnn_mobilenet_v1 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnMobilenetV1FeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return faster_rcnn_mobilenet_v1.FasterRCNNMobilenetV1FeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 14, 14, 512]) + + def test_extract_proposal_features_stride_eight(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=8) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 14, 14, 512]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 512]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_on_very_small_images(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + with self.assertRaises(tf.errors.InvalidArgumentError): + sess.run( + features_shape, + feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)}) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = 
self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [3, 14, 14, 576], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [3, 7, 7, 1024]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_nas_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_nas_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..9fe17cbea856dd1ed8ca0bf1a8c25327714c5b6d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_nas_feature_extractor.py @@ -0,0 +1,336 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""NASNet Faster R-CNN implementation. + +Learning Transferable Architectures for Scalable Image Recognition +Barret Zoph, Vijay Vasudevan, Jonathon Shlens, Quoc V. Le +https://arxiv.org/abs/1707.07012 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import variables_helper + +# pylint: disable=g-import-not-at-top +try: + from nets.nasnet import nasnet + from nets.nasnet import nasnet_utils +except: # pylint: disable=bare-except + pass +# pylint: enable=g-import-not-at-top + +arg_scope = slim.arg_scope + + +def nasnet_large_arg_scope_for_detection(is_batch_norm_training=False): + """Defines the default arg scope for the NASNet-A Large for object detection. + + This provides a small edit to switch batch norm training on and off. + + Args: + is_batch_norm_training: Boolean indicating whether to train with batch norm. + + Returns: + An `arg_scope` to use for the NASNet Large Model. 
+ """ + imagenet_scope = nasnet.nasnet_large_arg_scope() + with arg_scope(imagenet_scope): + with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: + return sc + + +# Note: This is largely a copy of _build_nasnet_base inside nasnet.py but +# with special edits to remove instantiation of the stem and the special +# ability to receive as input a pair of hidden states. +def _build_nasnet_base(hidden_previous, + hidden, + normal_cell, + reduction_cell, + hparams, + true_cell_num, + start_cell_num): + """Constructs a NASNet image model.""" + + # Find where to place the reduction cells or stride normal cells + reduction_indices = nasnet_utils.calc_reduction_layers( + hparams.num_cells, hparams.num_reduction_layers) + + # Note: The None is prepended to match the behavior of _imagenet_stem() + cell_outputs = [None, hidden_previous, hidden] + net = hidden + + # NOTE: In the nasnet.py code, filter_scaling starts at 1.0. We instead + # start at 2.0 because 1 reduction cell has been created which would + # update the filter_scaling to 2.0. + filter_scaling = 2.0 + + # Run the cells + for cell_num in range(start_cell_num, hparams.num_cells): + stride = 1 + if hparams.skip_reduction_layer_input: + prev_layer = cell_outputs[-2] + if cell_num in reduction_indices: + filter_scaling *= hparams.filter_scaling_rate + net = reduction_cell( + net, + scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)), + filter_scaling=filter_scaling, + stride=2, + prev_layer=cell_outputs[-2], + cell_num=true_cell_num) + true_cell_num += 1 + cell_outputs.append(net) + if not hparams.skip_reduction_layer_input: + prev_layer = cell_outputs[-2] + net = normal_cell( + net, + scope='cell_{}'.format(cell_num), + filter_scaling=filter_scaling, + stride=stride, + prev_layer=prev_layer, + cell_num=true_cell_num) + true_cell_num += 1 + cell_outputs.append(net) + + # Final nonlinearity. + # Note that we have dropped the final pooling, dropout and softmax layers + # from the default nasnet version. + with tf.variable_scope('final_layer'): + net = tf.nn.relu(net) + return net + + +# TODO(shlens): Only fixed_shape_resizer is currently supported for NASNet +# featurization. The reason for this is that nasnet.py only supports +# inputs with fully known shapes. We need to update nasnet.py to handle +# shapes not known at compile time. +class FasterRCNNNASFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN with NASNet-A feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 16. + """ + if first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 16.') + super(FasterRCNNNASFeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN with NAS preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. 
+ + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Extracts features using the first half of the NASNet network. + We construct the network in `align_feature_maps=True` mode, which means + that all VALID paddings in the network are changed to SAME padding so that + the feature maps are aligned. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + end_points: A dictionary mapping feature extractor tensor names to tensors + + Raises: + ValueError: If the created network is missing the required activation. + """ + del scope + + if len(preprocessed_inputs.get_shape().as_list()) != 4: + raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' + 'tensor of shape %s' % preprocessed_inputs.get_shape()) + + with slim.arg_scope(nasnet_large_arg_scope_for_detection( + is_batch_norm_training=self._train_batch_norm)): + with arg_scope([slim.conv2d, + slim.batch_norm, + slim.separable_conv2d], + reuse=self._reuse_weights): + _, end_points = nasnet.build_nasnet_large( + preprocessed_inputs, num_classes=None, + is_training=self._is_training, + final_endpoint='Cell_11') + + # Note that both 'Cell_10' and 'Cell_11' have equal depth = 2016. + rpn_feature_map = tf.concat([end_points['Cell_10'], + end_points['Cell_11']], 3) + + # nasnet.py does not maintain the batch size in the first dimension. + # This work around permits us retaining the batch for below. + batch = preprocessed_inputs.get_shape().as_list()[0] + shape_without_batch = rpn_feature_map.get_shape().as_list()[1:] + rpn_feature_map_shape = [batch] + shape_without_batch + rpn_feature_map.set_shape(rpn_feature_map_shape) + + return rpn_feature_map, end_points + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + This function reconstructs the "second half" of the NASNet-A + network after the part defined in `_extract_proposal_features`. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name. + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + del scope + + # Note that we always feed into 2 layers of equal depth + # where the first N channels corresponds to previous hidden layer + # and the second N channels correspond to the final hidden layer. + hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3) + + # Note that what follows is largely a copy of build_nasnet_large() within + # nasnet.py. We are copying to minimize code pollution in slim. + + # TODO(shlens,skornblith): Determine the appropriate drop path schedule. + # For now the schedule is the default (1.0->0.7 over 250,000 train steps). + hparams = nasnet.large_imagenet_config() + if not self._is_training: + hparams.set_hparam('drop_path_keep_prob', 1.0) + + # Calculate the total number of cells in the network + # -- Add 2 for the reduction cells. 
+ total_num_cells = hparams.num_cells + 2 + # -- And add 2 for the stem cells for ImageNet training. + total_num_cells += 2 + + normal_cell = nasnet_utils.NasNetANormalCell( + hparams.num_conv_filters, hparams.drop_path_keep_prob, + total_num_cells, hparams.total_training_steps) + reduction_cell = nasnet_utils.NasNetAReductionCell( + hparams.num_conv_filters, hparams.drop_path_keep_prob, + total_num_cells, hparams.total_training_steps) + with arg_scope([slim.dropout, nasnet_utils.drop_path], + is_training=self._is_training): + with arg_scope([slim.batch_norm], is_training=self._train_batch_norm): + with arg_scope([slim.avg_pool2d, + slim.max_pool2d, + slim.conv2d, + slim.batch_norm, + slim.separable_conv2d, + nasnet_utils.factorized_reduction, + nasnet_utils.global_avg_pool, + nasnet_utils.get_channel_index, + nasnet_utils.get_channel_dim], + data_format=hparams.data_format): + + # This corresponds to the cell number just past 'Cell_11' used by + # by _extract_proposal_features(). + start_cell_num = 12 + # Note that this number equals: + # start_cell_num + 2 stem cells + 1 reduction cell + true_cell_num = 15 + + with slim.arg_scope(nasnet.nasnet_large_arg_scope()): + net = _build_nasnet_base(hidden_previous, + hidden, + normal_cell=normal_cell, + reduction_cell=reduction_cell, + hparams=hparams, + true_cell_num=true_cell_num, + start_cell_num=start_cell_num) + + proposal_classifier_features = net + return proposal_classifier_features + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Note that this overrides the default implementation in + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for + NASNet-A checkpoints. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor. + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. + """ + # Note that the NAS checkpoint only contains the moving average version of + # the Variables so we need to generate an appropriate dictionary mapping. 
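# String-level sketch of the checkpoint-name mapping built below: the
# feature-extractor scope prefix is stripped and '/ExponentialMovingAverage'
# is appended. The scope and variable names here are hypothetical examples.
def checkpoint_key(var_op_name, feature_extractor_scope):
    name = var_op_name.replace(feature_extractor_scope + '/', '')
    return name + '/ExponentialMovingAverage'

print(checkpoint_key('FirstStageFeatureExtractor/cell_0/conv/weights',
                     'FirstStageFeatureExtractor'))
# cell_0/conv/weights/ExponentialMovingAverage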
+ variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + if variable.op.name.startswith( + first_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + first_stage_feature_extractor_scope + '/', '') + var_name += '/ExponentialMovingAverage' + variables_to_restore[var_name] = variable + if variable.op.name.startswith( + second_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + second_stage_feature_extractor_scope + '/', '') + var_name += '/ExponentialMovingAverage' + variables_to_restore[var_name] = variable + return variables_to_restore diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_nas_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_nas_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e143c7b00a6cc0fc4410196d319d5e2b5b831f8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_nas_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_nas_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_nas_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a41cb0f733d613ffb050bbf4f8506579375c9d08 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_nas_feature_extractor_tf1_test.py @@ -0,0 +1,111 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for models.faster_rcnn_nas_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnNASFeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return frcnn_nas.FasterRCNNNASFeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 299, 299, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 19, 19, 4032]) + + def test_extract_proposal_features_input_size_224(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 14, 14, 4032]) + + def test_extract_proposal_features_input_size_112(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 4032]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [2, 17, 17, 1088], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = 
tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [2, 9, 9, 4032]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_pnas_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_pnas_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..ec32cd309d3a3fe135cf72665631b04273e21424 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_pnas_feature_extractor.py @@ -0,0 +1,329 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""PNASNet Faster R-CNN implementation. + +Based on PNASNet model: https://arxiv.org/abs/1712.00559 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.utils import variables_helper +from nets.nasnet import nasnet_utils + +try: + from nets.nasnet import pnasnet # pylint: disable=g-import-not-at-top +except: # pylint: disable=bare-except + pass + +arg_scope = slim.arg_scope + + +def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False): + """Defines the default arg scope for the PNASNet Large for object detection. + + This provides a small edit to switch batch norm training on and off. + + Args: + is_batch_norm_training: Boolean indicating whether to train with batch norm. + + Returns: + An `arg_scope` to use for the PNASNet Large Model. + """ + imagenet_scope = pnasnet.pnasnet_large_arg_scope() + with arg_scope(imagenet_scope): + with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: + return sc + + +def _filter_scaling(reduction_indices, start_cell_num): + """Compute the expected filter scaling at given PNASNet cell start_cell_num. + + In the pnasnet.py code, filter_scaling starts at 1.0. We instead + adapt filter scaling to depend on the starting cell. + At first cells, before any reduction, filter_scalling is 1.0. With passing + any reduction cell, the filter_scaling is multiplied by 2. + + Args: + reduction_indices: list of int indices. + start_cell_num: int. + Returns: + filter_scaling: float. 
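# Worked numbers for the scaling rule documented above, assuming
# PNASNet-Large-style reduction indices [4, 8] (illustrative): with
# start_cell_num=8 only the reduction at index 4 precedes the start cell,
# so the filter scale doubles exactly once.
reduction_indices, start_cell_num = [4, 8], 8
scale = 2.0 ** sum(1 for ind in reduction_indices if ind < start_cell_num)
print(scale)   # 2.0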
+ """ + filter_scaling = 1.0 + for ind in reduction_indices: + if ind < start_cell_num: + filter_scaling *= 2.0 + return filter_scaling + + +# Note: This is largely a copy of _build_pnasnet_base inside pnasnet.py but +# with special edits to remove instantiation of the stem and the special +# ability to receive as input a pair of hidden states. It constructs only +# a sub-network from the original PNASNet model, starting from the +# start_cell_num cell and with modified final layer. +def _build_pnasnet_base( + hidden_previous, hidden, normal_cell, hparams, true_cell_num, + start_cell_num): + """Constructs a PNASNet image model for proposal classifier features.""" + + # Find where to place the reduction cells or stride normal cells + reduction_indices = nasnet_utils.calc_reduction_layers( + hparams.num_cells, hparams.num_reduction_layers) + filter_scaling = _filter_scaling(reduction_indices, start_cell_num) + + # Note: The None is prepended to match the behavior of _imagenet_stem() + cell_outputs = [None, hidden_previous, hidden] + net = hidden + + # Run the cells + for cell_num in range(start_cell_num, hparams.num_cells): + is_reduction = cell_num in reduction_indices + stride = 2 if is_reduction else 1 + if is_reduction: filter_scaling *= hparams.filter_scaling_rate + prev_layer = cell_outputs[-2] + net = normal_cell( + net, + scope='cell_{}'.format(cell_num), + filter_scaling=filter_scaling, + stride=stride, + prev_layer=prev_layer, + cell_num=true_cell_num) + true_cell_num += 1 + cell_outputs.append(net) + + # Final nonlinearity. + # Note that we have dropped the final pooling, dropout and softmax layers + # from the default pnasnet version. + with tf.variable_scope('final_layer'): + net = tf.nn.relu(net) + return net + + +# TODO(shlens): Only fixed_shape_resizer is currently supported for PNASNet +# featurization. The reason for this is that pnasnet.py only supports +# inputs with fully known shapes. We need to update pnasnet.py to handle +# shapes not known at compile time. +class FasterRCNNPNASFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN with PNASNet feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 16. + """ + if first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 16.') + super(FasterRCNNPNASFeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN with PNAS preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Extracts features using the first half of the PNASNet network. 
+ We construct the network in `align_feature_maps=True` mode, which means + that all VALID paddings in the network are changed to SAME padding so that + the feature maps are aligned. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + end_points: A dictionary mapping feature extractor tensor names to tensors + + Raises: + ValueError: If the created network is missing the required activation. + """ + del scope + + if len(preprocessed_inputs.get_shape().as_list()) != 4: + raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' + 'tensor of shape %s' % preprocessed_inputs.get_shape()) + + with slim.arg_scope(pnasnet_large_arg_scope_for_detection( + is_batch_norm_training=self._train_batch_norm)): + with arg_scope([slim.conv2d, + slim.batch_norm, + slim.separable_conv2d], + reuse=self._reuse_weights): + _, end_points = pnasnet.build_pnasnet_large( + preprocessed_inputs, num_classes=None, + is_training=self._is_training, + final_endpoint='Cell_7') + + # Note that both 'Cell_6' and 'Cell_7' have equal depth = 2160. + # Cell_7 is the last cell before second reduction. + rpn_feature_map = tf.concat([end_points['Cell_6'], + end_points['Cell_7']], 3) + + # pnasnet.py does not maintain the batch size in the first dimension. + # This work around permits us retaining the batch for below. + batch = preprocessed_inputs.get_shape().as_list()[0] + shape_without_batch = rpn_feature_map.get_shape().as_list()[1:] + rpn_feature_map_shape = [batch] + shape_without_batch + rpn_feature_map.set_shape(rpn_feature_map_shape) + + return rpn_feature_map, end_points + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + This function reconstructs the "second half" of the PNASNet + network after the part defined in `_extract_proposal_features`. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name. + + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + del scope + + # Number of used stem cells. + num_stem_cells = 2 + + # Note that we always feed into 2 layers of equal depth + # where the first N channels corresponds to previous hidden layer + # and the second N channels correspond to the final hidden layer. + hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3) + + # Note that what follows is largely a copy of build_pnasnet_large() within + # pnasnet.py. We are copying to minimize code pollution in slim. + + # TODO(shlens,skornblith): Determine the appropriate drop path schedule. + # For now the schedule is the default (1.0->0.7 over 250,000 train steps). 
+ hparams = pnasnet.large_imagenet_config() + if not self._is_training: + hparams.set_hparam('drop_path_keep_prob', 1.0) + + # Calculate the total number of cells in the network + total_num_cells = hparams.num_cells + num_stem_cells + + normal_cell = pnasnet.PNasNetNormalCell( + hparams.num_conv_filters, hparams.drop_path_keep_prob, + total_num_cells, hparams.total_training_steps) + with arg_scope([slim.dropout, nasnet_utils.drop_path], + is_training=self._is_training): + with arg_scope([slim.batch_norm], is_training=self._train_batch_norm): + with arg_scope([slim.avg_pool2d, + slim.max_pool2d, + slim.conv2d, + slim.batch_norm, + slim.separable_conv2d, + nasnet_utils.factorized_reduction, + nasnet_utils.global_avg_pool, + nasnet_utils.get_channel_index, + nasnet_utils.get_channel_dim], + data_format=hparams.data_format): + + # This corresponds to the cell number just past 'Cell_7' used by + # _extract_proposal_features(). + start_cell_num = 8 + true_cell_num = start_cell_num + num_stem_cells + + with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): + net = _build_pnasnet_base( + hidden_previous, + hidden, + normal_cell=normal_cell, + hparams=hparams, + true_cell_num=true_cell_num, + start_cell_num=start_cell_num) + + proposal_classifier_features = net + return proposal_classifier_features + + def restore_from_classification_checkpoint_fn( + self, + first_stage_feature_extractor_scope, + second_stage_feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Note that this overrides the default implementation in + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for + PNASNet checkpoints. + + Args: + first_stage_feature_extractor_scope: A scope name for the first stage + feature extractor. + second_stage_feature_extractor_scope: A scope name for the second stage + feature extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. 
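+
+    Each variable under either feature extractor scope is restored from the
+    checkpoint name obtained by stripping the scope prefix and appending an
+    '/ExponentialMovingAverage' suffix, so the moving-average weights stored
+    in the classification checkpoint are the values that get loaded.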
+ """ + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + if variable.op.name.startswith( + first_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + first_stage_feature_extractor_scope + '/', '') + var_name += '/ExponentialMovingAverage' + variables_to_restore[var_name] = variable + if variable.op.name.startswith( + second_stage_feature_extractor_scope): + var_name = variable.op.name.replace( + second_stage_feature_extractor_scope + '/', '') + var_name += '/ExponentialMovingAverage' + variables_to_restore[var_name] = variable + return variables_to_restore diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_pnas_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_pnas_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af9e10ad09eb097ee6872cc955863fb597f7772b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_pnas_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_pnas_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_pnas_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..16774511b4d9c6eb1c94b8304640d9bf99c47ce0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_pnas_feature_extractor_tf1_test.py @@ -0,0 +1,124 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for models.faster_rcnn_pnas_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnPNASFeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, first_stage_features_stride): + return frcnn_pnas.FasterRCNNPNASFeatureExtractor( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 299, 299, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 19, 19, 4320]) + + def test_extract_proposal_features_input_size_224(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 14, 14, 4320]) + + def test_extract_proposal_features_input_size_112(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 4320]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [2, 17, 17, 1088], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = 
tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [2, 9, 9, 4320]) + + def test_filter_scaling_computation(self): + expected_filter_scaling = { + ((4, 8), 2): 1.0, + ((4, 8), 7): 2.0, + ((4, 8), 8): 2.0, + ((4, 8), 9): 4.0 + } + for args, filter_scaling in expected_filter_scaling.items(): + reduction_indices, start_cell_num = args + self.assertAlmostEqual( + frcnn_pnas._filter_scaling(reduction_indices, start_cell_num), + filter_scaling) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..a6b1e25404c71be5a3b68df9ce85416ffd4e982e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py @@ -0,0 +1,254 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Resnet based Faster R-CNN implementation in Keras. + +See Deep Residual Learning for Image Recognition by He et al. +https://arxiv.org/abs/1512.03385 +""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.models.keras_models import resnet_v1 +from object_detection.utils import model_util + + +_RESNET_MODEL_CONV4_LAST_LAYERS = { + 'resnet_v1_50': 'conv4_block6_out', + 'resnet_v1_101': 'conv4_block23_out', + 'resnet_v1_152': 'conv4_block36_out', +} + + +class FasterRCNNResnetKerasFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): + """Faster R-CNN with Resnet feature extractor implementation.""" + + def __init__(self, + is_training, + resnet_v1_base_model, + resnet_v1_base_model_name, + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + resnet_v1_base_model: base resnet v1 network to use. One of + the resnet_v1.resnet_v1_{50,101,152} models. + resnet_v1_base_model_name: model name under which to construct resnet v1. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. 
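+
+    A concrete subclass such as FasterRCNNResnet50KerasFeatureExtractor is
+    typically constructed with first_stage_features_stride=16, which is the
+    only stride accepted by this Keras implementation.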
+ """ + if first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 16.') + super(FasterRCNNResnetKerasFeatureExtractor, self).__init__( + is_training, first_stage_features_stride, batch_norm_trainable, + weight_decay) + self.classification_backbone = None + self._variable_dict = {} + self._resnet_v1_base_model = resnet_v1_base_model + self._resnet_v1_base_model_name = resnet_v1_base_model_name + + def preprocess(self, resized_inputs): + """Faster R-CNN Resnet V1 preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def get_proposal_feature_extractor_model(self, name=None): + """Returns a model that extracts first stage RPN features. + + Extracts features using the first half of the Resnet v1 network. + + Args: + name: A scope name to construct all variables within. + + Returns: + A Keras model that takes preprocessed_inputs: + A [batch, height, width, channels] float32 tensor + representing a batch of images. + + And returns rpn_feature_map: + A tensor with shape [batch, height, width, depth] + """ + if not self.classification_backbone: + self.classification_backbone = self._resnet_v1_base_model( + batchnorm_training=self._train_batch_norm, + conv_hyperparams=None, + weight_decay=self._weight_decay, + classes=None, + weights=None, + include_top=False + ) + with tf.name_scope(name): + with tf.name_scope('ResnetV1'): + + conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[ + self._resnet_v1_base_model_name] + proposal_features = self.classification_backbone.get_layer( + name=conv4_last_layer).output + keras_model = tf.keras.Model( + inputs=self.classification_backbone.inputs, + outputs=proposal_features) + for variable in keras_model.variables: + self._variable_dict[variable.name[:-2]] = variable + return keras_model + + def get_box_classifier_feature_extractor_model(self, name=None): + """Returns a model that extracts second stage box classifier features. + + This function reconstructs the "second half" of the ResNet v1 + network after the part defined in `get_proposal_feature_extractor_model`. + + Args: + name: A scope name to construct all variables within. + + Returns: + A Keras model that takes proposal_feature_maps: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + And returns proposal_classifier_features: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. 
+ """ + if not self.classification_backbone: + self.classification_backbone = self._resnet_v1_base_model( + batchnorm_training=self._train_batch_norm, + conv_hyperparams=None, + weight_decay=self._weight_decay, + classes=None, + weights=None, + include_top=False + ) + with tf.name_scope(name): + with tf.name_scope('ResnetV1'): + conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[ + self._resnet_v1_base_model_name] + proposal_feature_maps = self.classification_backbone.get_layer( + name=conv4_last_layer).output + proposal_classifier_features = self.classification_backbone.get_layer( + name='conv5_block3_out').output + + keras_model = model_util.extract_submodel( + model=self.classification_backbone, + inputs=proposal_feature_maps, + outputs=proposal_classifier_features) + for variable in keras_model.variables: + self._variable_dict[variable.name[:-2]] = variable + return keras_model + + +class FasterRCNNResnet50KerasFeatureExtractor( + FasterRCNNResnetKerasFeatureExtractor): + """Faster R-CNN with Resnet50 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. + """ + super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__( + is_training=is_training, + resnet_v1_base_model=resnet_v1.resnet_v1_50, + resnet_v1_base_model_name='resnet_v1_50', + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay) + + +class FasterRCNNResnet101KerasFeatureExtractor( + FasterRCNNResnetKerasFeatureExtractor): + """Faster R-CNN with Resnet101 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. + """ + super(FasterRCNNResnet101KerasFeatureExtractor, self).__init__( + is_training=is_training, + resnet_v1_base_model=resnet_v1.resnet_v1_101, + resnet_v1_base_model_name='resnet_v1_101', + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay) + + +class FasterRCNNResnet152KerasFeatureExtractor( + FasterRCNNResnetKerasFeatureExtractor): + """Faster R-CNN with Resnet152 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. 
+ """ + super(FasterRCNNResnet152KerasFeatureExtractor, self).__init__( + is_training=is_training, + resnet_v1_base_model=resnet_v1.resnet_v1_152, + resnet_v1_base_model_name='resnet_v1_152', + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..15e8a5fbf153cdee690be94d2d9c910070af35f0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py @@ -0,0 +1,80 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for models.faster_rcnn_resnet_keras_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_res +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class FasterRcnnResnetKerasFeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, architecture='resnet_v1_50'): + return frcnn_res.FasterRCNNResnet50KerasFeatureExtractor( + is_training=False, + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [1, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shape = tf.shape(rpn_feature_map) + self.assertAllEqual(features_shape.numpy(), [1, 14, 14, 1024]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shape = tf.shape(rpn_feature_map) + self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1024]) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(tf.errors.InvalidArgumentError): + feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + + def test_extract_box_classifier_features_returns_expected_size(self): + 
feature_extractor = self._build_feature_extractor() + proposal_feature_maps = tf.random_uniform( + [3, 7, 7, 1024], maxval=255, dtype=tf.float32) + model = feature_extractor.get_box_classifier_feature_extractor_model( + name='TestScope') + proposal_classifier_features = ( + model(proposal_feature_maps)) + features_shape = tf.shape(proposal_classifier_features) + # Note: due to a slight mismatch in slim and keras resnet definitions + # the output shape of the box classifier is slightly different compared to + # that of the slim implementation. The keras version is more `canonical` + # in that it more accurately reflects the original authors' implementation. + # TODO(jonathanhuang): make the output shape match that of the slim + # implementation by using atrous convolutions. + self.assertAllEqual(features_shape.numpy(), [3, 4, 4, 2048]) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..30cd9d42c54500af36f74d45870b04726755b90e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py @@ -0,0 +1,268 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Resnet V1 Faster R-CNN implementation. + +See "Deep Residual Learning for Image Recognition" by He et al., 2015. +https://arxiv.org/abs/1512.03385 + +Note: this implementation assumes that the classification checkpoint used +to finetune this model is trained using the same configuration as that of +the MSRA provided checkpoints +(see https://github.com/KaimingHe/deep-residual-networks), e.g., with +same preprocessing, batch norm scaling, etc. +""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from nets import resnet_utils +from nets import resnet_v1 + + +class FasterRCNNResnetV1FeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster R-CNN Resnet V1 feature extractor implementation.""" + + def __init__(self, + architecture, + resnet_model, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + activation_fn=tf.nn.relu): + """Constructor. + + Args: + architecture: Architecture name of the Resnet V1 model. + resnet_model: Definition of the Resnet V1 model. + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + activation_fn: Activaton functon to use in Resnet V1 model. 
+ + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. + """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + self._architecture = architecture + self._resnet_model = resnet_model + self._activation_fn = activation_fn + super(FasterRCNNResnetV1FeatureExtractor, + self).__init__(is_training, first_stage_features_stride, + batch_norm_trainable, reuse_weights, weight_decay) + + def preprocess(self, resized_inputs): + """Faster R-CNN Resnet V1 preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def _extract_proposal_features(self, preprocessed_inputs, scope): + """Extracts first stage RPN features. + + Args: + preprocessed_inputs: A [batch, height, width, channels] float32 tensor + representing a batch of images. + scope: A scope name. + + Returns: + rpn_feature_map: A tensor with shape [batch, height, width, depth] + activations: A dictionary mapping feature extractor tensor names to + tensors + + Raises: + InvalidArgumentError: If the spatial size of `preprocessed_inputs` + (height or width) is less than 33. + ValueError: If the created network is missing the required activation. + """ + if len(preprocessed_inputs.get_shape().as_list()) != 4: + raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' + 'tensor of shape %s' % preprocessed_inputs.get_shape()) + shape_assert = tf.Assert( + tf.logical_and( + tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), + tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), + ['image size must at least be 33 in both height and width.']) + + with tf.control_dependencies([shape_assert]): + # Disables batchnorm for fine-tuning with smaller batch sizes. + # TODO(chensun): Figure out if it is needed when image + # batch size is bigger. + with slim.arg_scope( + resnet_utils.resnet_arg_scope( + batch_norm_epsilon=1e-5, + batch_norm_scale=True, + activation_fn=self._activation_fn, + weight_decay=self._weight_decay)): + with tf.variable_scope( + self._architecture, reuse=self._reuse_weights) as var_scope: + _, activations = self._resnet_model( + preprocessed_inputs, + num_classes=None, + is_training=self._train_batch_norm, + global_pool=False, + output_stride=self._first_stage_features_stride, + spatial_squeeze=False, + scope=var_scope) + + handle = scope + '/%s/block3' % self._architecture + return activations[handle], activations + + def _extract_box_classifier_features(self, proposal_feature_maps, scope): + """Extracts second stage box classifier features. + + Args: + proposal_feature_maps: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + scope: A scope name (unused). 
+ + Returns: + proposal_classifier_features: A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. + """ + with tf.variable_scope(self._architecture, reuse=self._reuse_weights): + with slim.arg_scope( + resnet_utils.resnet_arg_scope( + batch_norm_epsilon=1e-5, + batch_norm_scale=True, + activation_fn=self._activation_fn, + weight_decay=self._weight_decay)): + with slim.arg_scope([slim.batch_norm], + is_training=self._train_batch_norm): + blocks = [ + resnet_utils.Block('block4', resnet_v1.bottleneck, [{ + 'depth': 2048, + 'depth_bottleneck': 512, + 'stride': 1 + }] * 3) + ] + proposal_classifier_features = resnet_utils.stack_blocks_dense( + proposal_feature_maps, blocks) + return proposal_classifier_features + + +class FasterRCNNResnet50FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): + """Faster R-CNN Resnet 50 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + activation_fn=tf.nn.relu): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + activation_fn: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16, + or if `architecture` is not supported. + """ + super(FasterRCNNResnet50FeatureExtractor, + self).__init__('resnet_v1_50', resnet_v1.resnet_v1_50, is_training, + first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay, activation_fn) + + +class FasterRCNNResnet101FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): + """Faster R-CNN Resnet 101 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + activation_fn=tf.nn.relu): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + activation_fn: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16, + or if `architecture` is not supported. + """ + super(FasterRCNNResnet101FeatureExtractor, + self).__init__('resnet_v1_101', resnet_v1.resnet_v1_101, is_training, + first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay, activation_fn) + + +class FasterRCNNResnet152FeatureExtractor(FasterRCNNResnetV1FeatureExtractor): + """Faster R-CNN Resnet 152 feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0, + activation_fn=tf.nn.relu): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + reuse_weights: See base class. + weight_decay: See base class. + activation_fn: See base class. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16, + or if `architecture` is not supported. 
+ """ + super(FasterRCNNResnet152FeatureExtractor, + self).__init__('resnet_v1_152', resnet_v1.resnet_v1_152, is_training, + first_stage_features_stride, batch_norm_trainable, + reuse_weights, weight_decay, activation_fn) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc465f1f125f8c545e4f658781f3ccae0667dc2e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3d47da04af5fb3f728379a649d64329c862eaf75 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_tf1_test.py @@ -0,0 +1,167 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.models.faster_rcnn_resnet_v1_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as faster_rcnn_resnet_v1 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class FasterRcnnResnetV1FeatureExtractorTest(tf.test.TestCase): + + def _build_feature_extractor(self, + first_stage_features_stride, + activation_fn=tf.nn.relu, + architecture='resnet_v1_101'): + feature_extractor_map = { + 'resnet_v1_50': + faster_rcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor, + 'resnet_v1_101': + faster_rcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor, + 'resnet_v1_152': + faster_rcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor + } + return feature_extractor_map[architecture]( + is_training=False, + first_stage_features_stride=first_stage_features_stride, + activation_fn=activation_fn, + batch_norm_trainable=False, + reuse_weights=None, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']: + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16, architecture=architecture) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 14, 14, 1024]) + + def test_extract_proposal_features_stride_eight(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=8) + preprocessed_inputs = tf.random_uniform( + [4, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [4, 28, 28, 1024]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [1, 112, 112, 3], maxval=255, dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [1, 7, 7, 1024]) + + def test_extract_proposal_features_dies_on_invalid_stride(self): + with self.assertRaises(ValueError): + self._build_feature_extractor(first_stage_features_stride=99) + + def test_extract_proposal_features_dies_on_very_small_images(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( 
+ preprocessed_inputs, scope='TestScope') + features_shape = tf.shape(rpn_feature_map) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + with self.assertRaises(tf.errors.InvalidArgumentError): + sess.run( + features_shape, + feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)}) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(ValueError): + feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestScope') + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16) + proposal_feature_maps = tf.random_uniform( + [3, 7, 7, 1024], maxval=255, dtype=tf.float32) + proposal_classifier_features = ( + feature_extractor.extract_box_classifier_features( + proposal_feature_maps, scope='TestScope')) + features_shape = tf.shape(proposal_classifier_features) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + features_shape_out = sess.run(features_shape) + self.assertAllEqual(features_shape_out, [3, 7, 7, 2048]) + + def test_overwriting_activation_fn(self): + for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']: + feature_extractor = self._build_feature_extractor( + first_stage_features_stride=16, + architecture=architecture, + activation_fn=tf.nn.relu6) + preprocessed_inputs = tf.random_uniform([4, 224, 224, 3], + maxval=255, + dtype=tf.float32) + rpn_feature_map, _ = feature_extractor.extract_proposal_features( + preprocessed_inputs, scope='TestStage1Scope') + _ = feature_extractor.extract_box_classifier_features( + rpn_feature_map, scope='TestStaget2Scope') + conv_ops = [ + op for op in tf.get_default_graph().get_operations() + if op.type == 'Relu6' + ] + op_names = [op.name for op in conv_ops] + + self.assertIsNotNone(conv_ops) + self.assertIn('TestStage1Scope/resnet_v1_50/resnet_v1_50/conv1/Relu6', + op_names) + self.assertIn( + 'TestStaget2Scope/resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/Relu6', + op_names) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..27d8844b7b765e1d195f1a40580c5b3863637b12 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -0,0 +1,434 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Faster RCNN Keras-based Resnet V1 FPN Feature Extractor.""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import faster_rcnn_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import resnet_v1 +from object_detection.utils import ops + + +_RESNET_MODEL_OUTPUT_LAYERS = { + 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block6_out', 'conv5_block3_out'], + 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block23_out', 'conv5_block3_out'], + 'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out', + 'conv4_block36_out', 'conv5_block3_out'], +} + + +class _ResnetFPN(tf.keras.layers.Layer): + """Construct Resnet FPN layer.""" + + def __init__(self, + backbone_classifier, + fpn_features_generator, + coarse_feature_layers, + pad_to_multiple, + fpn_min_level, + resnet_block_names, + base_fpn_max_level): + """Constructor. + + Args: + backbone_classifier: Classifier backbone. Should be one of 'resnet_v1_50', + 'resnet_v1_101', 'resnet_v1_152'. + fpn_features_generator: KerasFpnTopDownFeatureMaps that accepts a + dictionary of features and returns a ordered dictionary of fpn features. + coarse_feature_layers: Coarse feature layers for fpn. + pad_to_multiple: An integer multiple to pad input image. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to Resnet v1 layers. + resnet_block_names: a list of block names of resnet. + base_fpn_max_level: maximum level of fpn without coarse feature layers. + """ + super(_ResnetFPN, self).__init__() + self.classification_backbone = backbone_classifier + self.fpn_features_generator = fpn_features_generator + self.coarse_feature_layers = coarse_feature_layers + self.pad_to_multiple = pad_to_multiple + self._fpn_min_level = fpn_min_level + self._resnet_block_names = resnet_block_names + self._base_fpn_max_level = base_fpn_max_level + + def call(self, inputs): + """Create internal Resnet FPN layer. + + Args: + inputs: A [batch, height_out, width_out, channels] float32 tensor + representing a batch of images. + + Returns: + feature_maps: A list of tensors with shape [batch, height, width, depth] + represent extracted features. 
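+
+      For example, in the default Resnet50 FPN configuration a 448x448 input
+      produces five 256-channel feature maps with spatial sizes 112, 56, 28,
+      14 and 7 (see the accompanying TF2 test).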
+ """ + inputs = ops.pad_to_multiple(inputs, self.pad_to_multiple) + backbone_outputs = self.classification_backbone(inputs) + + feature_block_list = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_block_list.append('block{}'.format(level - 1)) + feature_block_map = dict( + list(zip(self._resnet_block_names, backbone_outputs))) + fpn_input_image_features = [ + (feature_block, feature_block_map[feature_block]) + for feature_block in feature_block_list] + fpn_features = self.fpn_features_generator(fpn_input_image_features) + + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) + last_feature_map = fpn_features['top_down_block{}'.format( + self._base_fpn_max_level - 1)] + + for coarse_feature_layers in self.coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(last_feature_map) + + return feature_maps + + +class FasterRCNNResnetV1FpnKerasFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): + """Faster RCNN Feature Extractor using Keras-based Resnet V1 FPN features.""" + + def __init__(self, + is_training, + resnet_v1_base_model, + resnet_v1_base_model_name, + first_stage_features_stride, + conv_hyperparams, + batch_norm_trainable=True, + pad_to_multiple=32, + weight_decay=0.0, + fpn_min_level=2, + fpn_max_level=6, + additional_layer_depth=256, + override_base_feature_extractor_hyperparams=False): + """Constructor. + + Args: + is_training: See base class. + resnet_v1_base_model: base resnet v1 network to use. One of + the resnet_v1.resnet_v1_{50,101,152} models. + resnet_v1_base_model_name: model name under which to construct resnet v1. + first_stage_features_stride: See base class. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + batch_norm_trainable: See base class. + pad_to_multiple: An integer multiple to pad input image. + weight_decay: See base class. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to Resnet v1 layers. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + + Raises: + ValueError: If `first_stage_features_stride` is not 8 or 16. 
+ """ + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') + + super(FasterRCNNResnetV1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay) + + self._resnet_v1_base_model = resnet_v1_base_model + self._resnet_v1_base_model_name = resnet_v1_base_model_name + self._conv_hyperparams = conv_hyperparams + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._freeze_batchnorm = (not batch_norm_trainable) + self._pad_to_multiple = pad_to_multiple + + self._override_base_feature_extractor_hyperparams = \ + override_base_feature_extractor_hyperparams + self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] + self.classification_backbone = None + self._fpn_features_generator = None + self._coarse_feature_layers = [] + + def preprocess(self, resized_inputs): + """Faster R-CNN Resnet V1 preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def get_proposal_feature_extractor_model(self, name=None): + """Returns a model that extracts first stage RPN features. + + Extracts features using the Resnet v1 FPN network. + + Args: + name: A scope name to construct all variables within. + + Returns: + A Keras model that takes preprocessed_inputs: + A [batch, height, width, channels] float32 tensor + representing a batch of images. 
+ + And returns rpn_feature_map: + A list of tensors with shape [batch, height, width, depth] + """ + with tf.name_scope(name): + with tf.name_scope('ResnetV1FPN'): + full_resnet_v1_model = self._resnet_v1_base_model( + batchnorm_training=self._train_batch_norm, + conv_hyperparams=(self._conv_hyperparams if + self._override_base_feature_extractor_hyperparams + else None), + classes=None, + weights=None, + include_top=False) + output_layers = _RESNET_MODEL_OUTPUT_LAYERS[ + self._resnet_v1_base_model_name] + outputs = [full_resnet_v1_model.get_layer(output_layer_name).output + for output_layer_name in output_layers] + self.classification_backbone = tf.keras.Model( + inputs=full_resnet_v1_model.inputs, + outputs=outputs) + + self._base_fpn_max_level = min(self._fpn_max_level, 5) + self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level + self._fpn_features_generator = ( + feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=self._num_levels, + depth=self._additional_layer_depth, + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + + # Construct coarse feature layers + for i in range(self._base_fpn_max_level, self._fpn_max_level): + layers = [] + layer_name = 'bottom_up_block{}'.format(i) + layers.append( + tf.keras.layers.Conv2D( + self._additional_layer_depth, + [3, 3], + padding='SAME', + strides=2, + name=layer_name + '_conv', + **self._conv_hyperparams.params())) + layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name=layer_name + '_batchnorm')) + layers.append( + self._conv_hyperparams.build_activation_layer( + name=layer_name)) + self._coarse_feature_layers.append(layers) + + feature_extractor_model = _ResnetFPN(self.classification_backbone, + self._fpn_features_generator, + self._coarse_feature_layers, + self._pad_to_multiple, + self._fpn_min_level, + self._resnet_block_names, + self._base_fpn_max_level) + return feature_extractor_model + + def get_box_classifier_feature_extractor_model(self, name=None): + """Returns a model that extracts second stage box classifier features. + + Construct two fully connected layer to extract the box classifier features. + + Args: + name: A scope name to construct all variables within. + + Returns: + A Keras model that takes proposal_feature_maps: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + + And returns proposal_classifier_features: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, 1, 1, 1024] + representing box classifier features for each proposal. 
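+
+    The returned model flattens each cropped proposal feature map and
+    applies two 1024-unit fully connected layers, with batch normalization
+    between them, before reshaping the result to
+    [batch_size * self.max_num_proposals, 1, 1, 1024].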
+ """ + with tf.name_scope(name): + with tf.name_scope('ResnetV1FPN'): + feature_extractor_model = tf.keras.models.Sequential([ + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(units=1024, activation='relu'), + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm)), + tf.keras.layers.Dense(units=1024, activation='relu'), + tf.keras.layers.Reshape((1, 1, 1024)) + ]) + return feature_extractor_model + + +class FasterRCNNResnet50FpnKerasFeatureExtractor( + FasterRCNNResnetV1FpnKerasFeatureExtractor): + """Faster RCNN with Resnet50 FPN feature extractor.""" + + def __init__(self, + is_training, + first_stage_features_stride=16, + batch_norm_trainable=True, + conv_hyperparams=None, + weight_decay=0.0, + fpn_min_level=2, + fpn_max_level=6, + additional_layer_depth=256, + override_base_feature_extractor_hyperparams=False): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + conv_hyperparams: See base class. + weight_decay: See base class. + fpn_min_level: See base class. + fpn_max_level: See base class. + additional_layer_depth: See base class. + override_base_feature_extractor_hyperparams: See base class. + """ + super(FasterRCNNResnet50FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + first_stage_features_stride=first_stage_features_stride, + conv_hyperparams=conv_hyperparams, + resnet_v1_base_model=resnet_v1.resnet_v1_50, + resnet_v1_base_model_name='resnet_v1_50', + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay, + fpn_min_level=fpn_min_level, + fpn_max_level=fpn_max_level, + additional_layer_depth=additional_layer_depth, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams + ) + + +class FasterRCNNResnet101FpnKerasFeatureExtractor( + FasterRCNNResnetV1FpnKerasFeatureExtractor): + """Faster RCNN with Resnet101 FPN feature extractor.""" + + def __init__(self, + is_training, + first_stage_features_stride=16, + batch_norm_trainable=True, + conv_hyperparams=None, + weight_decay=0.0, + fpn_min_level=2, + fpn_max_level=6, + additional_layer_depth=256, + override_base_feature_extractor_hyperparams=False): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + conv_hyperparams: See base class. + weight_decay: See base class. + fpn_min_level: See base class. + fpn_max_level: See base class. + additional_layer_depth: See base class. + override_base_feature_extractor_hyperparams: See base class. 
+ """ + super(FasterRCNNResnet101FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + first_stage_features_stride=first_stage_features_stride, + conv_hyperparams=conv_hyperparams, + resnet_v1_base_model=resnet_v1.resnet_v1_101, + resnet_v1_base_model_name='resnet_v1_101', + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay, + fpn_min_level=fpn_min_level, + fpn_max_level=fpn_max_level, + additional_layer_depth=additional_layer_depth, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + +class FasterRCNNResnet152FpnKerasFeatureExtractor( + FasterRCNNResnetV1FpnKerasFeatureExtractor): + """Faster RCNN with Resnet152 FPN feature extractor.""" + + def __init__(self, + is_training, + first_stage_features_stride=16, + batch_norm_trainable=True, + conv_hyperparams=None, + weight_decay=0.0, + fpn_min_level=2, + fpn_max_level=6, + additional_layer_depth=256, + override_base_feature_extractor_hyperparams=False): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + batch_norm_trainable: See base class. + conv_hyperparams: See base class. + weight_decay: See base class. + fpn_min_level: See base class. + fpn_max_level: See base class. + additional_layer_depth: See base class. + override_base_feature_extractor_hyperparams: See base class. + """ + super(FasterRCNNResnet152FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + first_stage_features_stride=first_stage_features_stride, + conv_hyperparams=conv_hyperparams, + resnet_v1_base_model=resnet_v1.resnet_v1_152, + resnet_v1_base_model_name='resnet_v1_152', + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay, + fpn_min_level=fpn_min_level, + fpn_max_level=fpn_max_level, + additional_layer_depth=additional_layer_depth, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d0a0813cf65e873a4109fc8bc33add099c1ab87c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -0,0 +1,94 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for models.faster_rcnn_resnet_v1_fpn_keras_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_res_fpn +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class FasterRCNNResnetV1FpnKerasFeatureExtractorTest(tf.test.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Parse(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _build_feature_extractor(self): + return frcnn_res_fpn.FasterRCNNResnet50FpnKerasFeatureExtractor( + is_training=False, + conv_hyperparams=self._build_conv_hyperparams(), + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [2, 448, 448, 3], maxval=255, dtype=tf.float32) + rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shapes = [tf.shape(rpn_feature_map) + for rpn_feature_map in rpn_feature_maps] + + self.assertAllEqual(features_shapes[0].numpy(), [2, 112, 112, 256]) + self.assertAllEqual(features_shapes[1].numpy(), [2, 56, 56, 256]) + self.assertAllEqual(features_shapes[2].numpy(), [2, 28, 28, 256]) + self.assertAllEqual(features_shapes[3].numpy(), [2, 14, 14, 256]) + self.assertAllEqual(features_shapes[4].numpy(), [2, 7, 7, 256]) + + def test_extract_proposal_features_half_size_input(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [2, 224, 224, 3], maxval=255, dtype=tf.float32) + rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shapes = [tf.shape(rpn_feature_map) + for rpn_feature_map in rpn_feature_maps] + + self.assertAllEqual(features_shapes[0].numpy(), [2, 56, 56, 256]) + self.assertAllEqual(features_shapes[1].numpy(), [2, 28, 28, 256]) + self.assertAllEqual(features_shapes[2].numpy(), [2, 14, 14, 256]) + self.assertAllEqual(features_shapes[3].numpy(), [2, 7, 7, 256]) + self.assertAllEqual(features_shapes[4].numpy(), [2, 4, 4, 256]) + + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor() + proposal_feature_maps = tf.random_uniform( + [3, 7, 7, 1024], maxval=255, dtype=tf.float32) + model = feature_extractor.get_box_classifier_feature_extractor_model( + name='TestScope') + proposal_classifier_features = ( + model(proposal_feature_maps)) + features_shape = tf.shape(proposal_classifier_features) + + self.assertAllEqual(features_shape.numpy(), [3, 1, 1, 1024]) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/feature_map_generators.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/feature_map_generators.py new file mode 100644 
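The shape assertions in the test file above follow from the FPN level range wired into the extractors earlier in this diff (fpn_min_level=2 through fpn_max_level=6). A rough sketch of that arithmetic, assuming the usual convention that pyramid level k is downsampled by a factor of 2**k and that odd sizes round up; the helper name is illustrative, not part of the codebase:

import math

def expected_fpn_shapes(height, width, min_level=2, max_level=6, depth=256):
  # Hypothetical helper: spatial sizes per FPN level, assuming stride 2**level.
  return [(math.ceil(height / 2 ** k), math.ceil(width / 2 ** k), depth)
          for k in range(min_level, max_level + 1)]

print(expected_fpn_shapes(448, 448))  # (112, 112, 256) ... (7, 7, 256)
print(expected_fpn_shapes(224, 224))  # ends with (7, 7, 256), (4, 4, 256)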
index 0000000000000000000000000000000000000000..87d15e968390446a4332e20e5b737e04d573d98a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/feature_map_generators.py @@ -0,0 +1,825 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions to generate a list of feature maps based on image features. + +Provides several feature map generators that can be used to build object +detection feature extractors. + +Object detection feature extractors usually are built by stacking two components +- A base feature extractor such as Inception V3 and a feature map generator. +Feature map generators build on the base feature extractors and produce a list +of final feature maps. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import collections +import functools +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf +import tf_slim as slim +from object_detection.utils import ops +from object_detection.utils import shape_utils + +# Activation bound used for TPU v1. Activations will be clipped to +# [-ACTIVATION_BOUND, ACTIVATION_BOUND] when training with +# use_bounded_activations enabled. +ACTIVATION_BOUND = 6.0 + + +def get_depth_fn(depth_multiplier, min_depth): + """Builds a callable to compute depth (output channels) of conv filters. + + Args: + depth_multiplier: a multiplier for the nominal depth. + min_depth: a lower bound on the depth of filters. + + Returns: + A callable that takes in a nominal depth and returns the depth to use. + """ + def multiply_depth(depth): + new_depth = int(depth * depth_multiplier) + return max(new_depth, min_depth) + return multiply_depth + + +def create_conv_block( + use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams, + is_training, freeze_batchnorm, depth): + """Create Keras layers for depthwise & non-depthwise convolutions. + + Args: + use_depthwise: Whether to use depthwise separable conv instead of regular + conv. + kernel_size: A list of length 2: [kernel_height, kernel_width] of the + filters. Can be an int if both values are the same. + padding: One of 'VALID' or 'SAME'. + stride: A list of length 2: [stride_height, stride_width], specifying the + convolution stride. Can be an int if both strides are the same. + layer_name: String. The name of the layer. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + is_training: Indicates whether the feature generator is in training mode. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + depth: Depth of output feature maps. 
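For reference, create_conv_block returns a plain list of Keras layers (convolution or separable convolution, then batch norm, then activation) that callers apply in sequence. A minimal sketch of that pattern, assuming the same hyperparams proto the tests in this diff use; the shapes and layer name are illustrative only:

import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import feature_map_generators
from object_detection.protos import hyperparams_pb2

conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Parse("""
  regularizer { l2_regularizer { } }
  initializer { truncated_normal_initializer { } }
""", conv_hyperparams_proto)
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
    conv_hyperparams_proto)

layers = feature_map_generators.create_conv_block(
    use_depthwise=False, kernel_size=3, padding='SAME', stride=1,
    layer_name='smoothing_1', conv_hyperparams=conv_hyperparams,
    is_training=False, freeze_batchnorm=True, depth=128)

features = tf.random_uniform([1, 32, 32, 64])
for layer in layers:  # conv -> batch norm -> activation, applied in order
  features = layer(features)  # ends up with shape [1, 32, 32, 128]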
+ + Returns: + A list of conv layers. + """ + layers = [] + if use_depthwise: + kwargs = conv_hyperparams.params() + # Both the regularizer and initializer apply to the depthwise layer, + # so we remap the kernel_* to depthwise_* here. + kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] + kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] + layers.append( + tf.keras.layers.SeparableConv2D( + depth, [kernel_size, kernel_size], + depth_multiplier=1, + padding=padding, + strides=stride, + name=layer_name + '_depthwise_conv', + **kwargs)) + else: + layers.append(tf.keras.layers.Conv2D( + depth, + [kernel_size, kernel_size], + padding=padding, + strides=stride, + name=layer_name + '_conv', + **conv_hyperparams.params())) + layers.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name=layer_name + '_batchnorm')) + layers.append( + conv_hyperparams.build_activation_layer( + name=layer_name)) + return layers + + +class KerasMultiResolutionFeatureMaps(tf.keras.Model): + """Generates multi resolution feature maps from input image features. + + A Keras model that generates multi-scale feature maps for detection as in the + SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1. + + More specifically, when called on inputs it performs the following two tasks: + 1) If a layer name is provided in the configuration, returns that layer as a + feature map. + 2) If a layer name is left as an empty string, constructs a new feature map + based on the spatial shape and depth configuration. Note that the current + implementation only supports generating new layers using convolution of + stride 2 resulting in a spatial resolution reduction by a factor of 2. + By default convolution kernel size is set to 3, and it can be customized + by caller. + + An example of the configuration for Inception V3: + { + 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], + 'layer_depth': [-1, -1, -1, 512, 256, 128] + } + + When this feature generator object is called on input image_features: + Args: + image_features: A dictionary of handles to activation tensors from the + base feature extractor. + + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. + """ + + def __init__(self, + feature_map_layout, + depth_multiplier, + min_depth, + insert_1x1_conv, + is_training, + conv_hyperparams, + freeze_batchnorm, + name=None): + """Constructor. + + Args: + feature_map_layout: Dictionary of specifications for the feature map + layouts in the following format (Inception V2/V3 respectively): + { + 'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''], + 'layer_depth': [-1, -1, -1, 512, 256, 128] + } + or + { + 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], + 'layer_depth': [-1, -1, -1, 512, 256, 128] + } + If 'from_layer' is specified, the specified feature map is directly used + as a box predictor layer, and the layer_depth is directly infered from + the feature map (instead of using the provided 'layer_depth' parameter). + In this case, our convention is to set 'layer_depth' to -1 for clarity. + Otherwise, if 'from_layer' is an empty string, then the box predictor + layer will be built from the previous layer using convolution + operations. 
Note that the current implementation only supports + generating new layers using convolutions of stride 2 (resulting in a + spatial resolution reduction by a factor of 2), and will be extended to + a more flexible design. Convolution kernel size is set to 3 by default, + and can be customized by 'conv_kernel_size' parameter (similarily, + 'conv_kernel_size' should be set to -1 if 'from_layer' is specified). + The created convolution operation will be a normal 2D convolution by + default, and a depthwise convolution followed by 1x1 convolution if + 'use_depthwise' is set to True. + depth_multiplier: Depth multiplier for convolutional layers. + min_depth: Minimum depth for convolutional layers. + insert_1x1_conv: A boolean indicating whether an additional 1x1 + convolution should be inserted before shrinking the feature map. + is_training: Indicates whether the feature generator is in training mode. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(KerasMultiResolutionFeatureMaps, self).__init__(name=name) + + self.feature_map_layout = feature_map_layout + self.convolutions = [] + + depth_fn = get_depth_fn(depth_multiplier, min_depth) + + base_from_layer = '' + use_explicit_padding = False + if 'use_explicit_padding' in feature_map_layout: + use_explicit_padding = feature_map_layout['use_explicit_padding'] + use_depthwise = False + if 'use_depthwise' in feature_map_layout: + use_depthwise = feature_map_layout['use_depthwise'] + for index, from_layer in enumerate(feature_map_layout['from_layer']): + net = [] + layer_depth = feature_map_layout['layer_depth'][index] + conv_kernel_size = 3 + if 'conv_kernel_size' in feature_map_layout: + conv_kernel_size = feature_map_layout['conv_kernel_size'][index] + if from_layer: + base_from_layer = from_layer + else: + if insert_1x1_conv: + layer_name = '{}_1_Conv2d_{}_1x1_{}'.format( + base_from_layer, index, depth_fn(layer_depth // 2)) + net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth // 2), + [1, 1], + padding='SAME', + strides=1, + name=layer_name + '_conv', + **conv_hyperparams.params())) + net.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name=layer_name + '_batchnorm')) + net.append( + conv_hyperparams.build_activation_layer( + name=layer_name)) + + layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format( + base_from_layer, index, conv_kernel_size, conv_kernel_size, + depth_fn(layer_depth)) + stride = 2 + padding = 'SAME' + if use_explicit_padding: + padding = 'VALID' + # We define this function here while capturing the value of + # conv_kernel_size, to avoid holding a reference to the loop variable + # conv_kernel_size inside of a lambda function + def fixed_padding(features, kernel_size=conv_kernel_size): + return ops.fixed_padding(features, kernel_size) + net.append(tf.keras.layers.Lambda(fixed_padding)) + # TODO(rathodv): Add some utilities to simplify the creation of + # Depthwise & non-depthwise convolutions w/ normalization & activations + if use_depthwise: + net.append(tf.keras.layers.DepthwiseConv2D( + [conv_kernel_size, conv_kernel_size], + depth_multiplier=1, + 
padding=padding, + strides=stride, + name=layer_name + '_depthwise_conv', + **conv_hyperparams.params())) + net.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name=layer_name + '_depthwise_batchnorm')) + net.append( + conv_hyperparams.build_activation_layer( + name=layer_name + '_depthwise')) + + net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth), [1, 1], + padding='SAME', + strides=1, + name=layer_name + '_conv', + **conv_hyperparams.params())) + net.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name=layer_name + '_batchnorm')) + net.append( + conv_hyperparams.build_activation_layer( + name=layer_name)) + + else: + net.append(tf.keras.layers.Conv2D( + depth_fn(layer_depth), + [conv_kernel_size, conv_kernel_size], + padding=padding, + strides=stride, + name=layer_name + '_conv', + **conv_hyperparams.params())) + net.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name=layer_name + '_batchnorm')) + net.append( + conv_hyperparams.build_activation_layer( + name=layer_name)) + + # Until certain bugs are fixed in checkpointable lists, + # this net must be appended only once it's been filled with layers + self.convolutions.append(net) + + def call(self, image_features): + """Generate the multi-resolution feature maps. + + Executed when calling the `.__call__` method on input. + + Args: + image_features: A dictionary of handles to activation tensors from the + base feature extractor. + + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. + """ + feature_maps = [] + feature_map_keys = [] + + for index, from_layer in enumerate(self.feature_map_layout['from_layer']): + if from_layer: + feature_map = image_features[from_layer] + feature_map_keys.append(from_layer) + else: + feature_map = feature_maps[-1] + for layer in self.convolutions[index]: + feature_map = layer(feature_map) + layer_name = self.convolutions[index][-1].name + feature_map_keys.append(layer_name) + feature_maps.append(feature_map) + return collections.OrderedDict( + [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) + + +def multi_resolution_feature_maps(feature_map_layout, depth_multiplier, + min_depth, insert_1x1_conv, image_features, + pool_residual=False): + """Generates multi resolution feature maps from input image features. + + Generates multi-scale feature maps for detection as in the SSD papers by + Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1. + + More specifically, it performs the following two tasks: + 1) If a layer name is provided in the configuration, returns that layer as a + feature map. + 2) If a layer name is left as an empty string, constructs a new feature map + based on the spatial shape and depth configuration. Note that the current + implementation only supports generating new layers using convolution of + stride 2 resulting in a spatial resolution reduction by a factor of 2. + By default convolution kernel size is set to 3, and it can be customized + by caller. 
+
+  An example of the configuration for Inception V3:
+  {
+    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
+    'layer_depth': [-1, -1, -1, 512, 256, 128]
+  }
+
+  Args:
+    feature_map_layout: Dictionary of specifications for the feature map
+      layouts in the following format (Inception V2/V3 respectively):
+      {
+        'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
+        'layer_depth': [-1, -1, -1, 512, 256, 128]
+      }
+      or
+      {
+        'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
+        'layer_depth': [-1, -1, -1, 512, 256, 128]
+      }
+      If 'from_layer' is specified, the specified feature map is directly used
+      as a box predictor layer, and the layer_depth is directly inferred from
+      the feature map (instead of using the provided 'layer_depth' parameter).
+      In this case, our convention is to set 'layer_depth' to -1 for clarity.
+      Otherwise, if 'from_layer' is an empty string, then the box predictor
+      layer will be built from the previous layer using convolution operations.
+      Note that the current implementation only supports generating new layers
+      using convolutions of stride 2 (resulting in a spatial resolution
+      reduction by a factor of 2), and will be extended to a more flexible
+      design. Convolution kernel size is set to 3 by default, and can be
+      customized by the 'conv_kernel_size' parameter (similarly,
+      'conv_kernel_size' should be set to -1 if 'from_layer' is specified).
+      The created convolution operation will be a normal 2D convolution by
+      default, and a depthwise convolution followed by 1x1 convolution if
+      'use_depthwise' is set to True.
+    depth_multiplier: Depth multiplier for convolutional layers.
+    min_depth: Minimum depth for convolutional layers.
+    insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
+      should be inserted before shrinking the feature map.
+    image_features: A dictionary of handles to activation tensors from the
+      base feature extractor.
+    pool_residual: Whether to add an average pooling layer followed by a
+      residual connection between subsequent feature maps when the channel
+      depths match. For example, with option 'layer_depth': [-1, 512, 256, 256],
+      a pooling and residual layer is added between the third and fourth
+      feature map. This option is better used with the Weight Shared
+      Convolution Box Predictor when all feature maps have the same channel
+      depth to encourage more consistent features across multi-scale feature
+      maps.
+
+  Returns:
+    feature_maps: an OrderedDict mapping keys (feature map names) to
+      tensors where each tensor has shape [batch, height_i, width_i, depth_i].
+
+  Raises:
+    ValueError: if the number of entries in 'from_layer' and
+      'layer_depth' do not match.
+    ValueError: if the generated layer does not have the same resolution
+      as specified.
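A minimal usage sketch of multi_resolution_feature_maps (TF1 graph mode assumed); the layout and input shapes mirror the embedded SSD MobileNet V1 case exercised by the tests later in this diff:

import tensorflow.compat.v1 as tf
from object_detection.models import feature_map_generators

tf.disable_eager_execution()
feature_map_layout = {
    'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''],
    'layer_depth': [-1, -1, 512, 256, 256],
    'conv_kernel_size': [-1, -1, 3, 3, 2],
}
image_features = {
    'Conv2d_11_pointwise': tf.placeholder(tf.float32, [4, 16, 16, 512]),
    'Conv2d_13_pointwise': tf.placeholder(tf.float32, [4, 8, 8, 1024]),
}
feature_maps = feature_map_generators.multi_resolution_feature_maps(
    feature_map_layout=feature_map_layout,
    depth_multiplier=1,
    min_depth=32,
    insert_1x1_conv=True,
    image_features=image_features)
# OrderedDict: the two source layers plus three new maps of shape
# 4x4x512, 2x2x256 and 1x1x256 (each stride-2 step halves the spatial size).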
+ """ + depth_fn = get_depth_fn(depth_multiplier, min_depth) + + feature_map_keys = [] + feature_maps = [] + base_from_layer = '' + use_explicit_padding = False + if 'use_explicit_padding' in feature_map_layout: + use_explicit_padding = feature_map_layout['use_explicit_padding'] + use_depthwise = False + if 'use_depthwise' in feature_map_layout: + use_depthwise = feature_map_layout['use_depthwise'] + for index, from_layer in enumerate(feature_map_layout['from_layer']): + layer_depth = feature_map_layout['layer_depth'][index] + conv_kernel_size = 3 + if 'conv_kernel_size' in feature_map_layout: + conv_kernel_size = feature_map_layout['conv_kernel_size'][index] + if from_layer: + feature_map = image_features[from_layer] + base_from_layer = from_layer + feature_map_keys.append(from_layer) + else: + pre_layer = feature_maps[-1] + pre_layer_depth = pre_layer.get_shape().as_list()[3] + intermediate_layer = pre_layer + if insert_1x1_conv: + layer_name = '{}_1_Conv2d_{}_1x1_{}'.format( + base_from_layer, index, depth_fn(layer_depth // 2)) + intermediate_layer = slim.conv2d( + pre_layer, + depth_fn(layer_depth // 2), [1, 1], + padding='SAME', + stride=1, + scope=layer_name) + layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format( + base_from_layer, index, conv_kernel_size, conv_kernel_size, + depth_fn(layer_depth)) + stride = 2 + padding = 'SAME' + if use_explicit_padding: + padding = 'VALID' + intermediate_layer = ops.fixed_padding( + intermediate_layer, conv_kernel_size) + if use_depthwise: + feature_map = slim.separable_conv2d( + intermediate_layer, + None, [conv_kernel_size, conv_kernel_size], + depth_multiplier=1, + padding=padding, + stride=stride, + scope=layer_name + '_depthwise') + feature_map = slim.conv2d( + feature_map, + depth_fn(layer_depth), [1, 1], + padding='SAME', + stride=1, + scope=layer_name) + if pool_residual and pre_layer_depth == depth_fn(layer_depth): + feature_map += slim.avg_pool2d( + pre_layer, [3, 3], + padding='SAME', + stride=2, + scope=layer_name + '_pool') + else: + feature_map = slim.conv2d( + intermediate_layer, + depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size], + padding=padding, + stride=stride, + scope=layer_name) + feature_map_keys.append(layer_name) + feature_maps.append(feature_map) + return collections.OrderedDict( + [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) + + +class KerasFpnTopDownFeatureMaps(tf.keras.Model): + """Generates Keras based `top-down` feature maps for Feature Pyramid Networks. + + See https://arxiv.org/abs/1612.03144 for details. + """ + + def __init__(self, + num_levels, + depth, + is_training, + conv_hyperparams, + freeze_batchnorm, + use_depthwise=False, + use_explicit_padding=False, + use_bounded_activations=False, + use_native_resize_op=False, + scope=None, + name=None): + """Constructor. + + Args: + num_levels: the number of image features. + depth: depth of output feature maps. + is_training: Indicates whether the feature generator is in training mode. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_depthwise: whether to use depthwise separable conv instead of regular + conv. + use_explicit_padding: whether to use explicit padding. 
+ use_bounded_activations: Whether or not to clip activations to range + [-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend + themselves to quantized inference. + use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op + for the upsampling process instead of reshape and broadcasting + implementation. + scope: A scope name to wrap this op under. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(KerasFpnTopDownFeatureMaps, self).__init__(name=name) + + self.scope = scope if scope else 'top_down' + self.top_layers = [] + self.residual_blocks = [] + self.top_down_blocks = [] + self.reshape_blocks = [] + self.conv_layers = [] + + padding = 'VALID' if use_explicit_padding else 'SAME' + stride = 1 + kernel_size = 3 + def clip_by_value(features): + return tf.clip_by_value(features, -ACTIVATION_BOUND, ACTIVATION_BOUND) + + # top layers + self.top_layers.append(tf.keras.layers.Conv2D( + depth, [1, 1], strides=stride, padding=padding, + name='projection_%d' % num_levels, + **conv_hyperparams.params(use_bias=True))) + if use_bounded_activations: + self.top_layers.append(tf.keras.layers.Lambda( + clip_by_value, name='clip_by_value')) + + for level in reversed(list(range(num_levels - 1))): + # to generate residual from image features + residual_net = [] + # to preprocess top_down (the image feature map from last layer) + top_down_net = [] + # to reshape top_down according to residual if necessary + reshaped_residual = [] + # to apply convolution layers to feature map + conv_net = [] + + # residual block + residual_net.append(tf.keras.layers.Conv2D( + depth, [1, 1], padding=padding, strides=1, + name='projection_%d' % (level + 1), + **conv_hyperparams.params(use_bias=True))) + if use_bounded_activations: + residual_net.append(tf.keras.layers.Lambda( + clip_by_value, name='clip_by_value')) + + # top-down block + # TODO (b/128922690): clean-up of ops.nearest_neighbor_upsampling + if use_native_resize_op: + def resize_nearest_neighbor(image): + image_shape = shape_utils.combined_static_and_dynamic_shape(image) + return tf.image.resize_nearest_neighbor( + image, [image_shape[1] * 2, image_shape[2] * 2]) + top_down_net.append(tf.keras.layers.Lambda( + resize_nearest_neighbor, name='nearest_neighbor_upsampling')) + else: + def nearest_neighbor_upsampling(image): + return ops.nearest_neighbor_upsampling(image, scale=2) + top_down_net.append(tf.keras.layers.Lambda( + nearest_neighbor_upsampling, name='nearest_neighbor_upsampling')) + + # reshape block + if use_explicit_padding: + def reshape(inputs): + residual_shape = tf.shape(inputs[0]) + return inputs[1][:, :residual_shape[1], :residual_shape[2], :] + reshaped_residual.append( + tf.keras.layers.Lambda(reshape, name='reshape')) + + # down layers + if use_bounded_activations: + conv_net.append(tf.keras.layers.Lambda( + clip_by_value, name='clip_by_value')) + + if use_explicit_padding: + def fixed_padding(features, kernel_size=kernel_size): + return ops.fixed_padding(features, kernel_size) + conv_net.append(tf.keras.layers.Lambda( + fixed_padding, name='fixed_padding')) + + layer_name = 'smoothing_%d' % (level + 1) + conv_block = create_conv_block( + use_depthwise, kernel_size, padding, stride, layer_name, + conv_hyperparams, is_training, freeze_batchnorm, depth) + conv_net.extend(conv_block) + + self.residual_blocks.append(residual_net) + self.top_down_blocks.append(top_down_net) + self.reshape_blocks.append(reshaped_residual) + 
self.conv_layers.append(conv_net) + + def call(self, image_features): + """Generate the multi-resolution feature maps. + + Executed when calling the `.__call__` method on input. + + Args: + image_features: list of tuples of (tensor_name, image_feature_tensor). + Spatial resolutions of succesive tensors must reduce exactly by a factor + of 2. + + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. + """ + output_feature_maps_list = [] + output_feature_map_keys = [] + + with tf.name_scope(self.scope): + top_down = image_features[-1][1] + for layer in self.top_layers: + top_down = layer(top_down) + output_feature_maps_list.append(top_down) + output_feature_map_keys.append('top_down_%s' % image_features[-1][0]) + + num_levels = len(image_features) + for index, level in enumerate(reversed(list(range(num_levels - 1)))): + residual = image_features[level][1] + top_down = output_feature_maps_list[-1] + for layer in self.residual_blocks[index]: + residual = layer(residual) + for layer in self.top_down_blocks[index]: + top_down = layer(top_down) + for layer in self.reshape_blocks[index]: + top_down = layer([residual, top_down]) + top_down += residual + for layer in self.conv_layers[index]: + top_down = layer(top_down) + output_feature_maps_list.append(top_down) + output_feature_map_keys.append('top_down_%s' % image_features[level][0]) + return collections.OrderedDict(reversed( + list(zip(output_feature_map_keys, output_feature_maps_list)))) + + +def fpn_top_down_feature_maps(image_features, + depth, + use_depthwise=False, + use_explicit_padding=False, + use_bounded_activations=False, + scope=None, + use_native_resize_op=False): + """Generates `top-down` feature maps for Feature Pyramid Networks. + + See https://arxiv.org/abs/1612.03144 for details. + + Args: + image_features: list of tuples of (tensor_name, image_feature_tensor). + Spatial resolutions of succesive tensors must reduce exactly by a factor + of 2. + depth: depth of output feature maps. + use_depthwise: whether to use depthwise separable conv instead of regular + conv. + use_explicit_padding: whether to use explicit padding. + use_bounded_activations: Whether or not to clip activations to range + [-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend + themselves to quantized inference. + scope: A scope name to wrap this op under. + use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for + the upsampling process instead of reshape and broadcasting implementation. + + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. 
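The slim variant below is typically called with an ordered fine-to-coarse list of (name, tensor) pairs; a short sketch under the same TF1 graph-mode assumption, with block names and shapes taken from the FPN tests in this diff:

import tensorflow.compat.v1 as tf
from object_detection.models import feature_map_generators

tf.disable_eager_execution()
image_features = [
    ('block2', tf.placeholder(tf.float32, [4, 8, 8, 256])),
    ('block3', tf.placeholder(tf.float32, [4, 4, 4, 256])),
    ('block4', tf.placeholder(tf.float32, [4, 2, 2, 256])),
    ('block5', tf.placeholder(tf.float32, [4, 1, 1, 256])),
]
fpn_maps = feature_map_generators.fpn_top_down_feature_maps(
    image_features, depth=128)
# OrderedDict keys 'top_down_block2' .. 'top_down_block5', each with 128
# channels and the same spatial size as the corresponding input feature map.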
+ """ + with tf.name_scope(scope, 'top_down'): + num_levels = len(image_features) + output_feature_maps_list = [] + output_feature_map_keys = [] + padding = 'VALID' if use_explicit_padding else 'SAME' + kernel_size = 3 + with slim.arg_scope( + [slim.conv2d, slim.separable_conv2d], padding=padding, stride=1): + top_down = slim.conv2d( + image_features[-1][1], + depth, [1, 1], activation_fn=None, normalizer_fn=None, + scope='projection_%d' % num_levels) + if use_bounded_activations: + top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND, + ACTIVATION_BOUND) + output_feature_maps_list.append(top_down) + output_feature_map_keys.append( + 'top_down_%s' % image_features[-1][0]) + + for level in reversed(list(range(num_levels - 1))): + if use_native_resize_op: + with tf.name_scope('nearest_neighbor_upsampling'): + top_down_shape = shape_utils.combined_static_and_dynamic_shape( + top_down) + top_down = tf.image.resize_nearest_neighbor( + top_down, [top_down_shape[1] * 2, top_down_shape[2] * 2]) + else: + top_down = ops.nearest_neighbor_upsampling(top_down, scale=2) + residual = slim.conv2d( + image_features[level][1], depth, [1, 1], + activation_fn=None, normalizer_fn=None, + scope='projection_%d' % (level + 1)) + if use_bounded_activations: + residual = tf.clip_by_value(residual, -ACTIVATION_BOUND, + ACTIVATION_BOUND) + if use_explicit_padding: + # slice top_down to the same shape as residual + residual_shape = tf.shape(residual) + top_down = top_down[:, :residual_shape[1], :residual_shape[2], :] + top_down += residual + if use_bounded_activations: + top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND, + ACTIVATION_BOUND) + if use_depthwise: + conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + pre_output = top_down + if use_explicit_padding: + pre_output = ops.fixed_padding(pre_output, kernel_size) + output_feature_maps_list.append(conv_op( + pre_output, + depth, [kernel_size, kernel_size], + scope='smoothing_%d' % (level + 1))) + output_feature_map_keys.append('top_down_%s' % image_features[level][0]) + return collections.OrderedDict(reversed( + list(zip(output_feature_map_keys, output_feature_maps_list)))) + + +def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers, + image_features, replace_pool_with_conv=False): + """Generates pooling pyramid feature maps. + + The pooling pyramid feature maps is motivated by + multi_resolution_feature_maps. The main difference are that it is simpler and + reduces the number of free parameters. + + More specifically: + - Instead of using convolutions to shrink the feature map, it uses max + pooling, therefore totally gets rid of the parameters in convolution. + - By pooling feature from larger map up to a single cell, it generates + features in the same feature space. + - Instead of independently making box predictions from individual maps, it + shares the same classifier across different feature maps, therefore reduces + the "mis-calibration" across different scales. + + See go/ppn-detection for more details. + + Args: + base_feature_map_depth: Depth of the base feature before the max pooling. + num_layers: Number of layers used to make predictions. They are pooled + from the base feature. + image_features: A dictionary of handles to activation tensors from the + feature extractor. + replace_pool_with_conv: Whether or not to replace pooling operations with + convolutions in the PPN. Default is False. 
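A short sketch of how pooling_pyramid_feature_maps might be called (TF1 graph mode assumed; the input layer name and shapes are illustrative): the single base map is projected with a 1x1 convolution and then repeatedly 2x2 max-pooled, so every pyramid level shares the same channel depth.

import tensorflow.compat.v1 as tf
from object_detection.models import feature_map_generators

tf.disable_eager_execution()
image_features = {
    'Conv2d_13_pointwise': tf.placeholder(tf.float32, [4, 16, 16, 1024]),
}
ppn_maps = feature_map_generators.pooling_pyramid_feature_maps(
    base_feature_map_depth=256,
    num_layers=4,
    image_features=image_features)
# Keys: 'Base_Conv2d_1x1_256' (16x16), then 'MaxPool2d_0_2x2' (8x8),
# 'MaxPool2d_1_2x2' (4x4) and 'MaxPool2d_2_2x2' (2x2), all with 256 channels.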
+ + Returns: + feature_maps: an OrderedDict mapping keys (feature map names) to + tensors where each tensor has shape [batch, height_i, width_i, depth_i]. + Raises: + ValueError: image_features does not contain exactly one entry + """ + if len(image_features) != 1: + raise ValueError('image_features should be a dictionary of length 1.') + image_features = image_features[list(image_features.keys())[0]] + + feature_map_keys = [] + feature_maps = [] + feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth + if base_feature_map_depth > 0: + image_features = slim.conv2d( + image_features, + base_feature_map_depth, + [1, 1], # kernel size + padding='SAME', stride=1, scope=feature_map_key) + # Add a 1x1 max-pooling node (a no op node) immediately after the conv2d for + # TPU v1 compatibility. Without the following dummy op, TPU runtime + # compiler will combine the convolution with one max-pooling below into a + # single cycle, so getting the conv2d feature becomes impossible. + image_features = slim.max_pool2d( + image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key) + feature_map_keys.append(feature_map_key) + feature_maps.append(image_features) + feature_map = image_features + if replace_pool_with_conv: + with slim.arg_scope([slim.conv2d], padding='SAME', stride=2): + for i in range(num_layers - 1): + feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i, + base_feature_map_depth) + feature_map = slim.conv2d( + feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key) + feature_map_keys.append(feature_map_key) + feature_maps.append(feature_map) + else: + with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2): + for i in range(num_layers - 1): + feature_map_key = 'MaxPool2d_%d_2x2' % i + feature_map = slim.max_pool2d( + feature_map, [2, 2], padding='SAME', scope=feature_map_key) + feature_map_keys.append(feature_map_key) + feature_maps.append(feature_map) + return collections.OrderedDict( + [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/feature_map_generators.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/feature_map_generators.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0091d6e5428206944837ca1186aef6ed80387189 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/feature_map_generators.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/feature_map_generators_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/feature_map_generators_test.py new file mode 100644 index 0000000000000000000000000000000000000000..951e7760bd8a42afb19f61b6c6bc1c1f744d74dd --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/feature_map_generators_test.py @@ -0,0 +1,842 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for feature map generators.""" +import unittest +from absl.testing import parameterized + +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.models import feature_map_generators +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import test_utils +from object_detection.utils import tf_version + +INCEPTION_V2_LAYOUT = { + 'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''], + 'layer_depth': [-1, -1, -1, 512, 256, 256], + 'anchor_strides': [16, 32, 64, -1, -1, -1], + 'layer_target_norm': [20.0, -1, -1, -1, -1, -1], +} + +INCEPTION_V3_LAYOUT = { + 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''], + 'layer_depth': [-1, -1, -1, 512, 256, 128], + 'anchor_strides': [16, 32, 64, -1, -1, -1], + 'aspect_ratios': [1.0, 2.0, 1.0/2, 3.0, 1.0/3] +} + +EMBEDDED_SSD_MOBILENET_V1_LAYOUT = { + 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''], + 'layer_depth': [-1, -1, 512, 256, 256], + 'conv_kernel_size': [-1, -1, 3, 3, 2], +} + +SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT = { + 'from_layer': ['Conv2d_13_pointwise', '', '', ''], + 'layer_depth': [-1, 256, 256, 256], +} + + +class MultiResolutionFeatureMapGeneratorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _build_feature_map_generator(self, feature_map_layout, + pool_residual=False): + if tf_version.is_tf2(): + return feature_map_generators.KerasMultiResolutionFeatureMaps( + feature_map_layout=feature_map_layout, + depth_multiplier=1, + min_depth=32, + insert_1x1_conv=True, + freeze_batchnorm=False, + is_training=True, + conv_hyperparams=self._build_conv_hyperparams(), + name='FeatureMaps' + ) + else: + def feature_map_generator(image_features): + return feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=1, + min_depth=32, + insert_1x1_conv=True, + image_features=image_features, + pool_residual=pool_residual) + return feature_map_generator + + def test_get_expected_feature_map_shapes_with_inception_v2(self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), + 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), + 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) + } + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=INCEPTION_V2_LAYOUT) + def graph_fn(): + feature_maps = feature_map_generator(image_features) + return feature_maps + + expected_feature_map_shapes = { + 'Mixed_3c': (4, 28, 28, 256), + 'Mixed_4c': (4, 14, 14, 576), + 'Mixed_5c': (4, 7, 7, 1024), + 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), + 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), + 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)} + out_feature_maps = self.execute(graph_fn, [], g) 
+ out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_feature_map_shapes_with_inception_v2_use_depthwise( + self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), + 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), + 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) + } + layout_copy = INCEPTION_V2_LAYOUT.copy() + layout_copy['use_depthwise'] = True + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=layout_copy) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'Mixed_3c': (4, 28, 28, 256), + 'Mixed_4c': (4, 14, 14, 576), + 'Mixed_5c': (4, 7, 7, 1024), + 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), + 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), + 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_feature_map_shapes_use_explicit_padding(self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), + 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), + 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) + } + layout_copy = INCEPTION_V2_LAYOUT.copy() + layout_copy['use_explicit_padding'] = True + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=layout_copy, + ) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'Mixed_3c': (4, 28, 28, 256), + 'Mixed_4c': (4, 14, 14, 576), + 'Mixed_5c': (4, 7, 7, 1024), + 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), + 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), + 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_feature_map_shapes_with_inception_v3(self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32), + 'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32), + 'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32) + } + + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=INCEPTION_V3_LAYOUT, + ) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'Mixed_5d': (4, 35, 35, 256), + 'Mixed_6e': (4, 17, 17, 576), + 'Mixed_7c': (4, 8, 8, 1024), + 'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512), + 'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256), + 'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1( + self): + with 
test_utils.GraphContextOrNone() as g: + image_features = { + 'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512], + dtype=tf.float32), + 'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024], + dtype=tf.float32), + } + + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT, + ) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'Conv2d_11_pointwise': (4, 16, 16, 512), + 'Conv2d_13_pointwise': (4, 8, 8, 1024), + 'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512), + 'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256), + 'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_feature_map_shapes_with_pool_residual_ssd_mobilenet_v1( + self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024], + dtype=tf.float32), + } + + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT, + pool_residual=True + ) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'Conv2d_13_pointwise': (4, 8, 8, 1024), + 'Conv2d_13_pointwise_2_Conv2d_1_3x3_s2_256': (4, 4, 4, 256), + 'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_256': (4, 2, 2, 256), + 'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 1, 1, 256)} + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_variable_names_with_inception_v2(self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), + 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), + 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) + } + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=INCEPTION_V2_LAYOUT, + ) + def graph_fn(): + return feature_map_generator(image_features) + + self.execute(graph_fn, [], g) + expected_slim_variables = set([ + 'Mixed_5c_1_Conv2d_3_1x1_256/weights', + 'Mixed_5c_1_Conv2d_3_1x1_256/biases', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases', + 'Mixed_5c_1_Conv2d_4_1x1_128/weights', + 'Mixed_5c_1_Conv2d_4_1x1_128/biases', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases', + 'Mixed_5c_1_Conv2d_5_1x1_128/weights', + 'Mixed_5c_1_Conv2d_5_1x1_128/biases', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases', + ]) + + expected_keras_variables = set([ + 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias', + 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias', + 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias', + 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias', + 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel', + 
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias', + 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias', + ]) + + if tf_version.is_tf2(): + actual_variable_set = set( + [var.name.split(':')[0] for var in feature_map_generator.variables]) + self.assertSetEqual(expected_keras_variables, actual_variable_set) + else: + with g.as_default(): + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertSetEqual(expected_slim_variables, actual_variable_set) + + def test_get_expected_variable_names_with_inception_v2_use_depthwise( + self): + with test_utils.GraphContextOrNone() as g: + image_features = { + 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32), + 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32), + 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32) + } + layout_copy = INCEPTION_V2_LAYOUT.copy() + layout_copy['use_depthwise'] = True + feature_map_generator = self._build_feature_map_generator( + feature_map_layout=layout_copy, + ) + def graph_fn(): + return feature_map_generator(image_features) + self.execute(graph_fn, [], g) + + expected_slim_variables = set([ + 'Mixed_5c_1_Conv2d_3_1x1_256/weights', + 'Mixed_5c_1_Conv2d_3_1x1_256/biases', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/depthwise_weights', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/biases', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights', + 'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases', + 'Mixed_5c_1_Conv2d_4_1x1_128/weights', + 'Mixed_5c_1_Conv2d_4_1x1_128/biases', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/depthwise_weights', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/biases', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights', + 'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases', + 'Mixed_5c_1_Conv2d_5_1x1_128/weights', + 'Mixed_5c_1_Conv2d_5_1x1_128/biases', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/depthwise_weights', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/biases', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights', + 'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases', + ]) + + expected_keras_variables = set([ + 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias', + ('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/' + 'depthwise_kernel'), + ('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/' + 'bias'), + 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias', + 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias', + ('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/' + 'depthwise_kernel'), + ('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/' + 'bias'), + 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias', + 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel', + 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias', + ('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/' + 'depthwise_kernel'), + ('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/' + 'bias'), + 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel', + 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias', + ]) + + if tf_version.is_tf2(): + actual_variable_set = set( + [var.name.split(':')[0] for var in feature_map_generator.variables]) + self.assertSetEqual(expected_keras_variables, actual_variable_set) + else: + with g.as_default(): + actual_variable_set = 
set( + [var.op.name for var in tf.trainable_variables()]) + self.assertSetEqual(expected_slim_variables, actual_variable_set) + + +@parameterized.parameters({'use_native_resize_op': True}, + {'use_native_resize_op': False}) +class FPNFeatureMapGeneratorTest(test_case.TestCase, parameterized.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _build_feature_map_generator( + self, image_features, depth, use_bounded_activations=False, + use_native_resize_op=False, use_explicit_padding=False, + use_depthwise=False): + if tf_version.is_tf2(): + return feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=len(image_features), + depth=depth, + is_training=True, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + use_depthwise=use_depthwise, + use_explicit_padding=use_explicit_padding, + use_bounded_activations=use_bounded_activations, + use_native_resize_op=use_native_resize_op, + scope=None, + name='FeatureMaps', + ) + else: + def feature_map_generator(image_features): + return feature_map_generators.fpn_top_down_feature_maps( + image_features=image_features, + depth=depth, + use_depthwise=use_depthwise, + use_explicit_padding=use_explicit_padding, + use_bounded_activations=use_bounded_activations, + use_native_resize_op=use_native_resize_op) + return feature_map_generator + + def test_get_expected_feature_map_shapes( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'top_down_block2': (4, 8, 8, 128), + 'top_down_block3': (4, 4, 4, 128), + 'top_down_block4': (4, 2, 2, 128), + 'top_down_block5': (4, 1, 1, 128) + } + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_feature_map_shapes_with_explicit_padding( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_explicit_padding=True, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'top_down_block2': (4, 8, 8, 128), + 'top_down_block3': (4, 4, 4, 128), + 'top_down_block4': (4, 2, 2, 128), + 'top_down_block5': 
(4, 1, 1, 128) + } + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') + def test_use_bounded_activations_add_operations( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [('block2', + tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', + tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', + tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', + tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_bounded_activations=True, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + self.execute(graph_fn, [], g) + expected_added_operations = dict.fromkeys([ + 'top_down/clip_by_value', 'top_down/clip_by_value_1', + 'top_down/clip_by_value_2', 'top_down/clip_by_value_3', + 'top_down/clip_by_value_4', 'top_down/clip_by_value_5', + 'top_down/clip_by_value_6' + ]) + op_names = {op.name: None for op in g.get_operations()} + self.assertDictContainsSubset(expected_added_operations, op_names) + + @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') + def test_use_bounded_activations_clip_value( + self, use_native_resize_op): + tf_graph = tf.Graph() + with tf_graph.as_default(): + image_features = [ + ('block2', 255 * tf.ones([4, 8, 8, 256], dtype=tf.float32)), + ('block3', 255 * tf.ones([4, 4, 4, 256], dtype=tf.float32)), + ('block4', 255 * tf.ones([4, 2, 2, 256], dtype=tf.float32)), + ('block5', 255 * tf.ones([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_bounded_activations=True, + use_native_resize_op=use_native_resize_op) + feature_map_generator(image_features) + + expected_clip_by_value_ops = [ + 'top_down/clip_by_value', 'top_down/clip_by_value_1', + 'top_down/clip_by_value_2', 'top_down/clip_by_value_3', + 'top_down/clip_by_value_4', 'top_down/clip_by_value_5', + 'top_down/clip_by_value_6' + ] + + # Gathers activation tensors before and after clip_by_value operations. + activations = {} + for clip_by_value_op in expected_clip_by_value_ops: + clip_input_tensor = tf_graph.get_operation_by_name( + '{}/Minimum'.format(clip_by_value_op)).inputs[0] + clip_output_tensor = tf_graph.get_tensor_by_name( + '{}:0'.format(clip_by_value_op)) + activations.update({ + 'before_{}'.format(clip_by_value_op): clip_input_tensor, + 'after_{}'.format(clip_by_value_op): clip_output_tensor, + }) + + expected_lower_bound = -feature_map_generators.ACTIVATION_BOUND + expected_upper_bound = feature_map_generators.ACTIVATION_BOUND + init_op = tf.global_variables_initializer() + with self.test_session() as session: + session.run(init_op) + activations_output = session.run(activations) + for clip_by_value_op in expected_clip_by_value_ops: + # Before clipping, activations are beyound the expected bound because + # of large input image_features values. 
+ activations_before_clipping = ( + activations_output['before_{}'.format(clip_by_value_op)]) + before_clipping_lower_bound = np.amin(activations_before_clipping) + before_clipping_upper_bound = np.amax(activations_before_clipping) + self.assertLessEqual(before_clipping_lower_bound, + expected_lower_bound) + self.assertGreaterEqual(before_clipping_upper_bound, + expected_upper_bound) + + # After clipping, activations are bounded as expectation. + activations_after_clipping = ( + activations_output['after_{}'.format(clip_by_value_op)]) + after_clipping_lower_bound = np.amin(activations_after_clipping) + after_clipping_upper_bound = np.amax(activations_after_clipping) + self.assertGreaterEqual(after_clipping_lower_bound, + expected_lower_bound) + self.assertLessEqual(after_clipping_upper_bound, expected_upper_bound) + + def test_get_expected_feature_map_shapes_with_depthwise( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_depthwise=True, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + + expected_feature_map_shapes = { + 'top_down_block2': (4, 8, 8, 128), + 'top_down_block3': (4, 4, 4, 128), + 'top_down_block4': (4, 2, 2, 128), + 'top_down_block5': (4, 1, 1, 128) + } + out_feature_maps = self.execute(graph_fn, [], g) + out_feature_map_shapes = dict( + (key, value.shape) for key, value in out_feature_maps.items()) + self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes) + + def test_get_expected_variable_names( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + self.execute(graph_fn, [], g) + expected_slim_variables = set([ + 'projection_1/weights', + 'projection_1/biases', + 'projection_2/weights', + 'projection_2/biases', + 'projection_3/weights', + 'projection_3/biases', + 'projection_4/weights', + 'projection_4/biases', + 'smoothing_1/weights', + 'smoothing_1/biases', + 'smoothing_2/weights', + 'smoothing_2/biases', + 'smoothing_3/weights', + 'smoothing_3/biases', + ]) + + expected_keras_variables = set([ + 'FeatureMaps/top_down/projection_1/kernel', + 'FeatureMaps/top_down/projection_1/bias', + 'FeatureMaps/top_down/projection_2/kernel', + 'FeatureMaps/top_down/projection_2/bias', + 'FeatureMaps/top_down/projection_3/kernel', + 'FeatureMaps/top_down/projection_3/bias', + 'FeatureMaps/top_down/projection_4/kernel', + 'FeatureMaps/top_down/projection_4/bias', + 'FeatureMaps/top_down/smoothing_1_conv/kernel', + 'FeatureMaps/top_down/smoothing_1_conv/bias', + 'FeatureMaps/top_down/smoothing_2_conv/kernel', + 'FeatureMaps/top_down/smoothing_2_conv/bias', + 
'FeatureMaps/top_down/smoothing_3_conv/kernel', + 'FeatureMaps/top_down/smoothing_3_conv/bias' + ]) + + if tf_version.is_tf2(): + actual_variable_set = set( + [var.name.split(':')[0] for var in feature_map_generator.variables]) + self.assertSetEqual(expected_keras_variables, actual_variable_set) + else: + with g.as_default(): + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertSetEqual(expected_slim_variables, actual_variable_set) + + def test_get_expected_variable_names_with_depthwise( + self, use_native_resize_op): + with test_utils.GraphContextOrNone() as g: + image_features = [ + ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)), + ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)), + ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)), + ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32)) + ] + feature_map_generator = self._build_feature_map_generator( + image_features=image_features, + depth=128, + use_depthwise=True, + use_native_resize_op=use_native_resize_op) + def graph_fn(): + return feature_map_generator(image_features) + self.execute(graph_fn, [], g) + expected_slim_variables = set([ + 'projection_1/weights', + 'projection_1/biases', + 'projection_2/weights', + 'projection_2/biases', + 'projection_3/weights', + 'projection_3/biases', + 'projection_4/weights', + 'projection_4/biases', + 'smoothing_1/depthwise_weights', + 'smoothing_1/pointwise_weights', + 'smoothing_1/biases', + 'smoothing_2/depthwise_weights', + 'smoothing_2/pointwise_weights', + 'smoothing_2/biases', + 'smoothing_3/depthwise_weights', + 'smoothing_3/pointwise_weights', + 'smoothing_3/biases', + ]) + + expected_keras_variables = set([ + 'FeatureMaps/top_down/projection_1/kernel', + 'FeatureMaps/top_down/projection_1/bias', + 'FeatureMaps/top_down/projection_2/kernel', + 'FeatureMaps/top_down/projection_2/bias', + 'FeatureMaps/top_down/projection_3/kernel', + 'FeatureMaps/top_down/projection_3/bias', + 'FeatureMaps/top_down/projection_4/kernel', + 'FeatureMaps/top_down/projection_4/bias', + 'FeatureMaps/top_down/smoothing_1_depthwise_conv/depthwise_kernel', + 'FeatureMaps/top_down/smoothing_1_depthwise_conv/pointwise_kernel', + 'FeatureMaps/top_down/smoothing_1_depthwise_conv/bias', + 'FeatureMaps/top_down/smoothing_2_depthwise_conv/depthwise_kernel', + 'FeatureMaps/top_down/smoothing_2_depthwise_conv/pointwise_kernel', + 'FeatureMaps/top_down/smoothing_2_depthwise_conv/bias', + 'FeatureMaps/top_down/smoothing_3_depthwise_conv/depthwise_kernel', + 'FeatureMaps/top_down/smoothing_3_depthwise_conv/pointwise_kernel', + 'FeatureMaps/top_down/smoothing_3_depthwise_conv/bias' + ]) + + if tf_version.is_tf2(): + actual_variable_set = set( + [var.name.split(':')[0] for var in feature_map_generator.variables]) + self.assertSetEqual(expected_keras_variables, actual_variable_set) + else: + with g.as_default(): + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertSetEqual(expected_slim_variables, actual_variable_set) + + +class GetDepthFunctionTest(tf.test.TestCase): + + def test_return_min_depth_when_multiplier_is_small(self): + depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5, + min_depth=16) + self.assertEqual(depth_fn(16), 16) + + def test_return_correct_depth_with_multiplier(self): + depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5, + min_depth=16) + self.assertEqual(depth_fn(64), 32) + + +@parameterized.parameters( + {'replace_pool_with_conv': 
False}, + {'replace_pool_with_conv': True}, +) +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase): + + def test_get_expected_feature_map_shapes(self, replace_pool_with_conv): + image_features = { + 'image_features': tf.random_uniform([4, 19, 19, 1024]) + } + feature_maps = feature_map_generators.pooling_pyramid_feature_maps( + base_feature_map_depth=1024, + num_layers=6, + image_features=image_features, + replace_pool_with_conv=replace_pool_with_conv) + + expected_pool_feature_map_shapes = { + 'Base_Conv2d_1x1_1024': (4, 19, 19, 1024), + 'MaxPool2d_0_2x2': (4, 10, 10, 1024), + 'MaxPool2d_1_2x2': (4, 5, 5, 1024), + 'MaxPool2d_2_2x2': (4, 3, 3, 1024), + 'MaxPool2d_3_2x2': (4, 2, 2, 1024), + 'MaxPool2d_4_2x2': (4, 1, 1, 1024), + } + + expected_conv_feature_map_shapes = { + 'Base_Conv2d_1x1_1024': (4, 19, 19, 1024), + 'Conv2d_0_3x3_s2_1024': (4, 10, 10, 1024), + 'Conv2d_1_3x3_s2_1024': (4, 5, 5, 1024), + 'Conv2d_2_3x3_s2_1024': (4, 3, 3, 1024), + 'Conv2d_3_3x3_s2_1024': (4, 2, 2, 1024), + 'Conv2d_4_3x3_s2_1024': (4, 1, 1, 1024), + } + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + out_feature_maps = sess.run(feature_maps) + out_feature_map_shapes = {key: value.shape + for key, value in out_feature_maps.items()} + if replace_pool_with_conv: + self.assertDictEqual(expected_conv_feature_map_shapes, + out_feature_map_shapes) + else: + self.assertDictEqual(expected_pool_feature_map_shapes, + out_feature_map_shapes) + + def test_get_expected_variable_names(self, replace_pool_with_conv): + image_features = { + 'image_features': tf.random_uniform([4, 19, 19, 1024]) + } + feature_maps = feature_map_generators.pooling_pyramid_feature_maps( + base_feature_map_depth=1024, + num_layers=6, + image_features=image_features, + replace_pool_with_conv=replace_pool_with_conv) + + expected_pool_variables = set([ + 'Base_Conv2d_1x1_1024/weights', + 'Base_Conv2d_1x1_1024/biases', + ]) + + expected_conv_variables = set([ + 'Base_Conv2d_1x1_1024/weights', + 'Base_Conv2d_1x1_1024/biases', + 'Conv2d_0_3x3_s2_1024/weights', + 'Conv2d_0_3x3_s2_1024/biases', + 'Conv2d_1_3x3_s2_1024/weights', + 'Conv2d_1_3x3_s2_1024/biases', + 'Conv2d_2_3x3_s2_1024/weights', + 'Conv2d_2_3x3_s2_1024/biases', + 'Conv2d_3_3x3_s2_1024/weights', + 'Conv2d_3_3x3_s2_1024/biases', + 'Conv2d_4_3x3_s2_1024/weights', + 'Conv2d_4_3x3_s2_1024/biases', + ]) + + init_op = tf.global_variables_initializer() + with self.test_session() as sess: + sess.run(init_op) + sess.run(feature_maps) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + if replace_pool_with_conv: + self.assertSetEqual(expected_conv_variables, actual_variable_set) + else: + self.assertSetEqual(expected_pool_variables, actual_variable_set) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/base_models/original_mobilenet_v2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/base_models/original_mobilenet_v2.py new file mode 100644 index 
0000000000000000000000000000000000000000..cf7f95724e86c422b568921b77dcf094d901d11f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/base_models/original_mobilenet_v2.py @@ -0,0 +1,478 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""MobileNet v2 models for Keras. + +MobileNetV2 is a general architecture and can be used for multiple use cases. +Depending on the use case, it can use different input layer size and +different width factors. This allows different width models to reduce +the number of multiply-adds and thereby +reduce inference cost on mobile devices. + +MobileNetV2 is very similar to the original MobileNet, +except that it uses inverted residual blocks with +bottlenecking features. It has a drastically lower +parameter count than the original MobileNet. +MobileNets support any input size greater +than 32 x 32, with larger image sizes +offering better performance. + +The number of parameters and number of multiply-adds +can be modified by using the `alpha` parameter, +which increases/decreases the number of filters in each layer. +By altering the image size and `alpha` parameter, +all 22 models from the paper can be built, with ImageNet weights provided. + +The paper demonstrates the performance of MobileNets using `alpha` values of +1.0 (also called 100 % MobileNet), 0.35, 0.5, 0.75, 1.0, 1.3, and 1.4 + +For each of these `alpha` values, weights for 5 different input image sizes +are provided (224, 192, 160, 128, and 96). 
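As a rough illustration of how the `alpha` width multiplier described above scales per-layer filter counts (the rounding to a multiple of 8 mirrors the `_make_divisible` helper defined further down in this file; the printed values are illustrative, not taken from the checkpoint table below):

  # Sketch: scale a nominal filter count by alpha, then round to a multiple of 8.
  def make_divisible(v, divisor=8, min_value=None):
    if min_value is None:
      min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:  # never round down by more than 10%
      new_v += divisor
    return new_v

  for alpha in (0.35, 0.5, 0.75, 1.0, 1.3, 1.4):
    # The first conv block of MobileNetV2 nominally has 32 filters.
    print(alpha, make_divisible(32 * alpha))  # e.g. 1.0 -> 32, 1.4 -> 48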
+ + +The following table describes the performance of +MobileNet on various input sizes: +------------------------------------------------------------------------ +MACs stands for Multiply Adds + + Classification Checkpoint| MACs (M) | Parameters (M)| Top 1 Acc| Top 5 Acc +--------------------------|------------|---------------|---------|----|------- +| [mobilenet_v2_1.4_224] | 582 | 6.06 | 75.0 | 92.5 | +| [mobilenet_v2_1.3_224] | 509 | 5.34 | 74.4 | 92.1 | +| [mobilenet_v2_1.0_224] | 300 | 3.47 | 71.8 | 91.0 | +| [mobilenet_v2_1.0_192] | 221 | 3.47 | 70.7 | 90.1 | +| [mobilenet_v2_1.0_160] | 154 | 3.47 | 68.8 | 89.0 | +| [mobilenet_v2_1.0_128] | 99 | 3.47 | 65.3 | 86.9 | +| [mobilenet_v2_1.0_96] | 56 | 3.47 | 60.3 | 83.2 | +| [mobilenet_v2_0.75_224] | 209 | 2.61 | 69.8 | 89.6 | +| [mobilenet_v2_0.75_192] | 153 | 2.61 | 68.7 | 88.9 | +| [mobilenet_v2_0.75_160] | 107 | 2.61 | 66.4 | 87.3 | +| [mobilenet_v2_0.75_128] | 69 | 2.61 | 63.2 | 85.3 | +| [mobilenet_v2_0.75_96] | 39 | 2.61 | 58.8 | 81.6 | +| [mobilenet_v2_0.5_224] | 97 | 1.95 | 65.4 | 86.4 | +| [mobilenet_v2_0.5_192] | 71 | 1.95 | 63.9 | 85.4 | +| [mobilenet_v2_0.5_160] | 50 | 1.95 | 61.0 | 83.2 | +| [mobilenet_v2_0.5_128] | 32 | 1.95 | 57.7 | 80.8 | +| [mobilenet_v2_0.5_96] | 18 | 1.95 | 51.2 | 75.8 | +| [mobilenet_v2_0.35_224] | 59 | 1.66 | 60.3 | 82.9 | +| [mobilenet_v2_0.35_192] | 43 | 1.66 | 58.2 | 81.2 | +| [mobilenet_v2_0.35_160] | 30 | 1.66 | 55.7 | 79.1 | +| [mobilenet_v2_0.35_128] | 20 | 1.66 | 50.8 | 75.0 | +| [mobilenet_v2_0.35_96] | 11 | 1.66 | 45.5 | 70.4 | + +The weights for all 16 models are obtained and translated from the Tensorflow +checkpoints from TensorFlow checkpoints found at +https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/README.md + +# Reference +This file contains building code for MobileNetV2, based on +[MobileNetV2: Inverted Residuals and Linear Bottlenecks] +(https://arxiv.org/abs/1801.04381) + +Tests comparing this model to the existing Tensorflow model can be +found at +[mobilenet_v2_keras](https://github.com/JonathanCMitchell/mobilenet_v2_keras) +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import warnings +import numpy as np +import tensorflow.compat.v1 as tf + +Model = tf.keras.Model +Input = tf.keras.layers.Input +Activation = tf.keras.layers.Activation +BatchNormalization = tf.keras.layers.BatchNormalization +Conv2D = tf.keras.layers.Conv2D +DepthwiseConv2D = tf.keras.layers.DepthwiseConv2D +GlobalAveragePooling2D = tf.keras.layers.GlobalAveragePooling2D +Add = tf.keras.layers.Add +Dense = tf.keras.layers.Dense +K = tf.keras.Backend + + +def relu6(x): + return K.relu(x, max_value=6) + + +def _obtain_input_shape( + input_shape, + default_size, + min_size, + data_format, + require_flatten): + """Internal utility to compute/validate an ImageNet model's input shape. + + Arguments: + input_shape: either None (will return the default network input shape), + or a user-provided shape to be validated. + default_size: default input width/height for the model. + min_size: minimum input width/height accepted by the model. + data_format: image data format to use. + require_flatten: whether the model is expected to + be linked to a classifier via a Flatten layer. + + Returns: + An integer shape tuple (may include None entries). + + Raises: + ValueError: in case of invalid argument values. 
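A quick sketch of how the `_obtain_input_shape` helper is expected to behave, based on the body that follows (the shapes here are illustrative):

  # With no user shape and include_top=True, the default static shape is used.
  shape = _obtain_input_shape(None, default_size=224, min_size=32,
                              data_format='channels_last',
                              require_flatten=True)
  # shape == (224, 224, 3)

  # A valid user-provided shape is validated and passed through unchanged.
  shape = _obtain_input_shape((160, 160, 3), default_size=224, min_size=32,
                              data_format='channels_last',
                              require_flatten=False)
  # shape == (160, 160, 3); shapes below min_size, e.g. (20, 20, 3), raise ValueError.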
+ """ + if input_shape and len(input_shape) == 3: + if data_format == 'channels_first': + if input_shape[0] not in {1, 3}: + warnings.warn( + 'This model usually expects 1 or 3 input channels. ' + 'However, it was passed an input_shape with ' + + str(input_shape[0]) + ' input channels.') + default_shape = (input_shape[0], default_size, default_size) + else: + if input_shape[-1] not in {1, 3}: + warnings.warn( + 'This model usually expects 1 or 3 input channels. ' + 'However, it was passed an input_shape with ' + + str(input_shape[-1]) + ' input channels.') + default_shape = (default_size, default_size, input_shape[-1]) + else: + if data_format == 'channels_first': + default_shape = (3, default_size, default_size) + else: + default_shape = (default_size, default_size, 3) + if input_shape: + if data_format == 'channels_first': + if input_shape is not None: + if len(input_shape) != 3: + raise ValueError( + '`input_shape` must be a tuple of three integers.') + if ((input_shape[1] is not None and input_shape[1] < min_size) or + (input_shape[2] is not None and input_shape[2] < min_size)): + raise ValueError('Input size must be at least ' + + str(min_size) + 'x' + str(min_size) + + '; got `input_shape=' + + str(input_shape) + '`') + else: + if input_shape is not None: + if len(input_shape) != 3: + raise ValueError( + '`input_shape` must be a tuple of three integers.') + if ((input_shape[0] is not None and input_shape[0] < min_size) or + (input_shape[1] is not None and input_shape[1] < min_size)): + raise ValueError('Input size must be at least ' + + str(min_size) + 'x' + str(min_size) + + '; got `input_shape=' + + str(input_shape) + '`') + else: + if require_flatten: + input_shape = default_shape + else: + if data_format == 'channels_first': + input_shape = (3, None, None) + else: + input_shape = (None, None, 3) + if require_flatten: + if None in input_shape: + raise ValueError('If `include_top` is True, ' + 'you should specify a static `input_shape`. ' + 'Got `input_shape=' + str(input_shape) + '`') + return input_shape + + +def preprocess_input(x): + """Preprocesses a numpy array encoding a batch of images. + + This function applies the "Inception" preprocessing which converts + the RGB values from [0, 255] to [-1, 1]. Note that this preprocessing + function is different from `imagenet_utils.preprocess_input()`. + + Arguments: + x: a 4D numpy array consists of RGB values within [0, 255]. + + Returns: + Preprocessed array. + """ + x /= 128. + x -= 1. + return x.astype(np.float32) + + +# This function is taken from the original tf repo. +# It ensures that all layers have a channel number that is divisible by 8 +# It can be seen here: +# https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + + +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +def mobilenet_v2(input_shape=None, + alpha=1.0, + include_top=True, + classes=1000): + """Instantiates the MobileNetV2 architecture. + + To load a MobileNetV2 model via `load_model`, import the custom + objects `relu6` and pass them to the `custom_objects` parameter. + E.g. 
+ model = load_model('mobilenet.h5', custom_objects={ + 'relu6': mobilenet.relu6}) + + Arguments: + input_shape: optional shape tuple, to be specified if you would + like to use a model with an input img resolution that is not + (224, 224, 3). + It should have exactly 3 inputs channels (224, 224, 3). + You can also omit this option if you would like + to infer input_shape from an input_tensor. + If you choose to include both input_tensor and input_shape then + input_shape will be used if they match, if the shapes + do not match then we will throw an error. + E.g. `(160, 160, 3)` would be one valid value. + alpha: controls the width of the network. This is known as the + width multiplier in the MobileNetV2 paper. + - If `alpha` < 1.0, proportionally decreases the number + of filters in each layer. + - If `alpha` > 1.0, proportionally increases the number + of filters in each layer. + - If `alpha` = 1, default number of filters from the paper + are used at each layer. + include_top: whether to include the fully-connected + layer at the top of the network. + classes: optional number of classes to classify images + into, only to be specified if `include_top` is True, and + if no `weights` argument is specified. + + Returns: + A Keras model instance. + + Raises: + ValueError: in case of invalid argument for `weights`, + or invalid input shape or invalid depth_multiplier, alpha, + rows when weights='imagenet' + """ + + # Determine proper input shape and default size. + # If input_shape is None and no input_tensor + if input_shape is None: + default_size = 224 + + # If input_shape is not None, assume default size + else: + if K.image_data_format() == 'channels_first': + rows = input_shape[1] + cols = input_shape[2] + else: + rows = input_shape[0] + cols = input_shape[1] + + if rows == cols and rows in [96, 128, 160, 192, 224]: + default_size = rows + else: + default_size = 224 + + input_shape = _obtain_input_shape(input_shape, + default_size=default_size, + min_size=32, + data_format=K.image_data_format(), + require_flatten=include_top) + + if K.image_data_format() == 'channels_last': + row_axis, col_axis = (0, 1) + else: + row_axis, col_axis = (1, 2) + rows = input_shape[row_axis] + cols = input_shape[col_axis] + + if K.image_data_format() != 'channels_last': + warnings.warn('The MobileNet family of models is only available ' + 'for the input data format "channels_last" ' + '(width, height, channels). ' + 'However your settings specify the default ' + 'data format "channels_first" (channels, width, height).' + ' You should set `image_data_format="channels_last"` ' + 'in your Keras config located at ~/.keras/keras.json. 
' + 'The model being returned right now will expect inputs ' + 'to follow the "channels_last" data format.') + K.set_image_data_format('channels_last') + old_data_format = 'channels_first' + else: + old_data_format = None + + img_input = Input(shape=input_shape) + + first_block_filters = _make_divisible(32 * alpha, 8) + x = Conv2D(first_block_filters, + kernel_size=3, + strides=(2, 2), padding='same', + use_bias=False, name='Conv1')(img_input) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='bn_Conv1')(x) + x = Activation(relu6, name='Conv1_relu')(x) + + x = _first_inverted_res_block(x, + filters=16, + alpha=alpha, + stride=1, + block_id=0) + + x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2, + expansion=6, block_id=1) + x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1, + expansion=6, block_id=2) + + x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2, + expansion=6, block_id=3) + x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, + expansion=6, block_id=4) + x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, + expansion=6, block_id=5) + + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2, + expansion=6, block_id=6) + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, + expansion=6, block_id=7) + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, + expansion=6, block_id=8) + x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, + expansion=6, block_id=9) + + x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, + expansion=6, block_id=10) + x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, + expansion=6, block_id=11) + x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, + expansion=6, block_id=12) + + x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2, + expansion=6, block_id=13) + x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, + expansion=6, block_id=14) + x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, + expansion=6, block_id=15) + + x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, + expansion=6, block_id=16) + + # no alpha applied to last conv as stated in the paper: + # if the width multiplier is greater than 1 we + # increase the number of output channels + if alpha > 1.0: + last_block_filters = _make_divisible(1280 * alpha, 8) + else: + last_block_filters = 1280 + + x = Conv2D(last_block_filters, + kernel_size=1, + use_bias=False, + name='Conv_1')(x) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='Conv_1_bn')(x) + x = Activation(relu6, name='out_relu')(x) + + if include_top: + x = GlobalAveragePooling2D()(x) + x = Dense(classes, activation='softmax', + use_bias=True, name='Logits')(x) + + # Ensure that the model takes into account + # any potential predecessors of `input_tensor`. + inputs = img_input + + # Create model. 
+ model = Model(inputs, x, name='mobilenetv2_%0.2f_%s' % (alpha, rows)) + + if old_data_format: + K.set_image_data_format(old_data_format) + return model + + +def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id): + """Build an inverted res block.""" + in_channels = int(inputs.shape[-1]) + pointwise_conv_filters = int(filters * alpha) + pointwise_filters = _make_divisible(pointwise_conv_filters, 8) + # Expand + + x = Conv2D(expansion * in_channels, kernel_size=1, padding='same', + use_bias=False, activation=None, + name='mobl%d_conv_expand' % block_id)(inputs) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, + name='bn%d_conv_bn_expand' % + block_id)(x) + x = Activation(relu6, name='conv_%d_relu' % block_id)(x) + + # Depthwise + x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None, + use_bias=False, padding='same', + name='mobl%d_conv_depthwise' % block_id)(x) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, + name='bn%d_conv_depthwise' % block_id)(x) + + x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x) + + # Project + x = Conv2D(pointwise_filters, + kernel_size=1, padding='same', use_bias=False, activation=None, + name='mobl%d_conv_project' % block_id)(x) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, + name='bn%d_conv_bn_project' % block_id)(x) + + if in_channels == pointwise_filters and stride == 1: + return Add(name='res_connect_' + str(block_id))([inputs, x]) + + return x + + +def _first_inverted_res_block(inputs, + stride, + alpha, filters, block_id): + """Build the first inverted res block.""" + in_channels = int(inputs.shape[-1]) + pointwise_conv_filters = int(filters * alpha) + pointwise_filters = _make_divisible(pointwise_conv_filters, 8) + + # Depthwise + x = DepthwiseConv2D(kernel_size=3, + strides=stride, activation=None, + use_bias=False, padding='same', + name='mobl%d_conv_depthwise' % + block_id)(inputs) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, + name='bn%d_conv_depthwise' % + block_id)(x) + x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x) + + # Project + x = Conv2D(pointwise_filters, + kernel_size=1, + padding='same', + use_bias=False, + activation=None, + name='mobl%d_conv_project' % + block_id)(x) + x = BatchNormalization(epsilon=1e-3, momentum=0.999, + name='bn%d_conv_project' % + block_id)(x) + + if in_channels == pointwise_filters and stride == 1: + return Add(name='res_connect_' + str(block_id))([inputs, x]) + + return x diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/convert_keras_models.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/convert_keras_models.py new file mode 100644 index 0000000000000000000000000000000000000000..a34af981b37032115bf0c3e957e0f4c216504d4c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/convert_keras_models.py @@ -0,0 +1,85 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Write keras weights into a tensorflow checkpoint. + +The imagenet weights in `keras.applications` are downloaded from github. +This script converts them into the tensorflow checkpoint format and stores them +on disk where they can be easily accessible during training. +""" + +from __future__ import print_function + +import os + +from absl import app +import numpy as np +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + + +tf.flags.DEFINE_string('model', 'resnet_v2_101', + 'The model to load. The following are supported: ' + '"resnet_v1_50", "resnet_v1_101", "resnet_v2_50", ' + '"resnet_v2_101"') +tf.flags.DEFINE_string('output_path', None, + 'The directory to output weights in.') +tf.flags.DEFINE_boolean('verify_weights', True, + ('Verify the weights are loaded correctly by making ' + 'sure the predictions are the same before and after ' + 'saving.')) + + +def init_model(name): + """Creates a Keras Model with the specific ResNet version.""" + if name == 'resnet_v1_50': + model = tf.keras.applications.ResNet50(weights='imagenet') + elif name == 'resnet_v1_101': + model = tf.keras.applications.ResNet101(weights='imagenet') + elif name == 'resnet_v2_50': + model = tf.keras.applications.ResNet50V2(weights='imagenet') + elif name == 'resnet_v2_101': + model = tf.keras.applications.ResNet101V2(weights='imagenet') + else: + raise ValueError('Model {} not supported'.format(FLAGS.model)) + + return model + + +def main(_): + + model = init_model(FLAGS.model) + + path = os.path.join(FLAGS.output_path, FLAGS.model) + tf.gfile.MakeDirs(path) + weights_path = os.path.join(path, 'weights') + ckpt = tf.train.Checkpoint(feature_extractor=model) + saved_path = ckpt.save(weights_path) + + if FLAGS.verify_weights: + imgs = np.random.randn(1, 224, 224, 3).astype(np.float32) + keras_preds = model(imgs) + + model = init_model(FLAGS.model) + ckpt.restore(saved_path) + loaded_weights_pred = model(imgs).numpy() + + if not np.all(np.isclose(keras_preds, loaded_weights_pred)): + raise RuntimeError('The model was not saved correctly.') + + +if __name__ == '__main__': + tf.enable_v2_behavior() + app.run(main) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/hourglass_network.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/hourglass_network.py new file mode 100644 index 0000000000000000000000000000000000000000..e6e71545c401f0bd2df723581734d85969f400f7 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/hourglass_network.py @@ -0,0 +1,624 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Hourglass[1] network. 
+ +[1]: https://arxiv.org/abs/1603.06937 +""" + + +import tensorflow.compat.v2 as tf + + +BATCH_NORM_EPSILON = 1e-5 +BATCH_NORM_MOMENTUM = 0.1 +BATCH_NORM_FUSED = True + + +class IdentityLayer(tf.keras.layers.Layer): + """A layer which passes through the input as it is.""" + + def call(self, inputs): + return inputs + + +def _get_padding_for_kernel_size(kernel_size): + if kernel_size == 7: + return (3, 3) + elif kernel_size == 3: + return (1, 1) + else: + raise ValueError('Padding for kernel size {} not known.'.format( + kernel_size)) + + +def batchnorm(): + try: + return tf.keras.layers.experimental.SyncBatchNormalization( + name='batchnorm', epsilon=1e-5, momentum=0.1) + except AttributeError: + return tf.keras.layers.BatchNormalization( + name='batchnorm', epsilon=1e-5, momentum=0.1, fused=BATCH_NORM_FUSED) + + +class ConvolutionalBlock(tf.keras.layers.Layer): + """Block that aggregates Convolution + Norm layer + ReLU.""" + + def __init__(self, kernel_size, out_channels, stride=1, relu=True, + padding='same'): + """Initializes the Convolutional block. + + Args: + kernel_size: int, convolution kernel size. + out_channels: int, the desired number of output channels. + stride: Integer, stride used in the convolution. + relu: bool, whether to use relu at the end of the layer. + padding: str, the padding scheme to use when kernel_size <= 1 + """ + super(ConvolutionalBlock, self).__init__() + + if kernel_size > 1: + padding = 'valid' + padding_size = _get_padding_for_kernel_size(kernel_size) + + # TODO(vighneshb) Explore if removing and using padding option in conv + # layer works. + self.pad = tf.keras.layers.ZeroPadding2D(padding_size) + else: + self.pad = IdentityLayer() + + self.conv = tf.keras.layers.Conv2D( + filters=out_channels, kernel_size=kernel_size, use_bias=False, + strides=stride, padding=padding) + + self.norm = batchnorm() + + if relu: + self.relu = tf.keras.layers.ReLU() + else: + self.relu = IdentityLayer() + + def call(self, inputs): + net = self.pad(inputs) + net = self.conv(net) + net = self.norm(net) + return self.relu(net) + + +class SkipConvolution(ConvolutionalBlock): + """The skip connection layer for a ResNet.""" + + def __init__(self, out_channels, stride): + """Initializes the skip convolution layer. + + Args: + out_channels: int, the desired number of output channels. + stride: int, the stride for the layer. + """ + super(SkipConvolution, self).__init__( + out_channels=out_channels, kernel_size=1, stride=stride, relu=False) + + +class ResidualBlock(tf.keras.layers.Layer): + """A Residual block.""" + + def __init__(self, out_channels, skip_conv=False, kernel_size=3, stride=1, + padding='same'): + """Initializes the Residual block. + + Args: + out_channels: int, the desired number of output channels. + skip_conv: bool, whether to use a conv layer for skip connections. + kernel_size: int, convolution kernel size. + stride: Integer, stride used in the convolution. + padding: str, the type of padding to use. 
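For reference, the unit tests added later in this diff exercise these blocks roughly as follows (a minimal sketch of the expected output shapes):

  import numpy as np

  # A stride-1 residual block keeps the spatial size and channel count.
  block = ResidualBlock(out_channels=8, stride=1)
  out = block(np.zeros((2, 32, 32, 8), dtype=np.float32))  # -> (2, 32, 32, 8)

  # A stride-2 block halves the spatial size; the skip path then needs a
  # 1x1 SkipConvolution instead of an identity connection.
  block = ResidualBlock(out_channels=8, stride=2, skip_conv=True)
  out = block(np.zeros((2, 32, 32, 8), dtype=np.float32))  # -> (2, 16, 16, 8)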
+ """ + + super(ResidualBlock, self).__init__() + self.conv_block = ConvolutionalBlock( + kernel_size=kernel_size, out_channels=out_channels, stride=stride) + + self.conv = tf.keras.layers.Conv2D( + filters=out_channels, kernel_size=kernel_size, use_bias=False, + strides=1, padding=padding) + self.norm = batchnorm() + + if skip_conv: + self.skip = SkipConvolution(out_channels=out_channels, + stride=stride) + else: + self.skip = IdentityLayer() + + self.relu = tf.keras.layers.ReLU() + + def call(self, inputs): + net = self.conv_block(inputs) + net = self.conv(net) + net = self.norm(net) + net_skip = self.skip(inputs) + return self.relu(net + net_skip) + + +class InputDownsampleBlock(tf.keras.layers.Layer): + """Block for the initial feature downsampling.""" + + def __init__(self, out_channels_initial_conv, out_channels_residual_block): + """Initializes the downsample block. + + Args: + out_channels_initial_conv: int, the desired number of output channels + in the initial conv layer. + out_channels_residual_block: int, the desired number of output channels + in the underlying residual block. + """ + + super(InputDownsampleBlock, self).__init__() + self.conv_block = ConvolutionalBlock( + kernel_size=7, out_channels=out_channels_initial_conv, stride=2, + padding='valid') + self.residual_block = ResidualBlock( + out_channels=out_channels_residual_block, stride=2, skip_conv=True) + + def call(self, inputs): + return self.residual_block(self.conv_block(inputs)) + + +class InputConvBlock(tf.keras.layers.Layer): + """Block for the initial feature convolution. + + This block is used in the hourglass network when we don't want to downsample + the input. + """ + + def __init__(self, out_channels_initial_conv, out_channels_residual_block): + """Initializes the downsample block. + + Args: + out_channels_initial_conv: int, the desired number of output channels + in the initial conv layer. + out_channels_residual_block: int, the desired number of output channels + in the underlying residual block. + """ + + super(InputConvBlock, self).__init__() + + self.conv_block = ConvolutionalBlock( + kernel_size=3, out_channels=out_channels_initial_conv, stride=1, + padding='valid') + self.residual_block = ResidualBlock( + out_channels=out_channels_residual_block, stride=1, skip_conv=True) + + def call(self, inputs): + return self.residual_block(self.conv_block(inputs)) + + +def _make_repeated_residual_blocks(out_channels, num_blocks, + initial_stride=1, residual_channels=None, + initial_skip_conv=False): + """Stack Residual blocks one after the other. + + Args: + out_channels: int, the desired number of output channels. + num_blocks: int, the number of residual blocks to be stacked. + initial_stride: int, the stride of the initial residual block. + residual_channels: int, the desired number of output channels in the + intermediate residual blocks. If not specifed, we use out_channels. + initial_skip_conv: bool, if set, the first residual block uses a skip + convolution. This is useful when the number of channels in the input + are not the same as residual_channels. + + Returns: + blocks: A list of residual blocks to be applied in sequence. 
+ + """ + + blocks = [] + + if residual_channels is None: + residual_channels = out_channels + + for i in range(num_blocks - 1): + # Only use the stride at the first block so we don't repeatedly downsample + # the input + stride = initial_stride if i == 0 else 1 + + # If the stide is more than 1, we cannot use an identity layer for the + # skip connection and are forced to use a conv for the skip connection. + skip_conv = stride > 1 + + if i == 0 and initial_skip_conv: + skip_conv = True + + blocks.append( + ResidualBlock(out_channels=residual_channels, stride=stride, + skip_conv=skip_conv) + ) + + if num_blocks == 1: + # If there is only 1 block, the for loop above is not run, + # therefore we honor the requested stride in the last residual block + stride = initial_stride + # We are forced to use a conv in the skip connection if stride > 1 + skip_conv = stride > 1 + else: + stride = 1 + skip_conv = residual_channels != out_channels + + blocks.append(ResidualBlock(out_channels=out_channels, skip_conv=skip_conv, + stride=stride)) + + return blocks + + +def _apply_blocks(inputs, blocks): + net = inputs + + for block in blocks: + net = block(net) + + return net + + +class EncoderDecoderBlock(tf.keras.layers.Layer): + """An encoder-decoder block which recursively defines the hourglass network.""" + + def __init__(self, num_stages, channel_dims, blocks_per_stage, + stagewise_downsample=True, encoder_decoder_shortcut=True): + """Initializes the encoder-decoder block. + + Args: + num_stages: int, Number of stages in the network. At each stage we have 2 + encoder and 1 decoder blocks. The second encoder block downsamples the + input. + channel_dims: int list, the output channels dimensions of stages in + the network. `channel_dims[0]` is used to define the number of + channels in the first encoder block and `channel_dims[1]` is used to + define the number of channels in the second encoder block. The channels + in the recursive inner layers are defined using `channel_dims[1:]` + blocks_per_stage: int list, number of residual blocks to use at each + stage. `blocks_per_stage[0]` defines the number of blocks at the + current stage and `blocks_per_stage[1:]` is used at further stages. + stagewise_downsample: bool, whether or not to downsample before passing + inputs to the next stage. + encoder_decoder_shortcut: bool, whether or not to use shortcut + connections between encoder and decoder. 
+ """ + + super(EncoderDecoderBlock, self).__init__() + + out_channels = channel_dims[0] + out_channels_downsampled = channel_dims[1] + + self.encoder_decoder_shortcut = encoder_decoder_shortcut + + if encoder_decoder_shortcut: + self.merge_features = tf.keras.layers.Add() + self.encoder_block1 = _make_repeated_residual_blocks( + out_channels=out_channels, num_blocks=blocks_per_stage[0], + initial_stride=1) + + initial_stride = 2 if stagewise_downsample else 1 + self.encoder_block2 = _make_repeated_residual_blocks( + out_channels=out_channels_downsampled, + num_blocks=blocks_per_stage[0], initial_stride=initial_stride, + initial_skip_conv=out_channels != out_channels_downsampled) + + if num_stages > 1: + self.inner_block = [ + EncoderDecoderBlock(num_stages - 1, channel_dims[1:], + blocks_per_stage[1:], + stagewise_downsample=stagewise_downsample, + encoder_decoder_shortcut=encoder_decoder_shortcut) + ] + else: + self.inner_block = _make_repeated_residual_blocks( + out_channels=out_channels_downsampled, + num_blocks=blocks_per_stage[1]) + + self.decoder_block = _make_repeated_residual_blocks( + residual_channels=out_channels_downsampled, + out_channels=out_channels, num_blocks=blocks_per_stage[0]) + + self.upsample = tf.keras.layers.UpSampling2D(initial_stride) + + def call(self, inputs): + + if self.encoder_decoder_shortcut: + encoded_outputs = _apply_blocks(inputs, self.encoder_block1) + encoded_downsampled_outputs = _apply_blocks(inputs, self.encoder_block2) + inner_block_outputs = _apply_blocks( + encoded_downsampled_outputs, self.inner_block) + + decoded_outputs = _apply_blocks(inner_block_outputs, self.decoder_block) + upsampled_outputs = self.upsample(decoded_outputs) + + if self.encoder_decoder_shortcut: + return self.merge_features([encoded_outputs, upsampled_outputs]) + else: + return upsampled_outputs + + +class HourglassNetwork(tf.keras.Model): + """The hourglass network.""" + + def __init__(self, num_stages, input_channel_dims, channel_dims_per_stage, + blocks_per_stage, num_hourglasses, initial_downsample=True, + stagewise_downsample=True, encoder_decoder_shortcut=True): + """Intializes the feature extractor. + + Args: + num_stages: int, Number of stages in the network. At each stage we have 2 + encoder and 1 decoder blocks. The second encoder block downsamples the + input. + input_channel_dims: int, the number of channels in the input conv blocks. + channel_dims_per_stage: int list, the output channel dimensions of each + stage in the hourglass network. + blocks_per_stage: int list, number of residual blocks to use at each + stage in the hourglass network + num_hourglasses: int, number of hourglas networks to stack + sequentially. + initial_downsample: bool, if set, downsamples the input by a factor of 4 + before applying the rest of the network. Downsampling is done with a 7x7 + convolution kernel, otherwise a 3x3 kernel is used. + stagewise_downsample: bool, whether or not to downsample before passing + inputs to the next stage. + encoder_decoder_shortcut: bool, whether or not to use shortcut + connections between encoder and decoder. 
+ """ + + super(HourglassNetwork, self).__init__() + + self.num_hourglasses = num_hourglasses + self.initial_downsample = initial_downsample + if initial_downsample: + self.downsample_input = InputDownsampleBlock( + out_channels_initial_conv=input_channel_dims, + out_channels_residual_block=channel_dims_per_stage[0] + ) + else: + self.conv_input = InputConvBlock( + out_channels_initial_conv=input_channel_dims, + out_channels_residual_block=channel_dims_per_stage[0] + ) + + self.hourglass_network = [] + self.output_conv = [] + for _ in range(self.num_hourglasses): + self.hourglass_network.append( + EncoderDecoderBlock( + num_stages=num_stages, channel_dims=channel_dims_per_stage, + blocks_per_stage=blocks_per_stage, + stagewise_downsample=stagewise_downsample, + encoder_decoder_shortcut=encoder_decoder_shortcut) + ) + self.output_conv.append( + ConvolutionalBlock(kernel_size=3, + out_channels=channel_dims_per_stage[0]) + ) + + self.intermediate_conv1 = [] + self.intermediate_conv2 = [] + self.intermediate_residual = [] + + for _ in range(self.num_hourglasses - 1): + self.intermediate_conv1.append( + ConvolutionalBlock( + kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False) + ) + self.intermediate_conv2.append( + ConvolutionalBlock( + kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False) + ) + self.intermediate_residual.append( + ResidualBlock(out_channels=channel_dims_per_stage[0]) + ) + + self.intermediate_relu = tf.keras.layers.ReLU() + + def call(self, inputs): + + if self.initial_downsample: + inputs = self.downsample_input(inputs) + else: + inputs = self.conv_input(inputs) + + outputs = [] + + for i in range(self.num_hourglasses): + + hourglass_output = self.hourglass_network[i](inputs) + + output = self.output_conv[i](hourglass_output) + outputs.append(output) + + if i < self.num_hourglasses - 1: + secondary_output = (self.intermediate_conv1[i](inputs) + + self.intermediate_conv2[i](output)) + secondary_output = self.intermediate_relu(secondary_output) + inputs = self.intermediate_residual[i](secondary_output) + + return outputs + + @property + def out_stride(self): + """The stride in the output image of the network.""" + return 4 + + @property + def num_feature_outputs(self): + """Ther number of feature outputs returned by the feature extractor.""" + return self.num_hourglasses + + +def _layer_depth(layer): + """Compute depth of Conv/Residual blocks or lists of them.""" + + if isinstance(layer, list): + return sum([_layer_depth(l) for l in layer]) + + elif isinstance(layer, ConvolutionalBlock): + return 1 + + elif isinstance(layer, ResidualBlock): + return 2 + + else: + raise ValueError('Unknown layer - {}'.format(layer)) + + +def _encoder_decoder_depth(network): + """Helper function to compute depth of encoder-decoder blocks.""" + + encoder_block2_layers = _layer_depth(network.encoder_block2) + decoder_block_layers = _layer_depth(network.decoder_block) + + if isinstance(network.inner_block[0], EncoderDecoderBlock): + + assert len(network.inner_block) == 1, 'Inner block is expected as length 1.' 
+ inner_block_layers = _encoder_decoder_depth(network.inner_block[0]) + + return inner_block_layers + encoder_block2_layers + decoder_block_layers + + elif isinstance(network.inner_block[0], ResidualBlock): + return (encoder_block2_layers + decoder_block_layers + + _layer_depth(network.inner_block)) + + else: + raise ValueError('Unknown inner block type.') + + +def hourglass_depth(network): + """Helper function to verify depth of hourglass backbone.""" + + input_conv_layers = 3 # 1 ResidualBlock and 1 ConvBlock + + # Only intermediate_conv2 and intermediate_residual are applied before + # sending inputs to the later stages. + intermediate_layers = ( + _layer_depth(network.intermediate_conv2) + + _layer_depth(network.intermediate_residual) + ) + + # network.output_conv is applied before sending input to the later stages + output_layers = _layer_depth(network.output_conv) + + encoder_decoder_layers = sum(_encoder_decoder_depth(net) for net in + network.hourglass_network) + + return (input_conv_layers + encoder_decoder_layers + intermediate_layers + + output_layers) + + +def hourglass_104(): + """The Hourglass-104 backbone. + + The architecture parameters are taken from [1]. + + Returns: + network: An HourglassNetwork object implementing the Hourglass-104 + backbone. + + [1]: https://arxiv.org/abs/1904.07850 + """ + + return HourglassNetwork( + input_channel_dims=128, + channel_dims_per_stage=[256, 256, 384, 384, 384, 512], + num_hourglasses=2, + num_stages=5, + blocks_per_stage=[2, 2, 2, 2, 2, 4], + ) + + +def single_stage_hourglass(input_channel_dims, channel_dims_per_stage, + blocks_per_stage, initial_downsample=True, + stagewise_downsample=True, + encoder_decoder_shortcut=True): + assert len(channel_dims_per_stage) == len(blocks_per_stage) + + return HourglassNetwork( + input_channel_dims=input_channel_dims, + channel_dims_per_stage=channel_dims_per_stage, + num_hourglasses=1, + num_stages=len(channel_dims_per_stage) - 1, + blocks_per_stage=blocks_per_stage, + initial_downsample=initial_downsample, + stagewise_downsample=stagewise_downsample, + encoder_decoder_shortcut=encoder_decoder_shortcut + ) + + +def hourglass_10(num_channels, initial_downsample=True): + nc = num_channels + return single_stage_hourglass( + input_channel_dims=nc, + initial_downsample=initial_downsample, + blocks_per_stage=[1, 1], + channel_dims_per_stage=[nc * 2, nc * 2]) + + +def hourglass_20(num_channels, initial_downsample=True): + nc = num_channels + return single_stage_hourglass( + input_channel_dims=nc, + initial_downsample=initial_downsample, + blocks_per_stage=[1, 2, 2], + channel_dims_per_stage=[nc * 2, nc * 2, nc * 3]) + + +def hourglass_32(num_channels, initial_downsample=True): + nc = num_channels + return single_stage_hourglass( + input_channel_dims=nc, + initial_downsample=initial_downsample, + blocks_per_stage=[2, 2, 2, 2], + channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3]) + + +def hourglass_52(num_channels, initial_downsample=True): + nc = num_channels + return single_stage_hourglass( + input_channel_dims=nc, + initial_downsample=initial_downsample, + blocks_per_stage=[2, 2, 2, 2, 2, 4], + channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4]) + + +def hourglass_100(num_channels, initial_downsample=True): + nc = num_channels + return single_stage_hourglass( + input_channel_dims=nc, + initial_downsample=initial_downsample, + blocks_per_stage=[4, 4, 4, 4, 4, 8], + channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4]) + + +def 
hourglass_20_uniform_size(num_channels): + nc = num_channels + return single_stage_hourglass( + input_channel_dims=nc, + blocks_per_stage=[1, 2, 2], + channel_dims_per_stage=[nc * 2, nc * 2, nc * 3], + initial_downsample=False, + stagewise_downsample=False) + + +def hourglass_20_no_shortcut(num_channels): + nc = num_channels + return single_stage_hourglass( + input_channel_dims=nc, + blocks_per_stage=[1, 2, 2], + channel_dims_per_stage=[nc * 2, nc * 2, nc * 3], + initial_downsample=False, + encoder_decoder_shortcut=False) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/hourglass_network_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/hourglass_network_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d1813703c7c6debc049711551031800985b8431d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/hourglass_network_tf2_test.py @@ -0,0 +1,158 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Testing the Hourglass network.""" +import unittest +from absl.testing import parameterized +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models.keras_models import hourglass_network as hourglass +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class HourglassFeatureExtractorTest(tf.test.TestCase, parameterized.TestCase): + + def test_identity_layer(self): + + layer = hourglass.IdentityLayer() + output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) + self.assertEqual(output.shape, (2, 32, 32, 3)) + + def test_skip_conv_layer_stride_1(self): + + layer = hourglass.SkipConvolution(out_channels=8, stride=1) + output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) + self.assertEqual(output.shape, (2, 32, 32, 8)) + + def test_skip_conv_layer_stride_2(self): + + layer = hourglass.SkipConvolution(out_channels=8, stride=2) + output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) + self.assertEqual(output.shape, (2, 16, 16, 8)) + + @parameterized.parameters([{'kernel_size': 1}, + {'kernel_size': 3}, + {'kernel_size': 7}]) + def test_conv_block(self, kernel_size): + + layer = hourglass.ConvolutionalBlock( + out_channels=8, kernel_size=kernel_size, stride=1) + output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) + self.assertEqual(output.shape, (2, 32, 32, 8)) + + layer = hourglass.ConvolutionalBlock( + out_channels=8, kernel_size=kernel_size, stride=2) + output = layer(np.zeros((2, 32, 32, 3), dtype=np.float32)) + self.assertEqual(output.shape, (2, 16, 16, 8)) + + def test_residual_block_stride_1(self): + + layer = hourglass.ResidualBlock(out_channels=8, stride=1) + output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) + self.assertEqual(output.shape, (2, 32, 32, 
8)) + + def test_residual_block_stride_2(self): + + layer = hourglass.ResidualBlock(out_channels=8, stride=2, + skip_conv=True) + output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) + self.assertEqual(output.shape, (2, 16, 16, 8)) + + def test_input_downsample_block(self): + + layer = hourglass.InputDownsampleBlock( + out_channels_initial_conv=4, out_channels_residual_block=8) + output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) + self.assertEqual(output.shape, (2, 8, 8, 8)) + + def test_input_conv_block(self): + layer = hourglass.InputConvBlock( + out_channels_initial_conv=4, out_channels_residual_block=8) + output = layer(np.zeros((2, 32, 32, 8), dtype=np.float32)) + self.assertEqual(output.shape, (2, 32, 32, 8)) + + def test_encoder_decoder_block(self): + + layer = hourglass.EncoderDecoderBlock( + num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], + channel_dims=[4, 6, 8, 10, 12]) + output = layer(np.zeros((2, 64, 64, 4), dtype=np.float32)) + self.assertEqual(output.shape, (2, 64, 64, 4)) + + def test_hourglass_feature_extractor(self): + + model = hourglass.HourglassNetwork( + num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6], input_channel_dims=4, + channel_dims_per_stage=[6, 8, 10, 12, 14], num_hourglasses=2) + outputs = model(np.zeros((2, 64, 64, 3), dtype=np.float32)) + self.assertEqual(outputs[0].shape, (2, 16, 16, 6)) + self.assertEqual(outputs[1].shape, (2, 16, 16, 6)) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class HourglassDepthTest(tf.test.TestCase): + + def test_hourglass_104(self): + + net = hourglass.hourglass_104() + self.assertEqual(hourglass.hourglass_depth(net), 104) + + def test_hourglass_10(self): + net = hourglass.hourglass_10(2, initial_downsample=False) + self.assertEqual(hourglass.hourglass_depth(net), 10) + + outputs = net(tf.zeros((2, 32, 32, 3))) + self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) + + def test_hourglass_20(self): + net = hourglass.hourglass_20(2, initial_downsample=False) + self.assertEqual(hourglass.hourglass_depth(net), 20) + + outputs = net(tf.zeros((2, 32, 32, 3))) + self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) + + def test_hourglass_32(self): + net = hourglass.hourglass_32(2, initial_downsample=False) + self.assertEqual(hourglass.hourglass_depth(net), 32) + + outputs = net(tf.zeros((2, 32, 32, 3))) + self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) + + def test_hourglass_52(self): + net = hourglass.hourglass_52(2, initial_downsample=False) + self.assertEqual(hourglass.hourglass_depth(net), 52) + + outputs = net(tf.zeros((2, 32, 32, 3))) + self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) + + def test_hourglass_20_uniform_size(self): + net = hourglass.hourglass_20_uniform_size(2) + self.assertEqual(hourglass.hourglass_depth(net), 20) + + outputs = net(tf.zeros((2, 32, 32, 3))) + self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) + + def test_hourglass_100(self): + net = hourglass.hourglass_100(2, initial_downsample=False) + self.assertEqual(hourglass.hourglass_depth(net), 100) + + outputs = net(tf.zeros((2, 32, 32, 3))) + self.assertEqual(outputs[0].shape, (2, 32, 32, 4)) + + +if __name__ == '__main__': + tf.test.main() + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/inception_resnet_v2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/inception_resnet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..9ecdfa2615f47198335e5a8467c06bdb97e9b3be --- /dev/null +++ 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/inception_resnet_v2.py @@ -0,0 +1,244 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A wrapper around the Keras InceptionResnetV2 models for object detection.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.core import freezable_batch_norm + + +class _LayersOverride(object): + """Alternative Keras layers interface for the Keras InceptionResNetV2.""" + + def __init__(self, + batchnorm_training, + output_stride=16, + align_feature_maps=False, + batchnorm_scale=False, + default_batchnorm_momentum=0.999, + default_batchnorm_epsilon=1e-3, + weight_decay=0.00004): + """Alternative tf.keras.layers interface, for use by InceptionResNetV2. + + It is used by the Keras applications kwargs injection API to + modify the Inception Resnet V2 Keras application with changes required by + the Object Detection API. + + These injected interfaces make the following changes to the network: + + - Supports freezing batch norm layers + - Adds support for feature map alignment (like in the Slim model) + - Adds support for changing the output stride (like in the Slim model) + - Adds support for overriding various batch norm hyperparameters + + Because the Keras inception resnet v2 application does not assign explicit + names to most individual layers, the injection of output stride support + works by identifying convolution layers according to their filter counts + and pre-feature-map-alignment padding arguments. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + output_stride: A scalar that specifies the requested ratio of input to + output spatial resolution. Only supports 8 and 16. + align_feature_maps: When true, changes all the VALID paddings in the + network to SAME padding so that the feature maps are aligned. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the + activations in the batch normalization layer. + default_batchnorm_momentum: Float. Batch norm layers will be constructed + using this value as the momentum. + default_batchnorm_epsilon: small float added to variance to avoid + dividing by zero. + weight_decay: the l2 regularization weight decay for weights variables. + (gets multiplied by 0.5 to map from slim l2 regularization weight to + Keras l2 regularization weight). 
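+
+    Note: instances of this class are not used directly; `inception_resnet_v2`
+    below passes one to `tf.keras.applications.InceptionResNetV2` through its
+    `layers=` keyword, which is how these overridden layer builders get
+    injected into the Keras application.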
+ """ + self._use_atrous = output_stride == 8 + self._align_feature_maps = align_feature_maps + self._batchnorm_training = batchnorm_training + self._batchnorm_scale = batchnorm_scale + self._default_batchnorm_momentum = default_batchnorm_momentum + self._default_batchnorm_epsilon = default_batchnorm_epsilon + self.regularizer = tf.keras.regularizers.l2(weight_decay * 0.5) + + def Conv2D(self, filters, kernel_size, **kwargs): + """Builds a Conv2D layer according to the current Object Detection config. + + Overrides the Keras InceptionResnetV2 application's convolutions with ones + that follow the spec specified by the Object Detection hyperparameters. + + If feature map alignment is enabled, the padding will be forced to 'same'. + If output_stride is 8, some conv2d layers will be matched according to + their name or filter counts or pre-alignment padding parameters, and will + have the correct 'dilation rate' or 'strides' set. + + Args: + filters: The number of filters to use for the convolution. + kernel_size: The kernel size to specify the height and width of the 2D + convolution window. + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A Keras Conv2D layer specified by the Object Detection hyperparameter + configurations. + """ + kwargs['kernel_regularizer'] = self.regularizer + kwargs['bias_regularizer'] = self.regularizer + + # Because the Keras application does not set explicit names for most layers, + # (instead allowing names to auto-increment), we must match individual + # layers in the model according to their filter count, name, or + # pre-alignment mapping. This means we can only align the feature maps + # after we have applied our updates in cases where output_stride=8. + if self._use_atrous and (filters == 384): + kwargs['strides'] = 1 + + name = kwargs.get('name') + if self._use_atrous and ( + (name and 'block17' in name) or + (filters == 128 or filters == 160 or + (filters == 192 and kwargs.get('padding', '').lower() != 'valid'))): + kwargs['dilation_rate'] = 2 + + if self._align_feature_maps: + kwargs['padding'] = 'same' + + return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) + + def MaxPooling2D(self, pool_size, strides, **kwargs): + """Builds a pooling layer according to the current Object Detection config. + + Overrides the Keras InceptionResnetV2 application's MaxPooling2D layers with + ones that follow the spec specified by the Object Detection hyperparameters. + + If feature map alignment is enabled, the padding will be forced to 'same'. + If output_stride is 8, some pooling layers will be matched according to + their pre-alignment padding parameters, and will have their 'strides' + argument overridden. + + Args: + pool_size: The pool size specified by the Keras application. + strides: The strides specified by the unwrapped Keras application. + **kwargs: Keyword args specified by the Keras application for + constructing the max pooling layer. + + Returns: + A MaxPool2D layer specified by the Object Detection hyperparameter + configurations. + """ + if self._use_atrous and kwargs.get('padding', '').lower() == 'valid': + strides = 1 + + if self._align_feature_maps: + kwargs['padding'] = 'same' + + return tf.keras.layers.MaxPool2D(pool_size, strides=strides, **kwargs) + + # We alias MaxPool2D because Keras has that alias + MaxPool2D = MaxPooling2D # pylint: disable=invalid-name + + def BatchNormalization(self, **kwargs): + """Builds a normalization layer. 
+ + Overrides the Keras application batch norm with the norm specified by the + Object Detection configuration. + + Args: + **kwargs: Keyword arguments from the `layers.BatchNormalization` calls in + the Keras application. + + Returns: + A normalization layer specified by the Object Detection hyperparameter + configurations. + """ + kwargs['scale'] = self._batchnorm_scale + return freezable_batch_norm.FreezableBatchNorm( + training=self._batchnorm_training, + epsilon=self._default_batchnorm_epsilon, + momentum=self._default_batchnorm_momentum, + **kwargs) + + # Forward all non-overridden methods to the keras layers + def __getattr__(self, item): + return getattr(tf.keras.layers, item) + + +# pylint: disable=invalid-name +def inception_resnet_v2( + batchnorm_training, + output_stride=16, + align_feature_maps=False, + batchnorm_scale=False, + weight_decay=0.00004, + default_batchnorm_momentum=0.9997, + default_batchnorm_epsilon=0.001, + **kwargs): + """Instantiates the InceptionResnetV2 architecture. + + (Modified for object detection) + + This wraps the InceptionResnetV2 tensorflow Keras application, but uses the + Keras application's kwargs-based monkey-patching API to override the Keras + architecture with the following changes: + + - Supports freezing batch norm layers with FreezableBatchNorms + - Adds support for feature map alignment (like in the Slim model) + - Adds support for changing the output stride (like in the Slim model) + - Changes the default batchnorm momentum to 0.9997 + - Adds support for overriding various batchnorm hyperparameters + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + output_stride: A scalar that specifies the requested ratio of input to + output spatial resolution. Only supports 8 and 16. + align_feature_maps: When true, changes all the VALID paddings in the + network to SAME padding so that the feature maps are aligned. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale the + activations in the batch normalization layer. + weight_decay: the l2 regularization weight decay for weights variables. + (gets multiplied by 0.5 to map from slim l2 regularization weight to + Keras l2 regularization weight). + default_batchnorm_momentum: Float. Batch norm layers will be constructed + using this value as the momentum. + default_batchnorm_epsilon: small float added to variance to avoid + dividing by zero. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.InceptionResNetV2` method that constructs the + Keras model. + + Returns: + A Keras model instance. 
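+
+  Example (illustrative only; `weights=None` and `include_top=False` are
+  forwarded to the Keras application, mirroring how the accompanying tests
+  build the model):
+
+    model = inception_resnet_v2(
+        batchnorm_training=False, output_stride=16,
+        align_feature_maps=True, weights=None, include_top=False)
+    features = model(tf.zeros((2, 128, 128, 3)))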
+ """ + if output_stride != 8 and output_stride != 16: + raise ValueError('output_stride must be 8 or 16.') + + layers_override = _LayersOverride( + batchnorm_training, + output_stride, + align_feature_maps=align_feature_maps, + batchnorm_scale=batchnorm_scale, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + weight_decay=weight_decay) + return tf.keras.applications.InceptionResNetV2( + layers=layers_override, **kwargs) +# pylint: enable=invalid-name diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/inception_resnet_v2_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/inception_resnet_v2_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4cbcc54ad66985920e7739888b3542b6a1e48bca --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/inception_resnet_v2_tf2_test.py @@ -0,0 +1,228 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for inception_resnet_v2.py. + +This test mainly focuses on comparing slim inception resnet v2 and Keras +inception resnet v2 for object detection. To verify the consistency of the two +models, we compare: + 1. Output shape of each layer given different inputs + 2. Number of global variables + +We also visualize the model structure via Tensorboard, and compare the model +layout and the parameters of each Op to make sure the two implementations are +consistent. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unittest +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.models.keras_models import inception_resnet_v2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_KERAS_TO_SLIM_ENDPOINT_NAMES = { + 'activation': 'Conv2d_1a_3x3', + 'activation_1': 'Conv2d_2a_3x3', + 'activation_2': 'Conv2d_2b_3x3', + 'activation_3': 'Conv2d_3b_1x1', + 'activation_4': 'Conv2d_4a_3x3', + 'max_pooling2d': 'MaxPool_3a_3x3', + 'max_pooling2d_1': 'MaxPool_5a_3x3', + 'mixed_5b': 'Mixed_5b', + 'mixed_6a': 'Mixed_6a', + 'block17_20_ac': 'PreAuxLogits', + 'mixed_7a': 'Mixed_7a', + 'conv_7b_ac': 'Conv2d_7b_1x1', +} + +_SLIM_ENDPOINT_SHAPES_128 = { + 'Conv2d_1a_3x3': (2, 64, 64, 32), + 'Conv2d_2a_3x3': (2, 64, 64, 32), + 'Conv2d_2b_3x3': (2, 64, 64, 64), + 'Conv2d_3b_1x1': (2, 32, 32, 80), + 'Conv2d_4a_3x3': (2, 32, 32, 192), + 'Conv2d_7b_1x1': (2, 4, 4, 1536), + 'MaxPool_3a_3x3': (2, 32, 32, 64), + 'MaxPool_5a_3x3': (2, 16, 16, 192), + 'Mixed_5b': (2, 16, 16, 320), + 'Mixed_6a': (2, 8, 8, 1088), + 'Mixed_7a': (2, 4, 4, 2080), + 'PreAuxLogits': (2, 8, 8, 1088)} +_SLIM_ENDPOINT_SHAPES_128_STRIDE_8 = { + 'Conv2d_1a_3x3': (2, 64, 64, 32), + 'Conv2d_2a_3x3': (2, 64, 64, 32), + 'Conv2d_2b_3x3': (2, 64, 64, 64), + 'Conv2d_3b_1x1': (2, 32, 32, 80), + 'Conv2d_4a_3x3': (2, 32, 32, 192), + 'MaxPool_3a_3x3': (2, 32, 32, 64), + 'MaxPool_5a_3x3': (2, 16, 16, 192), + 'Mixed_5b': (2, 16, 16, 320), + 'Mixed_6a': (2, 16, 16, 1088), + 'PreAuxLogits': (2, 16, 16, 1088)} +_SLIM_ENDPOINT_SHAPES_128_ALIGN_FEATURE_MAPS_FALSE = { + 'Conv2d_1a_3x3': (2, 63, 63, 32), + 'Conv2d_2a_3x3': (2, 61, 61, 32), + 'Conv2d_2b_3x3': (2, 61, 61, 64), + 'Conv2d_3b_1x1': (2, 30, 30, 80), + 'Conv2d_4a_3x3': (2, 28, 28, 192), + 'Conv2d_7b_1x1': (2, 2, 2, 1536), + 'MaxPool_3a_3x3': (2, 30, 30, 64), + 'MaxPool_5a_3x3': (2, 13, 13, 192), + 'Mixed_5b': (2, 13, 13, 320), + 'Mixed_6a': (2, 6, 6, 1088), + 'Mixed_7a': (2, 2, 2, 2080), + 'PreAuxLogits': (2, 6, 6, 1088)} +_SLIM_ENDPOINT_SHAPES_299 = {} +_SLIM_ENDPOINT_SHAPES_299_STRIDE_8 = {} +_SLIM_ENDPOINT_SHAPES_299_ALIGN_FEATURE_MAPS_FALSE = {} + +_KERAS_LAYERS_TO_CHECK = list(_KERAS_TO_SLIM_ENDPOINT_NAMES.keys()) + +_NUM_CHANNELS = 3 +_BATCH_SIZE = 2 + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class InceptionResnetV2Test(test_case.TestCase): + + def _create_application_with_layer_outputs( + self, layer_names, batchnorm_training, + output_stride=16, + align_feature_maps=False, + batchnorm_scale=False, + weight_decay=0.00004, + default_batchnorm_momentum=0.9997, + default_batchnorm_epsilon=0.001,): + """Constructs Keras inception_resnet_v2 that extracts layer outputs.""" + # Have to clear the Keras backend to ensure isolation in layer naming + tf.keras.backend.clear_session() + if not layer_names: + layer_names = _KERAS_LAYERS_TO_CHECK + full_model = inception_resnet_v2.inception_resnet_v2( + batchnorm_training=batchnorm_training, + output_stride=output_stride, + align_feature_maps=align_feature_maps, + weights=None, + batchnorm_scale=batchnorm_scale, + weight_decay=weight_decay, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + include_top=False) + layer_outputs = [full_model.get_layer(name=layer).output + for layer in layer_names] + return tf.keras.Model( + inputs=full_model.inputs, + 
outputs=layer_outputs) + + def _check_returns_correct_shape( + self, image_height, image_width, + expected_feature_map_shape, layer_names=None, batchnorm_training=True, + output_stride=16, + align_feature_maps=False, + batchnorm_scale=False, + weight_decay=0.00004, + default_batchnorm_momentum=0.9997, + default_batchnorm_epsilon=0.001,): + if not layer_names: + layer_names = _KERAS_LAYERS_TO_CHECK + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=batchnorm_training, + output_stride=output_stride, + align_feature_maps=align_feature_maps, + batchnorm_scale=batchnorm_scale, + weight_decay=weight_decay, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon) + + image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width, + _NUM_CHANNELS).astype(np.float32) + feature_maps = model(image_tensor) + + for feature_map, layer_name in zip(feature_maps, layer_names): + endpoint_name = _KERAS_TO_SLIM_ENDPOINT_NAMES[layer_name] + expected_shape = expected_feature_map_shape[endpoint_name] + self.assertAllEqual(feature_map.shape, expected_shape) + + def _get_variables(self, layer_names=None): + tf.keras.backend.clear_session() + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False) + preprocessed_inputs = tf.random.uniform([4, 40, 40, _NUM_CHANNELS]) + model(preprocessed_inputs) + return model.variables + + def test_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + expected_feature_map_shape = ( + _SLIM_ENDPOINT_SHAPES_128) + self._check_returns_correct_shape( + image_height, image_width, expected_feature_map_shape, + align_feature_maps=True) + + def test_returns_correct_shapes_128_output_stride_8(self): + image_height = 128 + image_width = 128 + expected_feature_map_shape = ( + _SLIM_ENDPOINT_SHAPES_128_STRIDE_8) + + # Output stride of 8 not defined beyond 'block17_20_ac', which is + # PreAuxLogits in slim. So, we exclude those layers in our Keras vs Slim + # comparison. + excluded_layers = {'mixed_7a', 'conv_7b_ac'} + layer_names = [l for l in _KERAS_LAYERS_TO_CHECK + if l not in excluded_layers] + self._check_returns_correct_shape( + image_height, image_width, expected_feature_map_shape, + layer_names=layer_names, output_stride=8, align_feature_maps=True) + + def test_returns_correct_shapes_128_align_feature_maps_false( + self): + image_height = 128 + image_width = 128 + expected_feature_map_shape = ( + _SLIM_ENDPOINT_SHAPES_128_ALIGN_FEATURE_MAPS_FALSE) + self._check_returns_correct_shape( + image_height, image_width, expected_feature_map_shape, + align_feature_maps=False) + + def test_hyperparam_override(self): + model = inception_resnet_v2.inception_resnet_v2( + batchnorm_training=True, + default_batchnorm_momentum=0.2, + default_batchnorm_epsilon=0.1, + weights=None, + include_top=False) + bn_layer = model.get_layer(name='freezable_batch_norm') + self.assertAllClose(bn_layer.momentum, 0.2) + self.assertAllClose(bn_layer.epsilon, 0.1) + + def test_variable_count(self): + variables = self._get_variables() + # 896 is the number of variables from slim inception resnet v2 model. 
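+    # Matching the slim variable count is the coarse architectural-consistency
+    # check described in the module docstring (slim vs. Keras implementations).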
+ self.assertEqual(len(variables), 896) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v1.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..71c20e2cead25573d831290bcf14fbf1e1942c2e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v1.py @@ -0,0 +1,358 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A wrapper around the Keras MobilenetV1 models for object detection.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.core import freezable_batch_norm +from object_detection.models.keras_models import model_utils + + +def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name + """Pads the input along the spatial dimensions independently of input size. + + Pads the input such that if it was used in a convolution with 'VALID' padding, + the output would have the same dimensions as if the unpadded input was used + in a convolution with 'SAME' padding. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + kernel_size: The kernel to be used in the conv2d or max_pool2d operation. + rate: An integer, rate for atrous convolution. + + Returns: + output: A tensor of size [batch, height_out, width_out, channels] with the + input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). + """ + kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1), + kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)] + pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1] + pad_beg = [pad_total[0] // 2, pad_total[1] // 2] + pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]] + padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]], + [pad_beg[1], pad_end[1]], [0, 0]]) + return padded_inputs + + +class _LayersOverride(object): + """Alternative Keras layers interface for the Keras MobileNetV1.""" + + def __init__(self, + batchnorm_training, + default_batchnorm_momentum=0.999, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None): + """Alternative tf.keras.layers interface, for use by the Keras MobileNetV1. + + It is used by the Keras applications kwargs injection API to + modify the MobilenetV1 Keras application with changes required by + the Object Detection API. 
+ + These injected interfaces make the following changes to the network: + + - Applies the Object Detection hyperparameter configuration + - Supports FreezableBatchNorms + - Adds support for a min number of filters for each layer + - Makes the `alpha` parameter affect the final convolution block even if it + is less than 1.0 + - Adds support for explicit padding of convolutions + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default mobilenet_v1 layer builders. + use_explicit_padding: If True, use 'valid' padding for convolutions, + but explicitly pre-pads inputs so that the output dimensions are the + same as if 'same' padding were used. Off by default. + alpha: The width multiplier referenced in the MobileNetV1 paper. It + modifies the number of filters in each convolutional layer. It's called + depth multiplier in Keras application MobilenetV1. + min_depth: Minimum number of filters in the convolutional layers. + conv_defs: Network layout to specify the mobilenet_v1 body. Default is + `None` to use the default mobilenet_v1 network layout. + """ + self._alpha = alpha + self._batchnorm_training = batchnorm_training + self._default_batchnorm_momentum = default_batchnorm_momentum + self._conv_hyperparams = conv_hyperparams + self._use_explicit_padding = use_explicit_padding + self._min_depth = min_depth + self._conv_defs = conv_defs + self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5) + self.initializer = tf.truncated_normal_initializer(stddev=0.09) + + def _FixedPaddingLayer(self, kernel_size, rate=1): + return tf.keras.layers.Lambda( + lambda x: _fixed_padding(x, kernel_size, rate)) + + def Conv2D(self, filters, kernel_size, **kwargs): + """Builds a Conv2D layer according to the current Object Detection config. + + Overrides the Keras MobileNetV1 application's convolutions with ones that + follow the spec specified by the Object Detection hyperparameters. + + Args: + filters: The number of filters to use for the convolution. + kernel_size: The kernel size to specify the height and width of the 2D + convolution window. In this function, the kernel size is expected to + be pair of numbers and the numbers must be equal for this function. + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A one-arg callable that will either directly apply a Keras Conv2D layer to + the input argument, or that will first pad the input then apply a Conv2D + layer. + + Raises: + ValueError: if kernel size is not a pair of equal + integers (representing a square kernel). 
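+
+    Illustrative example of the width-multiplier logic implemented below:
+    with `alpha=0.25` and `min_depth=8`, a layer that nominally has 32
+    filters is built with `max(int(32 * 0.25), 8) = 8` filters.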
+ """ + if not isinstance(kernel_size, tuple): + raise ValueError('kernel is expected to be a tuple.') + if len(kernel_size) != 2: + raise ValueError('kernel is expected to be length two.') + if kernel_size[0] != kernel_size[1]: + raise ValueError('kernel is expected to be square.') + layer_name = kwargs['name'] + if self._conv_defs: + conv_filters = model_utils.get_conv_def(self._conv_defs, layer_name) + if conv_filters: + filters = conv_filters + # Apply the width multiplier and the minimum depth to the convolution layers + filters = int(filters * self._alpha) + if self._min_depth and filters < self._min_depth: + filters = self._min_depth + + if self._conv_hyperparams: + kwargs = self._conv_hyperparams.params(**kwargs) + else: + kwargs['kernel_regularizer'] = self.regularizer + kwargs['kernel_initializer'] = self.initializer + + kwargs['padding'] = 'same' + if self._use_explicit_padding and kernel_size[0] > 1: + kwargs['padding'] = 'valid' + def padded_conv(features): # pylint: disable=invalid-name + padded_features = self._FixedPaddingLayer(kernel_size)(features) + return tf.keras.layers.Conv2D( + filters, kernel_size, **kwargs)(padded_features) + return padded_conv + else: + return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) + + def DepthwiseConv2D(self, kernel_size, **kwargs): + """Builds a DepthwiseConv2D according to the Object Detection config. + + Overrides the Keras MobileNetV2 application's convolutions with ones that + follow the spec specified by the Object Detection hyperparameters. + + Args: + kernel_size: The kernel size to specify the height and width of the 2D + convolution window. + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A one-arg callable that will either directly apply a Keras DepthwiseConv2D + layer to the input argument, or that will first pad the input then apply + the depthwise convolution. + """ + if self._conv_hyperparams: + kwargs = self._conv_hyperparams.params(**kwargs) + # Both regularizer and initializaer also applies to depthwise layer in + # MobilenetV1, so we remap the kernel_* to depthwise_* here. + kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] + kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] + else: + kwargs['depthwise_regularizer'] = self.regularizer + kwargs['depthwise_initializer'] = self.initializer + + kwargs['padding'] = 'same' + if self._use_explicit_padding: + kwargs['padding'] = 'valid' + def padded_depthwise_conv(features): # pylint: disable=invalid-name + padded_features = self._FixedPaddingLayer(kernel_size)(features) + return tf.keras.layers.DepthwiseConv2D( + kernel_size, **kwargs)(padded_features) + return padded_depthwise_conv + else: + return tf.keras.layers.DepthwiseConv2D(kernel_size, **kwargs) + + def BatchNormalization(self, **kwargs): + """Builds a normalization layer. + + Overrides the Keras application batch norm with the norm specified by the + Object Detection configuration. + + Args: + **kwargs: Only the name is used, all other params ignored. + Required for matching `layers.BatchNormalization` calls in the Keras + application. + + Returns: + A normalization layer specified by the Object Detection hyperparameter + configurations. 
+ """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_batch_norm( + training=self._batchnorm_training, + name=name) + else: + return freezable_batch_norm.FreezableBatchNorm( + training=self._batchnorm_training, + epsilon=1e-3, + momentum=self._default_batchnorm_momentum, + name=name) + + def Input(self, shape): + """Builds an Input layer. + + Overrides the Keras application Input layer with one that uses a + tf.placeholder_with_default instead of a tf.placeholder. This is necessary + to ensure the application works when run on a TPU. + + Args: + shape: The shape for the input layer to use. (Does not include a dimension + for the batch size). + Returns: + An input layer for the specified shape that internally uses a + placeholder_with_default. + """ + default_size = 224 + default_batch_size = 1 + shape = list(shape) + default_shape = [default_size if dim is None else dim for dim in shape] + + input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) + + placeholder_with_default = tf.placeholder_with_default( + input=input_tensor, shape=[None] + shape) + return model_utils.input_layer(shape, placeholder_with_default) + + # pylint: disable=unused-argument + def ReLU(self, *args, **kwargs): + """Builds an activation layer. + + Overrides the Keras application ReLU with the activation specified by the + Object Detection configuration. + + Args: + *args: Ignored, required to match the `tf.keras.ReLU` interface + **kwargs: Only the name is used, + required to match `tf.keras.ReLU` interface + + Returns: + An activation layer specified by the Object Detection hyperparameter + configurations. + """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_activation_layer(name=name) + else: + return tf.keras.layers.Lambda(tf.nn.relu6, name=name) + # pylint: enable=unused-argument + + # pylint: disable=unused-argument + def ZeroPadding2D(self, padding, **kwargs): + """Replaces explicit padding in the Keras application with a no-op. + + Args: + padding: The padding values for image height and width. + **kwargs: Ignored, required to match the Keras applications usage. + + Returns: + A no-op identity lambda. + """ + return lambda x: x + # pylint: enable=unused-argument + + # Forward all non-overridden methods to the keras layers + def __getattr__(self, item): + return getattr(tf.keras.layers, item) + + +# pylint: disable=invalid-name +def mobilenet_v1(batchnorm_training, + default_batchnorm_momentum=0.9997, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None, + **kwargs): + """Instantiates the MobileNetV1 architecture, modified for object detection. + + This wraps the MobileNetV1 tensorflow Keras application, but uses the + Keras application's kwargs-based monkey-patching API to override the Keras + architecture with the following changes: + + - Changes the default batchnorm momentum to 0.9997 + - Applies the Object Detection hyperparameter configuration + - Supports FreezableBatchNorms + - Adds support for a min number of filters for each layer + - Makes the `alpha` parameter affect the final convolution block even if it + is less than 1.0 + - Adds support for explicit padding of convolutions + - Makes the Input layer use a tf.placeholder_with_default instead of a + tf.placeholder, to work on TPUs. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. 
+ default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default mobilenet_v1 layer builders. + use_explicit_padding: If True, use 'valid' padding for convolutions, + but explicitly pre-pads inputs so that the output dimensions are the + same as if 'same' padding were used. Off by default. + alpha: The width multiplier referenced in the MobileNetV1 paper. It + modifies the number of filters in each convolutional layer. + min_depth: Minimum number of filters in the convolutional layers. + conv_defs: Network layout to specify the mobilenet_v1 body. Default is + `None` to use the default mobilenet_v1 network layout. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.Mobilenet` method that constructs the Keras + model. + + Returns: + A Keras model instance. + """ + layers_override = _LayersOverride( + batchnorm_training, + default_batchnorm_momentum=default_batchnorm_momentum, + conv_hyperparams=conv_hyperparams, + use_explicit_padding=use_explicit_padding, + min_depth=min_depth, + alpha=alpha, + conv_defs=conv_defs) + return tf.keras.applications.MobileNet( + alpha=alpha, layers=layers_override, **kwargs) +# pylint: enable=invalid-name diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v1_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v1_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7e46999d9dfd2fc4ddcd2c432f5ecc2a07f3a9eb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v1_tf2_test.py @@ -0,0 +1,256 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for mobilenet_v1.py. + +This test mainly focuses on comparing slim MobilenetV1 and Keras MobilenetV1 for +object detection. To verify the consistency of the two models, we compare: + 1. Output shape of each layer given different inputs + 2. Number of global variables + +We also visualize the model structure via Tensorboard, and compare the model +layout and the parameters of each Op to make sure the two implementations are +consistent. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unittest +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.models.keras_models import mobilenet_v1 +from object_detection.models.keras_models import model_utils +from object_detection.models.keras_models import test_utils +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_KERAS_LAYERS_TO_CHECK = [ + 'conv1_relu', + 'conv_dw_1_relu', 'conv_pw_1_relu', + 'conv_dw_2_relu', 'conv_pw_2_relu', + 'conv_dw_3_relu', 'conv_pw_3_relu', + 'conv_dw_4_relu', 'conv_pw_4_relu', + 'conv_dw_5_relu', 'conv_pw_5_relu', + 'conv_dw_6_relu', 'conv_pw_6_relu', + 'conv_dw_7_relu', 'conv_pw_7_relu', + 'conv_dw_8_relu', 'conv_pw_8_relu', + 'conv_dw_9_relu', 'conv_pw_9_relu', + 'conv_dw_10_relu', 'conv_pw_10_relu', + 'conv_dw_11_relu', 'conv_pw_11_relu', + 'conv_dw_12_relu', 'conv_pw_12_relu', + 'conv_dw_13_relu', 'conv_pw_13_relu', +] + +_NUM_CHANNELS = 3 +_BATCH_SIZE = 2 + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MobilenetV1Test(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + train: true, + scale: false, + center: true, + decay: 0.2, + epsilon: 0.1, + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _create_application_with_layer_outputs( + self, layer_names, batchnorm_training, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None): + """Constructs Keras MobilenetV1 that extracts intermediate layer outputs.""" + if not layer_names: + layer_names = _KERAS_LAYERS_TO_CHECK + full_model = mobilenet_v1.mobilenet_v1( + batchnorm_training=batchnorm_training, + conv_hyperparams=conv_hyperparams, + weights=None, + use_explicit_padding=use_explicit_padding, + alpha=alpha, + min_depth=min_depth, + conv_defs=conv_defs, + include_top=False) + layer_outputs = [full_model.get_layer(name=layer).output + for layer in layer_names] + return tf.keras.Model( + inputs=full_model.inputs, + outputs=layer_outputs) + + def _check_returns_correct_shape( + self, image_height, image_width, depth_multiplier, + expected_feature_map_shape, use_explicit_padding=False, min_depth=8, + layer_names=None, conv_defs=None): + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, + use_explicit_padding=use_explicit_padding, + min_depth=min_depth, + alpha=depth_multiplier, + conv_defs=conv_defs) + + image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width, + _NUM_CHANNELS).astype(np.float32) + feature_maps = model(image_tensor) + + for feature_map, expected_shape in zip(feature_maps, + expected_feature_map_shape): + self.assertAllEqual(feature_map.shape, expected_shape) + + def _check_returns_correct_shapes_with_dynamic_inputs( + self, image_height, image_width, depth_multiplier, + expected_feature_map_shape, use_explicit_padding=False, min_depth=8, + layer_names=None): + image_tensor = 
tf.random_uniform([_BATCH_SIZE, image_height, image_width, + _NUM_CHANNELS], dtype=tf.float32) + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, + use_explicit_padding=use_explicit_padding, + alpha=depth_multiplier) + + feature_maps = model(image_tensor) + + for feature_map, expected_shape in zip(feature_maps, + expected_feature_map_shape): + self.assertAllEqual(feature_map.shape, expected_shape) + + def _get_variables(self, depth_multiplier, layer_names=None): + tf.keras.backend.clear_session() + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, use_explicit_padding=False, + alpha=depth_multiplier) + preprocessed_inputs = tf.random.uniform([2, 40, 40, 3]) + model(preprocessed_inputs) + return model.variables + + def test_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v1_expected_feature_map_shape_128) + self._check_returns_correct_shape( + image_height, image_width, depth_multiplier, expected_feature_map_shape) + + def test_returns_correct_shapes_128_explicit_padding( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v1_expected_feature_map_shape_128_explicit_padding) + self._check_returns_correct_shape( + image_height, image_width, depth_multiplier, expected_feature_map_shape, + use_explicit_padding=True) + + def test_returns_correct_shapes_with_dynamic_inputs( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.mobilenet_v1_expected_feature_map_shape_with_dynamic_inputs) + self._check_returns_correct_shapes_with_dynamic_inputs( + image_height, image_width, depth_multiplier, expected_feature_map_shape) + + def test_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v1_expected_feature_map_shape_299) + self._check_returns_correct_shape( + image_height, image_width, depth_multiplier, expected_feature_map_shape) + + def test_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + expected_feature_map_shape = ( + test_utils.moblenet_v1_expected_feature_map_shape_enforcing_min_depth) + self._check_returns_correct_shape( + image_height, image_width, depth_multiplier, expected_feature_map_shape) + + def test_returns_correct_shapes_with_conv_defs( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + conv_def_block_12 = model_utils.ConvDefs( + conv_name='conv_pw_12', filters=512) + conv_def_block_13 = model_utils.ConvDefs( + conv_name='conv_pw_13', filters=256) + conv_defs = [conv_def_block_12, conv_def_block_13] + + expected_feature_map_shape = ( + test_utils.moblenet_v1_expected_feature_map_shape_with_conv_defs) + self._check_returns_correct_shape( + image_height, image_width, depth_multiplier, expected_feature_map_shape, + conv_defs=conv_defs) + + def test_hyperparam_override(self): + hyperparams = self._build_conv_hyperparams() + model = mobilenet_v1.mobilenet_v1( + batchnorm_training=True, + conv_hyperparams=hyperparams, + weights=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=32, + include_top=False) + hyperparams.params() + bn_layer = model.get_layer(name='conv_pw_5_bn') + self.assertAllClose(bn_layer.momentum, 
0.2) + self.assertAllClose(bn_layer.epsilon, 0.1) + + def test_variable_count(self): + depth_multiplier = 1 + variables = self._get_variables(depth_multiplier) + # 135 is the number of variables from slim MobilenetV1 model. + self.assertEqual(len(variables), 135) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..b534cfbb182e333fc15d06b391b51ba8b3a03c4d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v2.py @@ -0,0 +1,334 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A wrapper around the MobileNet v2 models for Keras, for object detection.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from object_detection.core import freezable_batch_norm +from object_detection.models.keras_models import model_utils +from object_detection.utils import ops + + +# pylint: disable=invalid-name +# This method copied from the slim mobilenet base network code (same license) +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class _LayersOverride(object): + """Alternative Keras layers interface for the Keras MobileNetV2.""" + + def __init__(self, + batchnorm_training, + default_batchnorm_momentum=0.999, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None): + """Alternative tf.keras.layers interface, for use by the Keras MobileNetV2. + + It is used by the Keras applications kwargs injection API to + modify the Mobilenet v2 Keras application with changes required by + the Object Detection API. + + These injected interfaces make the following changes to the network: + + - Applies the Object Detection hyperparameter configuration + - Supports FreezableBatchNorms + - Adds support for a min number of filters for each layer + - Makes the `alpha` parameter affect the final convolution block even if it + is less than 1.0 + - Adds support for explicit padding of convolutions + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. 
+ conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default mobilenet_v2 layer builders. + use_explicit_padding: If True, use 'valid' padding for convolutions, + but explicitly pre-pads inputs so that the output dimensions are the + same as if 'same' padding were used. Off by default. + alpha: The width multiplier referenced in the MobileNetV2 paper. It + modifies the number of filters in each convolutional layer. + min_depth: Minimum number of filters in the convolutional layers. + conv_defs: Network layout to specify the mobilenet_v2 body. Default is + `None` to use the default mobilenet_v2 network layout. + """ + self._alpha = alpha + self._batchnorm_training = batchnorm_training + self._default_batchnorm_momentum = default_batchnorm_momentum + self._conv_hyperparams = conv_hyperparams + self._use_explicit_padding = use_explicit_padding + self._min_depth = min_depth + self._conv_defs = conv_defs + self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5) + self.initializer = tf.truncated_normal_initializer(stddev=0.09) + + def _FixedPaddingLayer(self, kernel_size): + return tf.keras.layers.Lambda(lambda x: ops.fixed_padding(x, kernel_size)) + + def Conv2D(self, filters, **kwargs): + """Builds a Conv2D layer according to the current Object Detection config. + + Overrides the Keras MobileNetV2 application's convolutions with ones that + follow the spec specified by the Object Detection hyperparameters. + + Args: + filters: The number of filters to use for the convolution. + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A one-arg callable that will either directly apply a Keras Conv2D layer to + the input argument, or that will first pad the input then apply a Conv2D + layer. + """ + # Make sure 'alpha' is always applied to the last convolution block's size + # (This overrides the Keras application's functionality) + layer_name = kwargs.get('name') + if layer_name == 'Conv_1': + if self._conv_defs: + filters = model_utils.get_conv_def(self._conv_defs, 'Conv_1') + else: + filters = 1280 + if self._alpha < 1.0: + filters = _make_divisible(filters * self._alpha, 8) + + # Apply the minimum depth to the convolution layers + if (self._min_depth and (filters < self._min_depth) + and not kwargs.get('name').endswith('expand')): + filters = self._min_depth + + if self._conv_hyperparams: + kwargs = self._conv_hyperparams.params(**kwargs) + else: + kwargs['kernel_regularizer'] = self.regularizer + kwargs['kernel_initializer'] = self.initializer + + kwargs['padding'] = 'same' + kernel_size = kwargs.get('kernel_size') + if self._use_explicit_padding and kernel_size > 1: + kwargs['padding'] = 'valid' + def padded_conv(features): + padded_features = self._FixedPaddingLayer(kernel_size)(features) + return tf.keras.layers.Conv2D(filters, **kwargs)(padded_features) + + return padded_conv + else: + return tf.keras.layers.Conv2D(filters, **kwargs) + + def DepthwiseConv2D(self, **kwargs): + """Builds a DepthwiseConv2D according to the Object Detection config. + + Overrides the Keras MobileNetV2 application's convolutions with ones that + follow the spec specified by the Object Detection hyperparameters. + + Args: + **kwargs: Keyword args specified by the Keras application for + constructing the convolution. 
+ + Returns: + A one-arg callable that will either directly apply a Keras DepthwiseConv2D + layer to the input argument, or that will first pad the input then apply + the depthwise convolution. + """ + if self._conv_hyperparams: + kwargs = self._conv_hyperparams.params(**kwargs) + # Both the regularizer and initializer apply to the depthwise layer in + # MobilenetV1, so we remap the kernel_* to depthwise_* here. + kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] + kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] + else: + kwargs['depthwise_regularizer'] = self.regularizer + kwargs['depthwise_initializer'] = self.initializer + + kwargs['padding'] = 'same' + kernel_size = kwargs.get('kernel_size') + if self._use_explicit_padding and kernel_size > 1: + kwargs['padding'] = 'valid' + def padded_depthwise_conv(features): + padded_features = self._FixedPaddingLayer(kernel_size)(features) + return tf.keras.layers.DepthwiseConv2D(**kwargs)(padded_features) + + return padded_depthwise_conv + else: + return tf.keras.layers.DepthwiseConv2D(**kwargs) + + def BatchNormalization(self, **kwargs): + """Builds a normalization layer. + + Overrides the Keras application batch norm with the norm specified by the + Object Detection configuration. + + Args: + **kwargs: Only the name is used, all other params ignored. + Required for matching `layers.BatchNormalization` calls in the Keras + application. + + Returns: + A normalization layer specified by the Object Detection hyperparameter + configurations. + """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_batch_norm( + training=self._batchnorm_training, + name=name) + else: + return freezable_batch_norm.FreezableBatchNorm( + training=self._batchnorm_training, + epsilon=1e-3, + momentum=self._default_batchnorm_momentum, + name=name) + + def Input(self, shape): + """Builds an Input layer. + + Overrides the Keras application Input layer with one that uses a + tf.placeholder_with_default instead of a tf.placeholder. This is necessary + to ensure the application works when run on a TPU. + + Args: + shape: The shape for the input layer to use. (Does not include a dimension + for the batch size). + Returns: + An input layer for the specified shape that internally uses a + placeholder_with_default. + """ + default_size = 224 + default_batch_size = 1 + shape = list(shape) + default_shape = [default_size if dim is None else dim for dim in shape] + + input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) + + placeholder_with_default = tf.placeholder_with_default( + input=input_tensor, shape=[None] + shape) + return model_utils.input_layer(shape, placeholder_with_default) + + # pylint: disable=unused-argument + def ReLU(self, *args, **kwargs): + """Builds an activation layer. + + Overrides the Keras application ReLU with the activation specified by the + Object Detection configuration. + + Args: + *args: Ignored, required to match the `tf.keras.ReLU` interface + **kwargs: Only the name is used, + required to match `tf.keras.ReLU` interface + + Returns: + An activation layer specified by the Object Detection hyperparameter + configurations. 
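+      (When `conv_hyperparams` is not provided, this falls back to a
+      `tf.nn.relu6` Lambda layer, matching MobileNet's usual ReLU6
+      activation.)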
+ """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_activation_layer(name=name) + else: + return tf.keras.layers.Lambda(tf.nn.relu6, name=name) + # pylint: enable=unused-argument + + # pylint: disable=unused-argument + def ZeroPadding2D(self, **kwargs): + """Replaces explicit padding in the Keras application with a no-op. + + Args: + **kwargs: Ignored, required to match the Keras applications usage. + + Returns: + A no-op identity lambda. + """ + return lambda x: x + # pylint: enable=unused-argument + + # Forward all non-overridden methods to the keras layers + def __getattr__(self, item): + return getattr(tf.keras.layers, item) + + +def mobilenet_v2(batchnorm_training, + default_batchnorm_momentum=0.9997, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None, + **kwargs): + """Instantiates the MobileNetV2 architecture, modified for object detection. + + This wraps the MobileNetV2 tensorflow Keras application, but uses the + Keras application's kwargs-based monkey-patching API to override the Keras + architecture with the following changes: + + - Changes the default batchnorm momentum to 0.9997 + - Applies the Object Detection hyperparameter configuration + - Supports FreezableBatchNorms + - Adds support for a min number of filters for each layer + - Makes the `alpha` parameter affect the final convolution block even if it + is less than 1.0 + - Adds support for explicit padding of convolutions + - Makes the Input layer use a tf.placeholder_with_default instead of a + tf.placeholder, to work on TPUs. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default mobilenet_v2 layer builders. + use_explicit_padding: If True, use 'valid' padding for convolutions, + but explicitly pre-pads inputs so that the output dimensions are the + same as if 'same' padding were used. Off by default. + alpha: The width multiplier referenced in the MobileNetV2 paper. It + modifies the number of filters in each convolutional layer. + min_depth: Minimum number of filters in the convolutional layers. + conv_defs: Network layout to specify the mobilenet_v2 body. Default is + `None` to use the default mobilenet_v2 network layout. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.MobilenetV2` method that constructs the Keras + model. + + Returns: + A Keras model instance. 
+ """ + layers_override = _LayersOverride( + batchnorm_training, + default_batchnorm_momentum=default_batchnorm_momentum, + conv_hyperparams=conv_hyperparams, + use_explicit_padding=use_explicit_padding, + min_depth=min_depth, + alpha=alpha, + conv_defs=conv_defs) + return tf.keras.applications.MobileNetV2(alpha=alpha, + layers=layers_override, + **kwargs) +# pylint: enable=invalid-name diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v2_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v2_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2a53a9b63f28522197bc3daab29dab3a56dfb994 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/mobilenet_v2_tf2_test.py @@ -0,0 +1,250 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for mobilenet_v2.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unittest +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.models.keras_models import mobilenet_v2 +from object_detection.models.keras_models import model_utils +from object_detection.models.keras_models import test_utils +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_layers_to_check = [ + 'Conv1_relu', + 'block_1_expand_relu', 'block_1_depthwise_relu', 'block_1_project_BN', + 'block_2_expand_relu', 'block_2_depthwise_relu', 'block_2_project_BN', + 'block_3_expand_relu', 'block_3_depthwise_relu', 'block_3_project_BN', + 'block_4_expand_relu', 'block_4_depthwise_relu', 'block_4_project_BN', + 'block_5_expand_relu', 'block_5_depthwise_relu', 'block_5_project_BN', + 'block_6_expand_relu', 'block_6_depthwise_relu', 'block_6_project_BN', + 'block_7_expand_relu', 'block_7_depthwise_relu', 'block_7_project_BN', + 'block_8_expand_relu', 'block_8_depthwise_relu', 'block_8_project_BN', + 'block_9_expand_relu', 'block_9_depthwise_relu', 'block_9_project_BN', + 'block_10_expand_relu', 'block_10_depthwise_relu', 'block_10_project_BN', + 'block_11_expand_relu', 'block_11_depthwise_relu', 'block_11_project_BN', + 'block_12_expand_relu', 'block_12_depthwise_relu', 'block_12_project_BN', + 'block_13_expand_relu', 'block_13_depthwise_relu', 'block_13_project_BN', + 'block_14_expand_relu', 'block_14_depthwise_relu', 'block_14_project_BN', + 'block_15_expand_relu', 'block_15_depthwise_relu', 'block_15_project_BN', + 'block_16_expand_relu', 'block_16_depthwise_relu', 'block_16_project_BN', + 'out_relu'] + + 
+@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MobilenetV2Test(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + batch_norm { + train: true, + scale: false, + center: true, + decay: 0.2, + epsilon: 0.1, + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _create_application_with_layer_outputs( + self, layer_names, batchnorm_training, + conv_hyperparams=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=None, + conv_defs=None): + """Constructs Keras mobilenetv2 that extracts intermediate layer outputs.""" + # Have to clear the Keras backend to ensure isolation in layer naming + tf.keras.backend.clear_session() + if not layer_names: + layer_names = _layers_to_check + full_model = mobilenet_v2.mobilenet_v2( + batchnorm_training=batchnorm_training, + conv_hyperparams=conv_hyperparams, + weights=None, + use_explicit_padding=use_explicit_padding, + alpha=alpha, + min_depth=min_depth, + include_top=False, + conv_defs=conv_defs) + layer_outputs = [full_model.get_layer(name=layer).output + for layer in layer_names] + return tf.keras.Model( + inputs=full_model.inputs, + outputs=layer_outputs) + + def _check_returns_correct_shape( + self, batch_size, image_height, image_width, depth_multiplier, + expected_feature_map_shapes, use_explicit_padding=False, min_depth=None, + layer_names=None, conv_defs=None): + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, + use_explicit_padding=use_explicit_padding, + min_depth=min_depth, + alpha=depth_multiplier, + conv_defs=conv_defs) + + image_tensor = np.random.rand(batch_size, image_height, image_width, + 3).astype(np.float32) + feature_maps = model([image_tensor]) + + for feature_map, expected_shape in zip(feature_maps, + expected_feature_map_shapes): + self.assertAllEqual(feature_map.shape, expected_shape) + + def _check_returns_correct_shapes_with_dynamic_inputs( + self, batch_size, image_height, image_width, depth_multiplier, + expected_feature_map_shapes, use_explicit_padding=False, + layer_names=None): + height = tf.random.uniform([], minval=image_height, maxval=image_height+1, + dtype=tf.int32) + width = tf.random.uniform([], minval=image_width, maxval=image_width+1, + dtype=tf.int32) + image_tensor = tf.random.uniform([batch_size, height, width, + 3], dtype=tf.float32) + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, use_explicit_padding=use_explicit_padding, + alpha=depth_multiplier) + feature_maps = model(image_tensor) + for feature_map, expected_shape in zip(feature_maps, + expected_feature_map_shapes): + self.assertAllEqual(feature_map.shape, expected_shape) + + def _get_variables(self, depth_multiplier, layer_names=None): + tf.keras.backend.clear_session() + model = self._create_application_with_layer_outputs( + layer_names=layer_names, + batchnorm_training=False, use_explicit_padding=False, + alpha=depth_multiplier) + preprocessed_inputs = tf.random.uniform([2, 40, 40, 3]) + model(preprocessed_inputs) + return model.variables + + def test_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + 
test_utils.moblenet_v2_expected_feature_map_shape_128) + + self._check_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape) + + def test_returns_correct_shapes_128_explicit_padding( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v2_expected_feature_map_shape_128_explicit_padding) + self._check_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape, use_explicit_padding=True) + + def test_returns_correct_shapes_with_dynamic_inputs( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.mobilenet_v2_expected_feature_map_shape_with_dynamic_inputs) + self._check_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape) + + def test_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + expected_feature_map_shape = ( + test_utils.moblenet_v2_expected_feature_map_shape_299) + self._check_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape) + + def test_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + expected_feature_map_shape = ( + test_utils.moblenet_v2_expected_feature_map_shape_enforcing_min_depth) + self._check_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape, min_depth=32) + + def test_returns_correct_shapes_with_conv_defs( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + conv_1 = model_utils.ConvDefs( + conv_name='Conv_1', filters=256) + conv_defs = [conv_1] + + expected_feature_map_shape = ( + test_utils.moblenet_v2_expected_feature_map_shape_with_conv_defs) + self._check_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, + expected_feature_map_shape, conv_defs=conv_defs) + + def test_hyperparam_override(self): + hyperparams = self._build_conv_hyperparams() + model = mobilenet_v2.mobilenet_v2( + batchnorm_training=True, + conv_hyperparams=hyperparams, + weights=None, + use_explicit_padding=False, + alpha=1.0, + min_depth=32, + include_top=False) + hyperparams.params() + bn_layer = model.get_layer(name='block_5_project_BN') + self.assertAllClose(bn_layer.momentum, 0.2) + self.assertAllClose(bn_layer.epsilon, 0.1) + + def test_variable_count(self): + depth_multiplier = 1 + variables = self._get_variables(depth_multiplier) + self.assertEqual(len(variables), 260) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/model_utils.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/model_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..77f3cbd15d7981bd30876b1aebd62f8372a04288 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/model_utils.py @@ -0,0 +1,53 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utils for Keras models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import tensorflow.compat.v1 as tf + +# This is to specify the custom config of model structures. For example, +# ConvDefs(conv_name='conv_pw_12', filters=512) for Mobilenet V1 is to specify +# the filters of the conv layer with name 'conv_pw_12' as 512.s +ConvDefs = collections.namedtuple('ConvDefs', ['conv_name', 'filters']) + + +def get_conv_def(conv_defs, layer_name): + """Get the custom config for some layer of the model structure. + + Args: + conv_defs: A named tuple to specify the custom config of the model + network. See `ConvDefs` for details. + layer_name: A string, the name of the layer to be customized. + + Returns: + The number of filters for the layer, or `None` if there is no custom + config for the requested layer. + """ + for conv_def in conv_defs: + if layer_name == conv_def.conv_name: + return conv_def.filters + return None + + +def input_layer(shape, placeholder_with_default): + if tf.executing_eagerly(): + return tf.keras.layers.Input(shape=shape) + else: + return tf.keras.layers.Input(tensor=placeholder_with_default) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/resnet_v1.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/resnet_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..e4c7bf250abd1ca01eb9be33d7a3dfea2ccd475e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/resnet_v1.py @@ -0,0 +1,541 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A wrapper around the Keras Resnet V1 models for object detection.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +from tensorflow.python.keras.applications import resnet +from object_detection.core import freezable_batch_norm +from object_detection.models.keras_models import model_utils + + +def _fixed_padding(inputs, kernel_size, rate=1): # pylint: disable=invalid-name + """Pads the input along the spatial dimensions independently of input size. 
+ + Pads the input such that if it was used in a convolution with 'VALID' padding, + the output would have the same dimensions as if the unpadded input was used + in a convolution with 'SAME' padding. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + kernel_size: The kernel to be used in the conv2d or max_pool2d operation. + rate: An integer, rate for atrous convolution. + + Returns: + output: A tensor of size [batch, height_out, width_out, channels] with the + input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). + """ + kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + padded_inputs = tf.pad( + inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) + return padded_inputs + + +class _LayersOverride(object): + """Alternative Keras layers interface for the Keras Resnet V1.""" + + def __init__(self, + batchnorm_training, + batchnorm_scale=True, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5, + weight_decay=0.0001, + conv_hyperparams=None, + min_depth=8, + depth_multiplier=1): + """Alternative tf.keras.layers interface, for use by the Keras Resnet V1. + + The class is used by the Keras applications kwargs injection API to + modify the Resnet V1 Keras application with changes required by + the Object Detection API. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale + the activations in the batch normalization layer. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the epsilon. + weight_decay: The weight decay to use for regularizing the model. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default resnet_v1 layer builders. + min_depth: Minimum number of filters in the convolutional layers. + depth_multiplier: The depth multiplier to modify the number of filters + in the convolutional layers. + """ + self._batchnorm_training = batchnorm_training + self._batchnorm_scale = batchnorm_scale + self._default_batchnorm_momentum = default_batchnorm_momentum + self._default_batchnorm_epsilon = default_batchnorm_epsilon + self._conv_hyperparams = conv_hyperparams + self._min_depth = min_depth + self._depth_multiplier = depth_multiplier + self.regularizer = tf.keras.regularizers.l2(weight_decay) + self.initializer = tf.variance_scaling_initializer() + + def _FixedPaddingLayer(self, kernel_size, rate=1): # pylint: disable=invalid-name + return tf.keras.layers.Lambda( + lambda x: _fixed_padding(x, kernel_size, rate)) + + def Conv2D(self, filters, kernel_size, **kwargs): # pylint: disable=invalid-name + """Builds a Conv2D layer according to the current Object Detection config. + + Overrides the Keras Resnet application's convolutions with ones that + follow the spec specified by the Object Detection hyperparameters. + + Args: + filters: The number of filters to use for the convolution. + kernel_size: The kernel size to specify the height and width of the 2D + convolution window. 
+ **kwargs: Keyword args specified by the Keras application for + constructing the convolution. + + Returns: + A one-arg callable that will either directly apply a Keras Conv2D layer to + the input argument, or that will first pad the input then apply a Conv2D + layer. + """ + # Apply the minimum depth to the convolution layers. + filters = max(int(filters * self._depth_multiplier), self._min_depth) + + if self._conv_hyperparams: + kwargs = self._conv_hyperparams.params(**kwargs) + else: + kwargs['kernel_regularizer'] = self.regularizer + kwargs['kernel_initializer'] = self.initializer + + # Set use_bias as false to keep it consistent with Slim Resnet model. + kwargs['use_bias'] = False + + kwargs['padding'] = 'same' + stride = kwargs.get('strides') + if stride and kernel_size and stride > 1 and kernel_size > 1: + kwargs['padding'] = 'valid' + def padded_conv(features): # pylint: disable=invalid-name + padded_features = self._FixedPaddingLayer(kernel_size)(features) + return tf.keras.layers.Conv2D( + filters, kernel_size, **kwargs)(padded_features) + return padded_conv + else: + return tf.keras.layers.Conv2D(filters, kernel_size, **kwargs) + + def Activation(self, *args, **kwargs): # pylint: disable=unused-argument,invalid-name + """Builds an activation layer. + + Overrides the Keras application Activation layer specified by the + Object Detection configuration. + + Args: + *args: Ignored, + required to match the `tf.keras.layers.Activation` interface. + **kwargs: Only the name is used, + required to match `tf.keras.layers.Activation` interface. + + Returns: + An activation layer specified by the Object Detection hyperparameter + configurations. + """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_activation_layer(name=name) + else: + return tf.keras.layers.Lambda(tf.nn.relu, name=name) + + def BatchNormalization(self, **kwargs): # pylint: disable=invalid-name + """Builds a normalization layer. + + Overrides the Keras application batch norm with the norm specified by the + Object Detection configuration. + + Args: + **kwargs: Only the name is used, all other params ignored. + Required for matching `layers.BatchNormalization` calls in the Keras + application. + + Returns: + A normalization layer specified by the Object Detection hyperparameter + configurations. + """ + name = kwargs.get('name') + if self._conv_hyperparams: + return self._conv_hyperparams.build_batch_norm( + training=self._batchnorm_training, + name=name) + else: + kwargs['scale'] = self._batchnorm_scale + kwargs['epsilon'] = self._default_batchnorm_epsilon + return freezable_batch_norm.FreezableBatchNorm( + training=self._batchnorm_training, + momentum=self._default_batchnorm_momentum, + **kwargs) + + def Input(self, shape): # pylint: disable=invalid-name + """Builds an Input layer. + + Overrides the Keras application Input layer with one that uses a + tf.placeholder_with_default instead of a tf.placeholder. This is necessary + to ensure the application works when run on a TPU. + + Args: + shape: A tuple of integers representing the shape of the input, which + includes both spatial share and channels, but not the batch size. + Elements of this tuple can be None; 'None' elements represent dimensions + where the shape is not known. + + Returns: + An input layer for the specified shape that internally uses a + placeholder_with_default. 
+ """ + default_size = 224 + default_batch_size = 1 + shape = list(shape) + default_shape = [default_size if dim is None else dim for dim in shape] + + input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape) + + placeholder_with_default = tf.placeholder_with_default( + input=input_tensor, shape=[None] + shape) + return model_utils.input_layer(shape, placeholder_with_default) + + def MaxPooling2D(self, pool_size, **kwargs): # pylint: disable=invalid-name + """Builds a MaxPooling2D layer with default padding as 'SAME'. + + This is specified by the default resnet arg_scope in slim. + + Args: + pool_size: The pool size specified by the Keras application. + **kwargs: Ignored, required to match the Keras applications usage. + + Returns: + A MaxPooling2D layer with default padding as 'SAME'. + """ + kwargs['padding'] = 'same' + return tf.keras.layers.MaxPooling2D(pool_size, **kwargs) + + # Add alias as Keras also has it. + MaxPool2D = MaxPooling2D # pylint: disable=invalid-name + + def ZeroPadding2D(self, padding, **kwargs): # pylint: disable=unused-argument,invalid-name + """Replaces explicit padding in the Keras application with a no-op. + + Args: + padding: The padding values for image height and width. + **kwargs: Ignored, required to match the Keras applications usage. + + Returns: + A no-op identity lambda. + """ + return lambda x: x + + # Forward all non-overridden methods to the keras layers + def __getattr__(self, item): + return getattr(tf.keras.layers, item) + + +# pylint: disable=invalid-name +def resnet_v1_50(batchnorm_training, + batchnorm_scale=True, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5, + weight_decay=0.0001, + conv_hyperparams=None, + min_depth=8, + depth_multiplier=1, + **kwargs): + """Instantiates the Resnet50 architecture, modified for object detection. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale + the activations in the batch normalization layer. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the epsilon. + weight_decay: The weight decay to use for regularizing the model. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default resnet_v1 layer builders. + min_depth: Minimum number of filters in the convolutional layers. + depth_multiplier: The depth multiplier to modify the number of filters + in the convolutional layers. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.Mobilenet` method that constructs the Keras + model. + + Returns: + A Keras ResnetV1-50 model instance. 
+ """ + layers_override = _LayersOverride( + batchnorm_training, + batchnorm_scale=batchnorm_scale, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + conv_hyperparams=conv_hyperparams, + weight_decay=weight_decay, + min_depth=min_depth, + depth_multiplier=depth_multiplier) + return tf.keras.applications.resnet.ResNet50( + layers=layers_override, **kwargs) + + +def resnet_v1_101(batchnorm_training, + batchnorm_scale=True, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5, + weight_decay=0.0001, + conv_hyperparams=None, + min_depth=8, + depth_multiplier=1, + **kwargs): + """Instantiates the Resnet50 architecture, modified for object detection. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale + the activations in the batch normalization layer. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the epsilon. + weight_decay: The weight decay to use for regularizing the model. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default resnet_v1 layer builders. + min_depth: Minimum number of filters in the convolutional layers. + depth_multiplier: The depth multiplier to modify the number of filters + in the convolutional layers. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.Mobilenet` method that constructs the Keras + model. + + Returns: + A Keras ResnetV1-101 model instance. + """ + layers_override = _LayersOverride( + batchnorm_training, + batchnorm_scale=batchnorm_scale, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + conv_hyperparams=conv_hyperparams, + weight_decay=weight_decay, + min_depth=min_depth, + depth_multiplier=depth_multiplier) + return tf.keras.applications.resnet.ResNet101( + layers=layers_override, **kwargs) + + +def resnet_v1_152(batchnorm_training, + batchnorm_scale=True, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5, + weight_decay=0.0001, + conv_hyperparams=None, + min_depth=8, + depth_multiplier=1, + **kwargs): + """Instantiates the Resnet50 architecture, modified for object detection. + + Args: + batchnorm_training: Bool. Assigned to Batch norm layer `training` param + when constructing `freezable_batch_norm.FreezableBatchNorm` layers. + batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale + the activations in the batch normalization layer. + default_batchnorm_momentum: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the momentum. + default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None, + batch norm layers will be constructed using this value as the epsilon. + weight_decay: The weight decay to use for regularizing the model. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. Optionally set to `None` + to use default resnet_v1 layer builders. + min_depth: Minimum number of filters in the convolutional layers. 
+ depth_multiplier: The depth multiplier to modify the number of filters + in the convolutional layers. + **kwargs: Keyword arguments forwarded directly to the + `tf.keras.applications.Mobilenet` method that constructs the Keras + model. + + Returns: + A Keras ResnetV1-152 model instance. + """ + layers_override = _LayersOverride( + batchnorm_training, + batchnorm_scale=batchnorm_scale, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + conv_hyperparams=conv_hyperparams, + weight_decay=weight_decay, + min_depth=min_depth, + depth_multiplier=depth_multiplier) + return tf.keras.applications.resnet.ResNet152( + layers=layers_override, **kwargs) +# pylint: enable=invalid-name + + +# The following codes are based on the existing keras ResNet model pattern: +# google3/third_party/tensorflow/python/keras/applications/resnet.py +def block_basic(x, + filters, + kernel_size=3, + stride=1, + conv_shortcut=False, + name=None): + """A residual block for ResNet18/34. + + Arguments: + x: input tensor. + filters: integer, filters of the bottleneck layer. + kernel_size: default 3, kernel size of the bottleneck layer. + stride: default 1, stride of the first layer. + conv_shortcut: default False, use convolution shortcut if True, otherwise + identity shortcut. + name: string, block label. + + Returns: + Output tensor for the residual block. + """ + layers = tf.keras.layers + bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1 + + preact = layers.BatchNormalization( + axis=bn_axis, epsilon=1.001e-5, name=name + '_preact_bn')( + x) + preact = layers.Activation('relu', name=name + '_preact_relu')(preact) + + if conv_shortcut: + shortcut = layers.Conv2D( + filters, 1, strides=1, name=name + '_0_conv')( + preact) + else: + shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x + + x = layers.ZeroPadding2D( + padding=((1, 1), (1, 1)), name=name + '_1_pad')( + preact) + x = layers.Conv2D( + filters, kernel_size, strides=1, use_bias=False, name=name + '_1_conv')( + x) + x = layers.BatchNormalization( + axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')( + x) + x = layers.Activation('relu', name=name + '_1_relu')(x) + + x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x) + x = layers.Conv2D( + filters, + kernel_size, + strides=stride, + use_bias=False, + name=name + '_2_conv')( + x) + x = layers.BatchNormalization( + axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')( + x) + x = layers.Activation('relu', name=name + '_2_relu')(x) + x = layers.Add(name=name + '_out')([shortcut, x]) + return x + + +def stack_basic(x, filters, blocks, stride1=2, name=None): + """A set of stacked residual blocks for ResNet18/34. + + Arguments: + x: input tensor. + filters: integer, filters of the bottleneck layer in a block. + blocks: integer, blocks in the stacked blocks. + stride1: default 2, stride of the first layer in the first block. + name: string, stack label. + + Returns: + Output tensor for the stacked blocks. 
+ """ + x = block_basic(x, filters, conv_shortcut=True, name=name + '_block1') + for i in range(2, blocks): + x = block_basic(x, filters, name=name + '_block' + str(i)) + x = block_basic( + x, filters, stride=stride1, name=name + '_block' + str(blocks)) + return x + + +def resnet_v1_18(include_top=True, + weights='imagenet', + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation='softmax'): + """Instantiates the ResNet18 architecture.""" + + def stack_fn(x): + x = stack_basic(x, 64, 2, stride1=1, name='conv2') + x = stack_basic(x, 128, 2, name='conv3') + x = stack_basic(x, 256, 2, name='conv4') + return stack_basic(x, 512, 2, name='conv5') + + return resnet.ResNet( + stack_fn, + True, + True, + 'resnet18', + include_top, + weights, + input_tensor, + input_shape, + pooling, + classes, + classifier_activation=classifier_activation) + + +def resnet_v1_34(include_top=True, + weights='imagenet', + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation='softmax'): + """Instantiates the ResNet34 architecture.""" + + def stack_fn(x): + x = stack_basic(x, 64, 3, stride1=1, name='conv2') + x = stack_basic(x, 128, 4, name='conv3') + x = stack_basic(x, 256, 6, name='conv4') + return stack_basic(x, 512, 3, name='conv5') + + return resnet.ResNet( + stack_fn, + True, + True, + 'resnet34', + include_top, + weights, + input_tensor, + input_shape, + pooling, + classes, + classifier_activation=classifier_activation) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/resnet_v1_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/resnet_v1_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4566bc8ddda664fd7698f17b4951fca48612307b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/resnet_v1_tf2_test.py @@ -0,0 +1,226 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for resnet_v1.py. + +This test mainly focuses on comparing slim resnet v1 and Keras resnet v1 for +object detection. To verify the consistency of the two models, we compare: + 1. Output shape of each layer given different inputs. + 2. Number of global variables. 
+""" +import unittest + +from absl.testing import parameterized +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.models.keras_models import resnet_v1 +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + +_EXPECTED_SHAPES_224_RESNET50 = { + 'conv2_block3_out': (4, 56, 56, 256), + 'conv3_block4_out': (4, 28, 28, 512), + 'conv4_block6_out': (4, 14, 14, 1024), + 'conv5_block3_out': (4, 7, 7, 2048), +} + +_EXPECTED_SHAPES_224_RESNET101 = { + 'conv2_block3_out': (4, 56, 56, 256), + 'conv3_block4_out': (4, 28, 28, 512), + 'conv4_block23_out': (4, 14, 14, 1024), + 'conv5_block3_out': (4, 7, 7, 2048), +} + +_EXPECTED_SHAPES_224_RESNET152 = { + 'conv2_block3_out': (4, 56, 56, 256), + 'conv3_block8_out': (4, 28, 28, 512), + 'conv4_block36_out': (4, 14, 14, 1024), + 'conv5_block3_out': (4, 7, 7, 2048), +} + +_RESNET_NAMES = ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152'] +_RESNET_MODELS = [ + resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101, resnet_v1.resnet_v1_152 +] +_RESNET_SHAPES = [ + _EXPECTED_SHAPES_224_RESNET50, _EXPECTED_SHAPES_224_RESNET101, + _EXPECTED_SHAPES_224_RESNET152 +] + +_NUM_CHANNELS = 3 +_BATCH_SIZE = 4 + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ResnetV1Test(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6, + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + batch_norm { + scale: true, + decay: 0.997, + epsilon: 0.001, + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _create_application_with_layer_outputs(self, + model_index, + batchnorm_training, + batchnorm_scale=True, + weight_decay=0.0001, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5): + """Constructs Keras resnet_v1 that extracts layer outputs.""" + # Have to clear the Keras backend to ensure isolation in layer naming + tf.keras.backend.clear_session() + layer_names = _RESNET_SHAPES[model_index].keys() + full_model = _RESNET_MODELS[model_index]( + batchnorm_training=batchnorm_training, + weights=None, + batchnorm_scale=batchnorm_scale, + weight_decay=weight_decay, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon, + include_top=False) + + layer_outputs = [ + full_model.get_layer(name=layer).output for layer in layer_names + ] + return tf.keras.Model(inputs=full_model.inputs, outputs=layer_outputs) + + def _check_returns_correct_shape(self, + image_height, + image_width, + model_index, + expected_feature_map_shape, + batchnorm_training=True, + batchnorm_scale=True, + weight_decay=0.0001, + default_batchnorm_momentum=0.997, + default_batchnorm_epsilon=1e-5): + model = self._create_application_with_layer_outputs( + model_index=model_index, + batchnorm_training=batchnorm_training, + batchnorm_scale=batchnorm_scale, + weight_decay=weight_decay, + default_batchnorm_momentum=default_batchnorm_momentum, + default_batchnorm_epsilon=default_batchnorm_epsilon) + + image_tensor = np.random.rand(_BATCH_SIZE, image_height, image_width, + 
_NUM_CHANNELS).astype(np.float32) + feature_maps = model(image_tensor) + layer_names = _RESNET_SHAPES[model_index].keys() + for feature_map, layer_name in zip(feature_maps, layer_names): + expected_shape = _RESNET_SHAPES[model_index][layer_name] + self.assertAllEqual(feature_map.shape, expected_shape) + + def _get_variables(self, model_index): + tf.keras.backend.clear_session() + model = self._create_application_with_layer_outputs( + model_index, batchnorm_training=False) + preprocessed_inputs = tf.random.uniform([2, 40, 40, _NUM_CHANNELS]) + model(preprocessed_inputs) + return model.variables + + def test_returns_correct_shapes_224(self): + image_height = 224 + image_width = 224 + for model_index, _ in enumerate(_RESNET_NAMES): + expected_feature_map_shape = _RESNET_SHAPES[model_index] + self._check_returns_correct_shape(image_height, image_width, model_index, + expected_feature_map_shape) + + def test_hyperparam_override(self): + for model_name in _RESNET_MODELS: + model = model_name( + batchnorm_training=True, + default_batchnorm_momentum=0.2, + default_batchnorm_epsilon=0.1, + weights=None, + include_top=False) + bn_layer = model.get_layer(name='conv1_bn') + self.assertAllClose(bn_layer.momentum, 0.2) + self.assertAllClose(bn_layer.epsilon, 0.1) + + def test_variable_count(self): + # The number of variables from slim resnetv1-* model. + variable_nums = [265, 520, 775] + for model_index, var_num in enumerate(variable_nums): + variables = self._get_variables(model_index) + self.assertEqual(len(variables), var_num) + + +class ResnetShapeTest(test_case.TestCase, parameterized.TestCase): + + @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') + @parameterized.parameters( + { + 'resnet_type': + 'resnet_v1_34', + 'output_layer_names': [ + 'conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out', + 'conv5_block3_out' + ] + }, { + 'resnet_type': + 'resnet_v1_18', + 'output_layer_names': [ + 'conv2_block2_out', 'conv3_block2_out', 'conv4_block2_out', + 'conv5_block2_out' + ] + }) + def test_output_shapes(self, resnet_type, output_layer_names): + if resnet_type == 'resnet_v1_34': + model = resnet_v1.resnet_v1_34(input_shape=(64, 64, 3), weights=None) + else: + model = resnet_v1.resnet_v1_18(input_shape=(64, 64, 3), weights=None) + outputs = [ + model.get_layer(output_layer_name).output + for output_layer_name in output_layer_names + ] + resnet_model = tf.keras.models.Model(inputs=model.input, outputs=outputs) + outputs = resnet_model(np.zeros((2, 64, 64, 3), dtype=np.float32)) + + # Check the shape of 'conv2_block3_out': + self.assertEqual(outputs[0].shape, [2, 16, 16, 64]) + # Check the shape of 'conv3_block4_out': + self.assertEqual(outputs[1].shape, [2, 8, 8, 128]) + # Check the shape of 'conv4_block6_out': + self.assertEqual(outputs[2].shape, [2, 4, 4, 256]) + # Check the shape of 'conv5_block3_out': + self.assertEqual(outputs[3].shape, [2, 2, 2, 512]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/test_utils.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0669b6c697f81ca982070983892cec01e894fad1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/keras_models/test_utils.py @@ -0,0 +1,214 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Test utils for other test files.""" + +# import tensorflow as tf +# +# from nets import mobilenet_v1 +# +# slim = tf.contrib.slim +# +# # Layer names of Slim to map Keras layer names in MobilenetV1 +# _MOBLIENET_V1_SLIM_ENDPOINTS = [ +# 'Conv2d_0', +# 'Conv2d_1_depthwise', 'Conv2d_1_pointwise', +# 'Conv2d_2_depthwise', 'Conv2d_2_pointwise', +# 'Conv2d_3_depthwise', 'Conv2d_3_pointwise', +# 'Conv2d_4_depthwise', 'Conv2d_4_pointwise', +# 'Conv2d_5_depthwise', 'Conv2d_5_pointwise', +# 'Conv2d_6_depthwise', 'Conv2d_6_pointwise', +# 'Conv2d_7_depthwise', 'Conv2d_7_pointwise', +# 'Conv2d_8_depthwise', 'Conv2d_8_pointwise', +# 'Conv2d_9_depthwise', 'Conv2d_9_pointwise', +# 'Conv2d_10_depthwise', 'Conv2d_10_pointwise', +# 'Conv2d_11_depthwise', 'Conv2d_11_pointwise', +# 'Conv2d_12_depthwise', 'Conv2d_12_pointwise', +# 'Conv2d_13_depthwise', 'Conv2d_13_pointwise' +# ] +# +# +# # Function to get the output shape of each layer in Slim. It's used to +# # generate the following constant expected_feature_map_shape for MobilenetV1. +# # Similarly, this can also apply to MobilenetV2. +# def _get_slim_endpoint_shapes(inputs, depth_multiplier=1.0, min_depth=8, +# use_explicit_padding=False): +# with slim.arg_scope([slim.conv2d, slim.separable_conv2d], +# normalizer_fn=slim.batch_norm): +# _, end_points = mobilenet_v1.mobilenet_v1_base( +# inputs, final_endpoint='Conv2d_13_pointwise', +# depth_multiplier=depth_multiplier, min_depth=min_depth, +# use_explicit_padding=use_explicit_padding) +# return [end_points[endpoint_name].get_shape() +# for endpoint_name in _MOBLIENET_V1_SLIM_ENDPOINTS] + + +# For Mobilenet V1 +moblenet_v1_expected_feature_map_shape_128 = [ + (2, 64, 64, 32), (2, 64, 64, 32), (2, 64, 64, 64), (2, 32, 32, 64), + (2, 32, 32, 128), (2, 32, 32, 128), (2, 32, 32, 128), (2, 16, 16, 128), + (2, 16, 16, 256), (2, 16, 16, 256), (2, 16, 16, 256), (2, 8, 8, 256), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 4, 4, 512), + (2, 4, 4, 1024), (2, 4, 4, 1024), (2, 4, 4, 1024), +] + +moblenet_v1_expected_feature_map_shape_128_explicit_padding = [ + (2, 64, 64, 32), (2, 64, 64, 32), (2, 64, 64, 64), (2, 32, 32, 64), + (2, 32, 32, 128), (2, 32, 32, 128), (2, 32, 32, 128), (2, 16, 16, 128), + (2, 16, 16, 256), (2, 16, 16, 256), (2, 16, 16, 256), (2, 8, 8, 256), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 4, 4, 512), + (2, 4, 4, 1024), (2, 4, 4, 1024), (2, 4, 4, 1024), +] + +mobilenet_v1_expected_feature_map_shape_with_dynamic_inputs = [ + (2, 64, 64, 32), (2, 64, 64, 32), (2, 64, 64, 64), (2, 32, 32, 64), + (2, 32, 32, 128), (2, 32, 32, 128), (2, 32, 32, 128), (2, 16, 16, 128), + (2, 16, 16, 256), (2, 
16, 16, 256), (2, 16, 16, 256), (2, 8, 8, 256), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), + (2, 8, 8, 512), (2, 8, 8, 512), (2, 8, 8, 512), (2, 4, 4, 512), + (2, 4, 4, 1024), (2, 4, 4, 1024), (2, 4, 4, 1024), +] + +moblenet_v1_expected_feature_map_shape_299 = [ + (2, 150, 150, 32), (2, 150, 150, 32), (2, 150, 150, 64), (2, 75, 75, 64), + (2, 75, 75, 128), (2, 75, 75, 128), (2, 75, 75, 128), (2, 38, 38, 128), + (2, 38, 38, 256), (2, 38, 38, 256), (2, 38, 38, 256), (2, 19, 19, 256), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 10, 10, 512), + (2, 10, 10, 1024), (2, 10, 10, 1024), (2, 10, 10, 1024), +] + +moblenet_v1_expected_feature_map_shape_enforcing_min_depth = [ + (2, 150, 150, 8), (2, 150, 150, 8), (2, 150, 150, 8), (2, 75, 75, 8), + (2, 75, 75, 8), (2, 75, 75, 8), (2, 75, 75, 8), (2, 38, 38, 8), + (2, 38, 38, 8), (2, 38, 38, 8), (2, 38, 38, 8), (2, 19, 19, 8), + (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), + (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), + (2, 19, 19, 8), (2, 19, 19, 8), (2, 19, 19, 8), (2, 10, 10, 8), + (2, 10, 10, 8), (2, 10, 10, 8), (2, 10, 10, 8), +] + +moblenet_v1_expected_feature_map_shape_with_conv_defs = [ + (2, 150, 150, 32), (2, 150, 150, 32), (2, 150, 150, 64), (2, 75, 75, 64), + (2, 75, 75, 128), (2, 75, 75, 128), (2, 75, 75, 128), (2, 38, 38, 128), + (2, 38, 38, 256), (2, 38, 38, 256), (2, 38, 38, 256), (2, 19, 19, 256), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), + (2, 19, 19, 512), (2, 19, 19, 512), (2, 19, 19, 512), (2, 10, 10, 512), + (2, 10, 10, 512), (2, 10, 10, 512), (2, 10, 10, 256), +] + +# For Mobilenet V2 +moblenet_v2_expected_feature_map_shape_128 = [ + (2, 64, 64, 32), (2, 64, 64, 96), (2, 32, 32, 96), (2, 32, 32, 24), + (2, 32, 32, 144), (2, 32, 32, 144), (2, 32, 32, 24), (2, 32, 32, 144), + (2, 16, 16, 144), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), + (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), + (2, 16, 16, 192), (2, 8, 8, 192), (2, 8, 8, 64), (2, 8, 8, 384), + (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), + (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), + (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 96), (2, 8, 8, 576), + (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), + (2, 8, 8, 96), (2, 8, 8, 576), (2, 4, 4, 576), (2, 4, 4, 160), + (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), + (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), + (2, 4, 4, 320), (2, 4, 4, 1280) +] + +moblenet_v2_expected_feature_map_shape_128_explicit_padding = [ + (2, 64, 64, 32), (2, 64, 64, 96), (2, 32, 32, 96), (2, 32, 32, 24), + (2, 32, 32, 144), (2, 32, 32, 144), (2, 32, 32, 24), (2, 32, 32, 144), + (2, 16, 16, 144), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), + (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), + (2, 16, 16, 192), (2, 8, 8, 192), (2, 8, 8, 64), (2, 8, 8, 384), + (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), + (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), + (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 96), (2, 8, 8, 576), + (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), + (2, 8, 
8, 96), (2, 8, 8, 576), (2, 4, 4, 576), (2, 4, 4, 160), + (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), + (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), + (2, 4, 4, 320), (2, 4, 4, 1280) +] + +mobilenet_v2_expected_feature_map_shape_with_dynamic_inputs = [ + (2, 64, 64, 32), (2, 64, 64, 96), (2, 32, 32, 96), (2, 32, 32, 24), + (2, 32, 32, 144), (2, 32, 32, 144), (2, 32, 32, 24), (2, 32, 32, 144), + (2, 16, 16, 144), (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), + (2, 16, 16, 32), (2, 16, 16, 192), (2, 16, 16, 192), (2, 16, 16, 32), + (2, 16, 16, 192), (2, 8, 8, 192), (2, 8, 8, 64), (2, 8, 8, 384), + (2, 8, 8, 384), (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), + (2, 8, 8, 64), (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 64), + (2, 8, 8, 384), (2, 8, 8, 384), (2, 8, 8, 96), (2, 8, 8, 576), + (2, 8, 8, 576), (2, 8, 8, 96), (2, 8, 8, 576), (2, 8, 8, 576), + (2, 8, 8, 96), (2, 8, 8, 576), (2, 4, 4, 576), (2, 4, 4, 160), + (2, 4, 4, 960), (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), + (2, 4, 4, 960), (2, 4, 4, 160), (2, 4, 4, 960), (2, 4, 4, 960), + (2, 4, 4, 320), (2, 4, 4, 1280) +] + +moblenet_v2_expected_feature_map_shape_299 = [ + (2, 150, 150, 32), (2, 150, 150, 96), (2, 75, 75, 96), (2, 75, 75, 24), + (2, 75, 75, 144), (2, 75, 75, 144), (2, 75, 75, 24), (2, 75, 75, 144), + (2, 38, 38, 144), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), + (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), + (2, 38, 38, 192), (2, 19, 19, 192), (2, 19, 19, 64), (2, 19, 19, 384), + (2, 19, 19, 384), (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), + (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 64), + (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 96), (2, 19, 19, 576), + (2, 19, 19, 576), (2, 19, 19, 96), (2, 19, 19, 576), (2, 19, 19, 576), + (2, 19, 19, 96), (2, 19, 19, 576), (2, 10, 10, 576), (2, 10, 10, 160), + (2, 10, 10, 960), (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), + (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), (2, 10, 10, 960), + (2, 10, 10, 320), (2, 10, 10, 1280) +] + +moblenet_v2_expected_feature_map_shape_enforcing_min_depth = [ + (2, 150, 150, 32), (2, 150, 150, 192), (2, 75, 75, 192), (2, 75, 75, 32), + (2, 75, 75, 192), (2, 75, 75, 192), (2, 75, 75, 32), (2, 75, 75, 192), + (2, 38, 38, 192), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), + (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), + (2, 38, 38, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), + (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), + (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), (2, 19, 19, 32), + (2, 19, 19, 192), (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), + (2, 19, 19, 192), (2, 19, 19, 32), (2, 19, 19, 192), (2, 19, 19, 192), + (2, 19, 19, 32), (2, 19, 19, 192), (2, 10, 10, 192), (2, 10, 10, 32), + (2, 10, 10, 192), (2, 10, 10, 192), (2, 10, 10, 32), (2, 10, 10, 192), + (2, 10, 10, 192), (2, 10, 10, 32), (2, 10, 10, 192), (2, 10, 10, 192), + (2, 10, 10, 32), (2, 10, 10, 32) +] + +moblenet_v2_expected_feature_map_shape_with_conv_defs = [ + (2, 150, 150, 32), (2, 150, 150, 96), (2, 75, 75, 96), (2, 75, 75, 24), + (2, 75, 75, 144), (2, 75, 75, 144), (2, 75, 75, 24), (2, 75, 75, 144), + (2, 38, 38, 144), (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), + (2, 38, 38, 32), (2, 38, 38, 192), (2, 38, 38, 192), (2, 38, 38, 32), + (2, 38, 38, 192), (2, 19, 19, 192), (2, 19, 19, 64), (2, 19, 19, 384), + (2, 19, 19, 384), (2, 19, 19, 64), (2, 
19, 19, 384), (2, 19, 19, 384), + (2, 19, 19, 64), (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 64), + (2, 19, 19, 384), (2, 19, 19, 384), (2, 19, 19, 96), (2, 19, 19, 576), + (2, 19, 19, 576), (2, 19, 19, 96), (2, 19, 19, 576), (2, 19, 19, 576), + (2, 19, 19, 96), (2, 19, 19, 576), (2, 10, 10, 576), (2, 10, 10, 160), + (2, 10, 10, 960), (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), + (2, 10, 10, 960), (2, 10, 10, 160), (2, 10, 10, 960), (2, 10, 10, 960), + (2, 10, 10, 320), (2, 10, 10, 256) +] diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_efficientnet_bifpn_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_efficientnet_bifpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..2ecf8fb01b2dba536b8d6c531a3e8becac75091c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_efficientnet_bifpn_feature_extractor.py @@ -0,0 +1,925 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SSD Keras-based EfficientNet + BiFPN (EfficientDet) Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +from six.moves import range +from six.moves import zip +import tensorflow.compat.v2 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import bidirectional_feature_pyramid_generators as bifpn_generators +from object_detection.utils import ops +from object_detection.utils import shape_utils +from object_detection.utils import tf_version +# pylint: disable=g-import-not-at-top +if tf_version.is_tf2(): + from official.vision.image_classification.efficientnet import efficientnet_model + +_EFFICIENTNET_LEVEL_ENDPOINTS = { + 1: 'stack_0/block_0/project_bn', + 2: 'stack_1/block_1/add', + 3: 'stack_2/block_1/add', + 4: 'stack_4/block_2/add', + 5: 'stack_6/block_0/project_bn', +} + + +class SSDEfficientNetBiFPNKerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Keras-based EfficientNetBiFPN (EfficientDet) Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + bifpn_min_level, + bifpn_max_level, + bifpn_num_iterations, + bifpn_num_filters, + bifpn_combine_method, + efficientnet_version, + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=None, + name=None): + """SSD Keras-based EfficientNetBiFPN (EfficientDet) feature extractor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: unsupported by EfficientNetBiFPN. float, depth + multiplier for the feature extractor. + min_depth: minimum feature extractor depth. 
+      pad_to_multiple: the nearest multiple to zero pad the input height and
+        width dimensions to.
+      conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
+        containing convolution hyperparameters for the layers added on top of
+        the base feature extractor.
+      freeze_batchnorm: whether to freeze batch norm parameters during training
+        or not. When training with a small batch size (e.g. 1), it is desirable
+        to freeze batch norm update and use pretrained batch norm params.
+      inplace_batchnorm_update: whether to update batch norm moving average
+        values inplace. When this is false, the train op must add a control
+        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to update
+        batch norm statistics.
+      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
+        valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4}
+        respectively.
+      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
+        The BiFPN construction uses feature maps starting from bifpn_min_level
+        up to bifpn_max_level. In the case that there are not enough feature
+        maps in the backbone network, additional feature maps are created by
+        applying stride 2 convolutions until we get the desired number of BiFPN
+        levels.
+      bifpn_num_iterations: number of BiFPN iterations. Overridden if
+        efficientdet_version is provided.
+      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
+        Overridden if efficientdet_version is provided.
+      bifpn_combine_method: the method used to combine BiFPN nodes.
+      efficientnet_version: the EfficientNet version to use for this feature
+        extractor's backbone.
+      use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
+        explicit padding when extracting features.
+      use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
+        convolutions when inputs to a node have a differing number of channels,
+        and uses separable convolutions after combine operations.
+      override_base_feature_extractor_hyperparams: unsupported. Whether to
+        override hyperparameters of the base feature extractor with the ones
+        from `conv_hyperparams`.
+      name: a string name scope to assign to the model. If 'None', Keras will
+        auto-generate one from the class name.
+ """ + super(SSDEfficientNetBiFPNKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + if depth_multiplier != 1.0: + raise ValueError('EfficientNetBiFPN does not support a non-default ' + 'depth_multiplier.') + if use_explicit_padding: + raise ValueError('EfficientNetBiFPN does not support explicit padding.') + if use_depthwise: + raise ValueError('EfficientNetBiFPN does not support use_depthwise.') + if override_base_feature_extractor_hyperparams: + raise ValueError('EfficientNetBiFPN does not support ' + 'override_base_feature_extractor_hyperparams.') + + self._bifpn_min_level = bifpn_min_level + self._bifpn_max_level = bifpn_max_level + self._bifpn_num_iterations = bifpn_num_iterations + self._bifpn_num_filters = max(bifpn_num_filters, min_depth) + self._bifpn_node_params = {'combine_method': bifpn_combine_method} + self._efficientnet_version = efficientnet_version + + logging.info('EfficientDet EfficientNet backbone version: %s', + self._efficientnet_version) + logging.info('EfficientDet BiFPN num filters: %d', self._bifpn_num_filters) + logging.info('EfficientDet BiFPN num iterations: %d', + self._bifpn_num_iterations) + + self._backbone_max_level = min( + max(_EFFICIENTNET_LEVEL_ENDPOINTS.keys()), bifpn_max_level) + self._output_layer_names = [ + _EFFICIENTNET_LEVEL_ENDPOINTS[i] + for i in range(bifpn_min_level, self._backbone_max_level + 1)] + self._output_layer_alias = [ + 'level_{}'.format(i) + for i in range(bifpn_min_level, self._backbone_max_level + 1)] + + # Initialize the EfficientNet backbone. + # Note, this is currently done in the init method rather than in the build + # method, since doing so introduces an error which is not well understood. + efficientnet_base = efficientnet_model.EfficientNet.from_name( + model_name=self._efficientnet_version, + overrides={'rescale_input': False}) + outputs = [efficientnet_base.get_layer(output_layer_name).output + for output_layer_name in self._output_layer_names] + self._efficientnet = tf.keras.Model( + inputs=efficientnet_base.inputs, outputs=outputs) + self.classification_backbone = efficientnet_base + self._bifpn_stage = None + + def build(self, input_shape): + self._bifpn_stage = bifpn_generators.KerasBiFpnFeatureMaps( + bifpn_num_iterations=self._bifpn_num_iterations, + bifpn_num_filters=self._bifpn_num_filters, + fpn_min_level=self._bifpn_min_level, + fpn_max_level=self._bifpn_max_level, + input_max_level=self._backbone_max_level, + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + bifpn_node_params=self._bifpn_node_params, + name='bifpn') + self.built = True + + def preprocess(self, inputs): + """SSD preprocessing. + + Channel-wise mean subtraction and scaling. + + Args: + inputs: a [batch, height, width, channels] float tensor representing a + batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + if inputs.shape.as_list()[3] == 3: + # Input images are expected to be in the range [0, 255]. 
+ channel_offset = [0.485, 0.456, 0.406] + channel_scale = [0.229, 0.224, 0.225] + return ((inputs / 255.0) - [[channel_offset]]) / [[channel_scale]] + else: + return inputs + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 129, preprocessed_inputs) + + base_feature_maps = self._efficientnet( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + output_feature_map_dict = self._bifpn_stage( + list(zip(self._output_layer_alias, base_feature_maps))) + + return list(output_feature_map_dict.values()) + + +class SSDEfficientNetB0BiFPNKerasFeatureExtractor( + SSDEfficientNetBiFPNKerasFeatureExtractor): + """SSD Keras EfficientNet-b0 BiFPN (EfficientDet-d0) Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + bifpn_min_level=3, + bifpn_max_level=7, + bifpn_num_iterations=3, + bifpn_num_filters=64, + bifpn_combine_method='fast_attention', + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=None, + name='EfficientDet-D0'): + """SSD Keras EfficientNet-b0 BiFPN (EfficientDet-d0) Feature Extractor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: unsupported by EfficientNetBiFPN. float, depth + multiplier for the feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during training + or not. When training with a small batch size (e.g. 1), it is desirable + to freeze batch norm update and use pretrained batch norm params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + bifpn_min_level: the highest resolution feature map to use in BiFPN. The + valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + bifpn_max_level: the smallest resolution feature map to use in the BiFPN. + BiFPN constructions uses features maps starting from bifpn_min_level + upto the bifpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of BiFPN + levels. + bifpn_num_iterations: number of BiFPN iterations. Overrided if + efficientdet_version is provided. + bifpn_num_filters: number of filters (channels) in all BiFPN layers. + Overrided if efficientdet_version is provided. + bifpn_combine_method: the method used to combine BiFPN nodes. + use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use + explicit padding when extracting features. 
+ use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular + convolutions when inputs to a node have a differing number of channels, + and use separable convolutions after combine operations. + override_base_feature_extractor_hyperparams: unsupported. Whether to + override hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras will + auto-generate one from the class name. + """ + super(SSDEfficientNetB0BiFPNKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + bifpn_min_level=bifpn_min_level, + bifpn_max_level=bifpn_max_level, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method, + efficientnet_version='efficientnet-b0', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDEfficientNetB1BiFPNKerasFeatureExtractor( + SSDEfficientNetBiFPNKerasFeatureExtractor): + """SSD Keras EfficientNet-b1 BiFPN (EfficientDet-d1) Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + bifpn_min_level=3, + bifpn_max_level=7, + bifpn_num_iterations=4, + bifpn_num_filters=88, + bifpn_combine_method='fast_attention', + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=None, + name='EfficientDet-D1'): + """SSD Keras EfficientNet-b1 BiFPN (EfficientDet-d1) Feature Extractor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: unsupported by EfficientNetBiFPN. float, depth + multiplier for the feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during training + or not. When training with a small batch size (e.g. 1), it is desirable + to freeze batch norm update and use pretrained batch norm params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + bifpn_min_level: the highest resolution feature map to use in BiFPN. The + valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + bifpn_max_level: the smallest resolution feature map to use in the BiFPN. + BiFPN constructions uses features maps starting from bifpn_min_level + upto the bifpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of BiFPN + levels. + bifpn_num_iterations: number of BiFPN iterations. Overrided if + efficientdet_version is provided. 
+ bifpn_num_filters: number of filters (channels) in all BiFPN layers. + Overrided if efficientdet_version is provided. + bifpn_combine_method: the method used to combine BiFPN nodes. + use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use + explicit padding when extracting features. + use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular + convolutions when inputs to a node have a differing number of channels, + and use separable convolutions after combine operations. + override_base_feature_extractor_hyperparams: unsupported. Whether to + override hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras will + auto-generate one from the class name. + """ + super(SSDEfficientNetB1BiFPNKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + bifpn_min_level=bifpn_min_level, + bifpn_max_level=bifpn_max_level, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method, + efficientnet_version='efficientnet-b1', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDEfficientNetB2BiFPNKerasFeatureExtractor( + SSDEfficientNetBiFPNKerasFeatureExtractor): + """SSD Keras EfficientNet-b2 BiFPN (EfficientDet-d2) Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + bifpn_min_level=3, + bifpn_max_level=7, + bifpn_num_iterations=5, + bifpn_num_filters=112, + bifpn_combine_method='fast_attention', + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=None, + name='EfficientDet-D2'): + + """SSD Keras EfficientNet-b2 BiFPN (EfficientDet-d2) Feature Extractor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: unsupported by EfficientNetBiFPN. float, depth + multiplier for the feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during training + or not. When training with a small batch size (e.g. 1), it is desirable + to freeze batch norm update and use pretrained batch norm params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + bifpn_min_level: the highest resolution feature map to use in BiFPN. The + valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + bifpn_max_level: the smallest resolution feature map to use in the BiFPN. + BiFPN constructions uses features maps starting from bifpn_min_level + upto the bifpn_max_level. 
In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of BiFPN + levels. + bifpn_num_iterations: number of BiFPN iterations. Overrided if + efficientdet_version is provided. + bifpn_num_filters: number of filters (channels) in all BiFPN layers. + Overrided if efficientdet_version is provided. + bifpn_combine_method: the method used to combine BiFPN nodes. + use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use + explicit padding when extracting features. + use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular + convolutions when inputs to a node have a differing number of channels, + and use separable convolutions after combine operations. + override_base_feature_extractor_hyperparams: unsupported. Whether to + override hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras will + auto-generate one from the class name. + """ + super(SSDEfficientNetB2BiFPNKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + bifpn_min_level=bifpn_min_level, + bifpn_max_level=bifpn_max_level, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method, + efficientnet_version='efficientnet-b2', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDEfficientNetB3BiFPNKerasFeatureExtractor( + SSDEfficientNetBiFPNKerasFeatureExtractor): + """SSD Keras EfficientNet-b3 BiFPN (EfficientDet-d3) Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + bifpn_min_level=3, + bifpn_max_level=7, + bifpn_num_iterations=6, + bifpn_num_filters=160, + bifpn_combine_method='fast_attention', + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=None, + name='EfficientDet-D3'): + + """SSD Keras EfficientNet-b3 BiFPN (EfficientDet-d3) Feature Extractor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: unsupported by EfficientNetBiFPN. float, depth + multiplier for the feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during training + or not. When training with a small batch size (e.g. 1), it is desirable + to freeze batch norm update and use pretrained batch norm params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + bifpn_min_level: the highest resolution feature map to use in BiFPN. 
The + valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + bifpn_max_level: the smallest resolution feature map to use in the BiFPN. + BiFPN constructions uses features maps starting from bifpn_min_level + upto the bifpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of BiFPN + levels. + bifpn_num_iterations: number of BiFPN iterations. Overrided if + efficientdet_version is provided. + bifpn_num_filters: number of filters (channels) in all BiFPN layers. + Overrided if efficientdet_version is provided. + bifpn_combine_method: the method used to combine BiFPN nodes. + use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use + explicit padding when extracting features. + use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular + convolutions when inputs to a node have a differing number of channels, + and use separable convolutions after combine operations. + override_base_feature_extractor_hyperparams: unsupported. Whether to + override hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras will + auto-generate one from the class name. + """ + super(SSDEfficientNetB3BiFPNKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + bifpn_min_level=bifpn_min_level, + bifpn_max_level=bifpn_max_level, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method, + efficientnet_version='efficientnet-b3', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDEfficientNetB4BiFPNKerasFeatureExtractor( + SSDEfficientNetBiFPNKerasFeatureExtractor): + """SSD Keras EfficientNet-b4 BiFPN (EfficientDet-d4) Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + bifpn_min_level=3, + bifpn_max_level=7, + bifpn_num_iterations=7, + bifpn_num_filters=224, + bifpn_combine_method='fast_attention', + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=None, + name='EfficientDet-D4'): + + """SSD Keras EfficientNet-b4 BiFPN (EfficientDet-d4) Feature Extractor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: unsupported by EfficientNetBiFPN. float, depth + multiplier for the feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during training + or not. When training with a small batch size (e.g. 1), it is desirable + to freeze batch norm update and use pretrained batch norm params. 
+ inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + bifpn_min_level: the highest resolution feature map to use in BiFPN. The + valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + bifpn_max_level: the smallest resolution feature map to use in the BiFPN. + BiFPN constructions uses features maps starting from bifpn_min_level + upto the bifpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of BiFPN + levels. + bifpn_num_iterations: number of BiFPN iterations. Overrided if + efficientdet_version is provided. + bifpn_num_filters: number of filters (channels) in all BiFPN layers. + Overrided if efficientdet_version is provided. + bifpn_combine_method: the method used to combine BiFPN nodes. + use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use + explicit padding when extracting features. + use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular + convolutions when inputs to a node have a differing number of channels, + and use separable convolutions after combine operations. + override_base_feature_extractor_hyperparams: unsupported. Whether to + override hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras will + auto-generate one from the class name. + """ + super(SSDEfficientNetB4BiFPNKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + bifpn_min_level=bifpn_min_level, + bifpn_max_level=bifpn_max_level, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method, + efficientnet_version='efficientnet-b4', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDEfficientNetB5BiFPNKerasFeatureExtractor( + SSDEfficientNetBiFPNKerasFeatureExtractor): + """SSD Keras EfficientNet-b5 BiFPN (EfficientDet-d5) Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + bifpn_min_level=3, + bifpn_max_level=7, + bifpn_num_iterations=7, + bifpn_num_filters=288, + bifpn_combine_method='fast_attention', + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=None, + name='EfficientDet-D5'): + + """SSD Keras EfficientNet-b5 BiFPN (EfficientDet-d5) Feature Extractor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: unsupported by EfficientNetBiFPN. float, depth + multiplier for the feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. 
+ conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during training + or not. When training with a small batch size (e.g. 1), it is desirable + to freeze batch norm update and use pretrained batch norm params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + bifpn_min_level: the highest resolution feature map to use in BiFPN. The + valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + bifpn_max_level: the smallest resolution feature map to use in the BiFPN. + BiFPN constructions uses features maps starting from bifpn_min_level + upto the bifpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of BiFPN + levels. + bifpn_num_iterations: number of BiFPN iterations. Overrided if + efficientdet_version is provided. + bifpn_num_filters: number of filters (channels) in all BiFPN layers. + Overrided if efficientdet_version is provided. + bifpn_combine_method: the method used to combine BiFPN nodes. + use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use + explicit padding when extracting features. + use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular + convolutions when inputs to a node have a differing number of channels, + and use separable convolutions after combine operations. + override_base_feature_extractor_hyperparams: unsupported. Whether to + override hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras will + auto-generate one from the class name. + """ + super(SSDEfficientNetB5BiFPNKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + bifpn_min_level=bifpn_min_level, + bifpn_max_level=bifpn_max_level, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method, + efficientnet_version='efficientnet-b5', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDEfficientNetB6BiFPNKerasFeatureExtractor( + SSDEfficientNetBiFPNKerasFeatureExtractor): + """SSD Keras EfficientNet-b6 BiFPN (EfficientDet-d[6,7]) Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + bifpn_min_level=3, + bifpn_max_level=7, + bifpn_num_iterations=8, + bifpn_num_filters=384, + bifpn_combine_method='sum', + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=None, + name='EfficientDet-D6-D7'): + + """SSD Keras EfficientNet-b6 BiFPN (EfficientDet-d[6,7]) Feature Extractor. + + SSD Keras EfficientNet-b6 BiFPN Feature Extractor, a.k.a. 
EfficientDet-d6 + and EfficientDet-d7. The EfficientDet-d[6,7] models use the same backbone + EfficientNet-b6 and the same BiFPN architecture, and therefore have the same + number of parameters. They only differ in their input resolutions. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: unsupported by EfficientNetBiFPN. float, depth + multiplier for the feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during training + or not. When training with a small batch size (e.g. 1), it is desirable + to freeze batch norm update and use pretrained batch norm params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + bifpn_min_level: the highest resolution feature map to use in BiFPN. The + valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + bifpn_max_level: the smallest resolution feature map to use in the BiFPN. + BiFPN constructions uses features maps starting from bifpn_min_level + upto the bifpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of BiFPN + levels. + bifpn_num_iterations: number of BiFPN iterations. Overrided if + efficientdet_version is provided. + bifpn_num_filters: number of filters (channels) in all BiFPN layers. + Overrided if efficientdet_version is provided. + bifpn_combine_method: the method used to combine BiFPN nodes. + use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use + explicit padding when extracting features. + use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular + convolutions when inputs to a node have a differing number of channels, + and use separable convolutions after combine operations. + override_base_feature_extractor_hyperparams: unsupported. Whether to + override hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras will + auto-generate one from the class name. 
+ """ + super(SSDEfficientNetB6BiFPNKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + bifpn_min_level=bifpn_min_level, + bifpn_max_level=bifpn_max_level, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method, + efficientnet_version='efficientnet-b6', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDEfficientNetB7BiFPNKerasFeatureExtractor( + SSDEfficientNetBiFPNKerasFeatureExtractor): + """SSD Keras EfficientNet-b7 BiFPN Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + bifpn_min_level=3, + bifpn_max_level=7, + bifpn_num_iterations=8, + bifpn_num_filters=384, + bifpn_combine_method='sum', + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=None, + name='EfficientNet-B7_BiFPN'): + + """SSD Keras EfficientNet-b7 BiFPN Feature Extractor. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: unsupported by EfficientNetBiFPN. float, depth + multiplier for the feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during training + or not. When training with a small batch size (e.g. 1), it is desirable + to freeze batch norm update and use pretrained batch norm params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + bifpn_min_level: the highest resolution feature map to use in BiFPN. The + valid values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + bifpn_max_level: the smallest resolution feature map to use in the BiFPN. + BiFPN constructions uses features maps starting from bifpn_min_level + upto the bifpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of BiFPN + levels. + bifpn_num_iterations: number of BiFPN iterations. Overrided if + efficientdet_version is provided. + bifpn_num_filters: number of filters (channels) in all BiFPN layers. + Overrided if efficientdet_version is provided. + bifpn_combine_method: the method used to combine BiFPN nodes. + use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use + explicit padding when extracting features. + use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular + convolutions when inputs to a node have a differing number of channels, + and use separable convolutions after combine operations. + override_base_feature_extractor_hyperparams: unsupported. 
Whether to + override hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras will + auto-generate one from the class name. + """ + super(SSDEfficientNetB7BiFPNKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + bifpn_min_level=bifpn_min_level, + bifpn_max_level=bifpn_max_level, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method, + efficientnet_version='efficientnet-b7', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_efficientnet_bifpn_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_efficientnet_bifpn_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..450fcc8f8548a0f9bb27de5da0126f13b3c93da2 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_efficientnet_bifpn_feature_extractor_tf2_test.py @@ -0,0 +1,179 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
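For orientation, here is a minimal usage sketch (not part of the diff) for the EfficientNet-BiFPN extractor classes added above. It mirrors the conv_hyperparams proto and the direct-call pattern used in the TF2 test file that follows; the concrete flag values and the dummy input are illustrative assumptions only.

import numpy as np
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.models import ssd_efficientnet_bifpn_feature_extractor
from object_detection.protos import hyperparams_pb2

# Conv hyperparameters, mirroring _build_conv_hyperparams in the test below.
conv_hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  force_use_bias: true
  activation: SWISH
  regularizer { l2_regularizer { weight: 0.0004 } }
  initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } }
  batch_norm { scale: true decay: 0.99 epsilon: 0.001 }
""", conv_hyperparams)

# EfficientDet-D0 defaults: EfficientNet-b0 backbone, 3 BiFPN iterations,
# 64 BiFPN filters, 'fast_attention' combine method.
extractor = (ssd_efficientnet_bifpn_feature_extractor
             .SSDEfficientNetB0BiFPNKerasFeatureExtractor(
                 is_training=True,
                 depth_multiplier=1.0,
                 min_depth=16,
                 pad_to_multiple=1,
                 conv_hyperparams=hyperparams_builder.KerasLayerHyperparams(
                     conv_hyperparams),
                 freeze_batchnorm=False,
                 inplace_batchnorm_update=False))

# The extractor is a Keras model; calling it yields one feature map per BiFPN
# level. For a 256x256 input the test below expects spatial sizes 32, 16, 8,
# 4 and 2, each with 64 channels in the D0 configuration.
feature_maps = extractor(np.zeros((1, 256, 256, 3), dtype=np.float32))
for level, feature_map in zip(range(3, 8), feature_maps):
  print('level %d: %s' % (level, feature_map.shape))

The parameterized test below exercises the same construction across EfficientDet-d0 through d6/d7 and additionally checks the total parameter count for each configuration.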
+# ============================================================================== +"""Tests for the ssd_efficientnet_bifpn_feature_extractor.""" +import unittest +from absl.testing import parameterized + +import numpy as np +import tensorflow.compat.v2 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.models import ssd_efficientnet_bifpn_feature_extractor +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +def _count_params(model, trainable_only=True): + """Returns the count of all model parameters, or just trainable ones.""" + if not trainable_only: + return model.count_params() + else: + return int(np.sum([ + tf.keras.backend.count_params(p) for p in model.trainable_weights])) + + +@parameterized.parameters( + {'efficientdet_version': 'efficientdet-d0', + 'efficientnet_version': 'efficientnet-b0', + 'bifpn_num_iterations': 3, + 'bifpn_num_filters': 64, + 'bifpn_combine_method': 'fast_attention'}, + {'efficientdet_version': 'efficientdet-d1', + 'efficientnet_version': 'efficientnet-b1', + 'bifpn_num_iterations': 4, + 'bifpn_num_filters': 88, + 'bifpn_combine_method': 'fast_attention'}, + {'efficientdet_version': 'efficientdet-d2', + 'efficientnet_version': 'efficientnet-b2', + 'bifpn_num_iterations': 5, + 'bifpn_num_filters': 112, + 'bifpn_combine_method': 'fast_attention'}, + {'efficientdet_version': 'efficientdet-d3', + 'efficientnet_version': 'efficientnet-b3', + 'bifpn_num_iterations': 6, + 'bifpn_num_filters': 160, + 'bifpn_combine_method': 'fast_attention'}, + {'efficientdet_version': 'efficientdet-d4', + 'efficientnet_version': 'efficientnet-b4', + 'bifpn_num_iterations': 7, + 'bifpn_num_filters': 224, + 'bifpn_combine_method': 'fast_attention'}, + {'efficientdet_version': 'efficientdet-d5', + 'efficientnet_version': 'efficientnet-b5', + 'bifpn_num_iterations': 7, + 'bifpn_num_filters': 288, + 'bifpn_combine_method': 'fast_attention'}, + # efficientdet-d6 and efficientdet-d7 only differ in input size. 
+ {'efficientdet_version': 'efficientdet-d6-d7', + 'efficientnet_version': 'efficientnet-b6', + 'bifpn_num_iterations': 8, + 'bifpn_num_filters': 384, + 'bifpn_combine_method': 'sum'}) +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SSDEfficientNetBiFPNFeatureExtractorTest( + test_case.TestCase, parameterized.TestCase): + + def _build_conv_hyperparams(self, add_batch_norm=True): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + force_use_bias: true + activation: SWISH + regularizer { + l2_regularizer { + weight: 0.0004 + } + } + initializer { + truncated_normal_initializer { + stddev: 0.03 + mean: 0.0 + } + } + """ + if add_batch_norm: + batch_norm_proto = """ + batch_norm { + scale: true, + decay: 0.99, + epsilon: 0.001, + } + """ + conv_hyperparams_text_proto += batch_norm_proto + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _create_feature_extractor(self, + efficientnet_version='efficientnet-b0', + bifpn_num_iterations=3, + bifpn_num_filters=64, + bifpn_combine_method='fast_attention'): + """Constructs a new EfficientNetBiFPN feature extractor.""" + depth_multiplier = 1.0 + pad_to_multiple = 1 + min_depth = 16 + return (ssd_efficientnet_bifpn_feature_extractor + .SSDEfficientNetBiFPNKerasFeatureExtractor( + is_training=True, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + bifpn_min_level=3, + bifpn_max_level=7, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method, + efficientnet_version=efficientnet_version)) + + def test_efficientdet_feature_extractor_shapes(self, + efficientdet_version, + efficientnet_version, + bifpn_num_iterations, + bifpn_num_filters, + bifpn_combine_method): + feature_extractor = self._create_feature_extractor( + efficientnet_version=efficientnet_version, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method) + outputs = feature_extractor(np.zeros((2, 256, 256, 3), dtype=np.float32)) + + self.assertEqual(outputs[0].shape, (2, 32, 32, bifpn_num_filters)) + self.assertEqual(outputs[1].shape, (2, 16, 16, bifpn_num_filters)) + self.assertEqual(outputs[2].shape, (2, 8, 8, bifpn_num_filters)) + self.assertEqual(outputs[3].shape, (2, 4, 4, bifpn_num_filters)) + self.assertEqual(outputs[4].shape, (2, 2, 2, bifpn_num_filters)) + + def test_efficientdet_feature_extractor_params(self, + efficientdet_version, + efficientnet_version, + bifpn_num_iterations, + bifpn_num_filters, + bifpn_combine_method): + feature_extractor = self._create_feature_extractor( + efficientnet_version=efficientnet_version, + bifpn_num_iterations=bifpn_num_iterations, + bifpn_num_filters=bifpn_num_filters, + bifpn_combine_method=bifpn_combine_method) + _ = feature_extractor(np.zeros((2, 256, 256, 3), dtype=np.float32)) + expected_params = { + 'efficientdet-d0': 5484829, + 'efficientdet-d1': 8185156, + 'efficientdet-d2': 9818153, + 'efficientdet-d3': 13792706, + 'efficientdet-d4': 22691445, + 'efficientdet-d5': 35795677, + 'efficientdet-d6-d7': 53624512, + } + num_params = _count_params(feature_extractor) + self.assertEqual(expected_params[efficientdet_version], num_params) + + +if __name__ == '__main__': + tf.test.main() diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_feature_extractor_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_feature_extractor_test.py new file mode 100644 index 0000000000000000000000000000000000000000..29c43e376c6167b61a256eb0812ee4d3bcee3ed5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_feature_extractor_test.py @@ -0,0 +1,263 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base test class SSDFeatureExtractors.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from abc import abstractmethod + +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf +import tf_slim as slim +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import test_utils + + +class SsdFeatureExtractorTestBase(test_case.TestCase): + + def _build_conv_hyperparams(self, add_batch_norm=True): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + if add_batch_norm: + batch_norm_proto = """ + batch_norm { + scale: false + } + """ + conv_hyperparams_text_proto += batch_norm_proto + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def conv_hyperparams_fn(self): + with slim.arg_scope([]) as sc: + return sc + + @abstractmethod + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + use_keras=False, + use_depthwise=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + use_depthwise: Whether to use depthwise convolutions. + Returns: + an ssd_meta_arch.SSDFeatureExtractor or an + ssd_meta_arch.SSDKerasFeatureExtractor object. 
+ """ + pass + + def _create_features(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + use_keras=False): + kwargs = {} + if use_explicit_padding: + kwargs.update({'use_explicit_padding': use_explicit_padding}) + if use_depthwise: + kwargs.update({'use_depthwise': use_depthwise}) + if num_layers != 6: + kwargs.update({'num_layers': num_layers}) + if use_keras: + kwargs.update({'use_keras': use_keras}) + feature_extractor = self._create_feature_extractor( + depth_multiplier, + pad_to_multiple, + **kwargs) + return feature_extractor + + def _extract_features(self, + image_tensor, + feature_extractor, + use_keras=False): + if use_keras: + feature_maps = feature_extractor(image_tensor) + else: + feature_maps = feature_extractor.extract_features(image_tensor) + return feature_maps + + def check_extract_features_returns_correct_shape(self, + batch_size, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shapes, + use_explicit_padding=False, + num_layers=6, + use_keras=False, + use_depthwise=False): + with test_utils.GraphContextOrNone() as g: + feature_extractor = self._create_features( + depth_multiplier, + pad_to_multiple, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def graph_fn(image_tensor): + return self._extract_features( + image_tensor, + feature_extractor, + use_keras=use_keras) + + image_tensor = np.random.rand(batch_size, image_height, image_width, + 3).astype(np.float32) + feature_maps = self.execute(graph_fn, [image_tensor], graph=g) + for feature_map, expected_shape in zip( + feature_maps, expected_feature_map_shapes): + self.assertAllEqual(feature_map.shape, expected_shape) + + def check_extract_features_returns_correct_shapes_with_dynamic_inputs( + self, + batch_size, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shapes, + use_explicit_padding=False, + num_layers=6, + use_keras=False, + use_depthwise=False): + + with test_utils.GraphContextOrNone() as g: + feature_extractor = self._create_features( + depth_multiplier, + pad_to_multiple, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def graph_fn(image_height, image_width): + image_tensor = tf.random_uniform([batch_size, image_height, image_width, + 3], dtype=tf.float32) + return self._extract_features( + image_tensor, + feature_extractor, + use_keras=use_keras) + + feature_maps = self.execute_cpu(graph_fn, [ + np.array(image_height, dtype=np.int32), + np.array(image_width, dtype=np.int32) + ], graph=g) + for feature_map, expected_shape in zip( + feature_maps, expected_feature_map_shapes): + self.assertAllEqual(feature_map.shape, expected_shape) + + def check_extract_features_raises_error_with_invalid_image_size( + self, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + use_keras=False, + use_depthwise=False): + + with test_utils.GraphContextOrNone() as g: + batch = 4 + width = tf.random.uniform([], minval=image_width, maxval=image_width+1, + dtype=tf.int32) + height = tf.random.uniform([], minval=image_height, maxval=image_height+1, + dtype=tf.int32) + shape = tf.stack([batch, height, width, 3]) + preprocessed_inputs = tf.random.uniform(shape) + feature_extractor = self._create_features( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def 
graph_fn(): + feature_maps = self._extract_features( + preprocessed_inputs, + feature_extractor, + use_keras=use_keras) + return feature_maps + if self.is_tf2(): + with self.assertRaises(ValueError): + self.execute_cpu(graph_fn, [], graph=g) + else: + with self.assertRaises(tf.errors.InvalidArgumentError): + self.execute_cpu(graph_fn, [], graph=g) + + def check_feature_extractor_variables_under_scope(self, + depth_multiplier, + pad_to_multiple, + scope_name, + use_keras=False, + use_depthwise=False): + variables = self.get_feature_extractor_variables( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + for variable in variables: + self.assertTrue(variable.name.startswith(scope_name)) + + def get_feature_extractor_variables(self, + depth_multiplier, + pad_to_multiple, + use_keras=False, + use_depthwise=False): + g = tf.Graph() + with g.as_default(): + feature_extractor = self._create_features( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) + self._extract_features( + preprocessed_inputs, + feature_extractor, + use_keras=use_keras) + return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v2_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v2_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..c782bb2524ecf1a135d94c5743e5a20461231403 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v2_feature_extractor.py @@ -0,0 +1,137 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for InceptionV2 features.""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import inception_v2 + + +class SSDInceptionV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using InceptionV2 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """InceptionV2 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. 
+ pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + + Raises: + ValueError: If `override_base_feature_extractor_hyperparams` is False. + """ + super(SSDInceptionV2FeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + if not self._override_base_feature_extractor_hyperparams: + raise ValueError('SSD Inception V2 feature extractor always uses' + 'scope returned by `conv_hyperparams_fn` for both the ' + 'base feature extractor and the additional layers ' + 'added since there is no arg_scope defined for the base ' + 'feature extractor.') + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + feature_map_layout = { + 'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', '' + ][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope('InceptionV2', + reuse=self._reuse_weights) as scope: + _, image_features = inception_v2.inception_v2_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Mixed_5c', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope) + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v2_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v2_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a24aabb709c8577650223b5c9a7387b6a8515081 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v2_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v2_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v2_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1e33ed70ed45cef900d9f615cba9a5f196d36e23 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v2_feature_extractor_tf1_test.py @@ -0,0 +1,160 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
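As a reading aid for the feature_map_layout used in extract_features above, the short sketch below (not part of the diff) spells out how its two parallel lists are interpreted; the behaviour described in the comments reflects the usual semantics of feature_map_generators.multi_resolution_feature_maps and is stated here as an assumption rather than quoted from this change.

# Plain-Python walk over the layout dict from
# SSDInceptionV2FeatureExtractor.extract_features above.
feature_map_layout = {
    'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''],
    'layer_depth': [-1, -1, 512, 256, 256, 128],
}

for i, (layer, depth) in enumerate(zip(feature_map_layout['from_layer'],
                                       feature_map_layout['layer_depth'])):
  if layer:
    # A named entry reuses an InceptionV2 endpoint; -1 keeps its native depth.
    print('feature map %d: backbone endpoint %s' % (i, layer))
  else:
    # An empty entry adds a new downsampling conv layer with `depth` channels.
    print('feature map %d: extra conv layer, %d channels' % (i, depth))

This lines up with the shapes asserted in the tf1 test that follows: the first two maps keep the endpoint depths (576 and 1024 at depth_multiplier 1.0), and the remaining four use 512, 256, 256 and 128 channels.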
+# ============================================================================== + +"""Tests for object_detection.models.ssd_inception_v2_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_inception_v2_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdInceptionV2FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + is_training=True): + """Constructs a SsdInceptionV2FeatureExtractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + is_training: whether the network is in training mode. + + Returns: + an ssd_inception_v2_feature_extractor.SsdInceptionV2FeatureExtractor. + """ + min_depth = 32 + return ssd_inception_v2_feature_extractor.SSDInceptionV2FeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + num_layers=num_layers, + override_base_feature_extractor_hyperparams=True) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1024), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 128), (2, 10, 10, 128), + (2, 5, 5, 32), (2, 3, 3, 32), + (2, 2, 2, 32), (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + 
expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1024), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_raises_error_with_invalid_image_size(self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'InceptionV2' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, num_layers=4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v3_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v3_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..0fa7f78d15baf0280833d4ad2e36d9f16a4ffccb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v3_feature_extractor.py @@ -0,0 +1,137 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
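The `pad_to_multiple` expectations in the test above reduce to simple arithmetic: the input is zero padded up to the next multiple of 32 before it reaches the backbone, so a 299x299 image becomes 320x320 and the stride-16 'Mixed_4c' map grows from 19x19 to 20x20. A small sketch of that calculation (function names here are illustrative only):

import math

def pad_up_to_multiple(dim, multiple):
    # Round a spatial dimension up to the nearest multiple, as the padding op does.
    return int(math.ceil(dim / float(multiple))) * multiple

def same_padding_size(dim, total_stride):
    # Spatial size after a stack of 'SAME'-padded layers with the given total stride.
    return int(math.ceil(dim / float(total_stride)))

assert pad_up_to_multiple(299, 32) == 320
assert same_padding_size(299, 16) == 19   # unpadded case: 19x19 'Mixed_4c' map
assert same_padding_size(320, 16) == 20   # padded case matching the test above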
+# ============================================================================== + +"""SSDFeatureExtractor for InceptionV3 features.""" +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import inception_v3 + + +class SSDInceptionV3FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using InceptionV3 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """InceptionV3 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + + Raises: + ValueError: If `override_base_feature_extractor_hyperparams` is False. + """ + super(SSDInceptionV3FeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + if not self._override_base_feature_extractor_hyperparams: + raise ValueError('SSD Inception V3 feature extractor always uses' + 'scope returned by `conv_hyperparams_fn` for both the ' + 'base feature extractor and the additional layers ' + 'added since there is no arg_scope defined for the base ' + 'feature extractor.') + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + feature_map_layout = { + 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', '' + ][:self._num_layers], + 'layer_depth': [-1, -1, -1, 512, 256, 128][:self._num_layers], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope('InceptionV3', reuse=self._reuse_weights) as scope: + _, image_features = inception_v3.inception_v3_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Mixed_7c', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope) + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v3_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v3_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f4546aa3a0f53ee8c95bfbe8621a72192c0a8fa Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v3_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v3_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v3_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a0cbb451586b865cc448c292231a21dc468110a4 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_inception_v3_feature_extractor_tf1_test.py @@ -0,0 +1,160 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
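For reference, the `preprocess` method defined above is a plain affine rescaling from the conventional [0, 255] pixel range into [-1, 1], which is what the value-range test further below asserts. A quick numeric check:

import numpy as np

def ssd_preprocess(resized_inputs):
    # Same affine map as preprocess(): 0 -> -1, 127.5 -> 0, 255 -> 1.
    return (2.0 / 255.0) * resized_inputs - 1.0

np.testing.assert_allclose(
    ssd_preprocess(np.array([0.0, 127.5, 255.0])), [-1.0, 0.0, 1.0])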
+# ============================================================================== + +"""Tests for object_detection.models.ssd_inception_v3_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_inception_v3_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdInceptionV3FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + is_training=True): + """Constructs a SsdInceptionV3FeatureExtractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + is_training: whether the network is in training mode. + + Returns: + an ssd_inception_v3_feature_extractor.SsdInceptionV3FeatureExtractor. + """ + min_depth = 32 + return ssd_inception_v3_feature_extractor.SSDInceptionV3FeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + num_layers=num_layers, + override_base_feature_extractor_hyperparams=True) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768), + (2, 2, 2, 2048), (2, 1, 1, 512), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768), + (2, 2, 2, 2048), (2, 1, 1, 512), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 35, 35, 288), (2, 17, 17, 768), + (2, 8, 8, 2048), (2, 4, 4, 512), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 35, 35, 128), (2, 17, 17, 128), + (2, 8, 8, 192), (2, 4, 4, 32), + (2, 2, 2, 32), (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + 
expected_feature_map_shape = [(2, 37, 37, 288), (2, 18, 18, 768), + (2, 8, 8, 2048), (2, 4, 4, 512), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_raises_error_with_invalid_image_size(self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'InceptionV3' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768), + (2, 2, 2, 2048), (2, 1, 1, 512)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, num_layers=4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobiledet_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobiledet_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..019d7543bb7b271d6158b6b30fbb69a7db5a99a8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobiledet_feature_extractor.py @@ -0,0 +1,586 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
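The `enforcing_min_depth` expectations above (128, 128 and 192 channels for the backbone maps, 32 for the appended ones) follow from the usual tf-slim depth-scaling rule of clamping each scaled layer at `min_depth`; Inception mixed layers then concatenate several clamped 32-channel branch outputs. A rough sketch of that rule as I read the slim convention, not code from this repository:

def scaled_depth(depth, depth_multiplier, min_depth=32):
    # Assumed tf-slim convention: shrink by the multiplier, never below min_depth.
    return max(int(depth * depth_multiplier), min_depth)

# With depth_multiplier = 0.5 ** 12 every branch collapses to min_depth, so a
# mixed layer built from four clamped branch outputs ends up with 4 * 32 = 128
# channels, and one whose branches contribute six clamped outputs gives 192.
assert scaled_depth(288, 0.5 ** 12) == 32
assert 4 * scaled_depth(64, 0.5 ** 12) == 128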
+# ============================================================================== +"""SSDFeatureExtractor for MobileDet features.""" + +import functools +import numpy as np +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +BACKBONE_WEIGHT_DECAY = 4e-5 + + +def _scale_filters(filters, multiplier, base=8): + """Scale the filters accordingly to (multiplier, base).""" + round_half_up = int(int(filters) * multiplier / base + 0.5) + result = int(round_half_up * base) + return max(result, base) + + +def _swish6(h): + with tf.name_scope('swish6'): + return h * tf.nn.relu6(h + np.float32(3)) * np.float32(1. / 6.) + + +def _conv(h, filters, kernel_size, strides=1, + normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu6): + if activation_fn is None: + raise ValueError('Activation function cannot be None. Use tf.identity ' + 'instead to better support quantized training.') + return slim.conv2d( + h, + filters, + kernel_size, + stride=strides, + activation_fn=activation_fn, + normalizer_fn=normalizer_fn, + weights_initializer=tf.initializers.he_normal(), + weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY), + padding='SAME') + + +def _separable_conv( + h, filters, kernel_size, strides=1, activation_fn=tf.nn.relu6): + """Separable convolution layer.""" + if activation_fn is None: + raise ValueError('Activation function cannot be None. Use tf.identity ' + 'instead to better support quantized training.') + # Depthwise variant of He initialization derived under the principle proposed + # in the original paper. Note the original He normalization was designed for + # full convolutions and calling tf.initializers.he_normal() can over-estimate + # the fan-in of a depthwise kernel by orders of magnitude. + stddev = (2.0 / kernel_size**2)**0.5 / .87962566103423978 + depthwise_initializer = tf.initializers.truncated_normal(stddev=stddev) + return slim.separable_conv2d( + h, + filters, + kernel_size, + stride=strides, + activation_fn=activation_fn, + normalizer_fn=slim.batch_norm, + weights_initializer=depthwise_initializer, + pointwise_initializer=tf.initializers.he_normal(), + weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY), + padding='SAME') + + +def _squeeze_and_excite(h, hidden_dim, activation_fn=tf.nn.relu6): + with tf.variable_scope(None, default_name='SqueezeExcite'): + height, width = h.shape[1], h.shape[2] + u = slim.avg_pool2d(h, [height, width], stride=1, padding='VALID') + u = _conv(u, hidden_dim, 1, + normalizer_fn=None, activation_fn=activation_fn) + u = _conv(u, h.shape[-1], 1, + normalizer_fn=None, activation_fn=tf.nn.sigmoid) + return u * h + + +def _inverted_bottleneck_no_expansion( + h, filters, activation_fn=tf.nn.relu6, + kernel_size=3, strides=1, use_se=False): + """Inverted bottleneck layer without the first 1x1 expansion convolution.""" + with tf.variable_scope(None, default_name='IBNNoExpansion'): + # Setting filters to None will make _separable_conv a depthwise conv. 
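# Aside (illustrative, not part of the MobileDet code): the helpers above are
# easy to sanity-check in plain numpy. _scale_filters rounds a scaled filter
# count to the nearest multiple of `base` (never below it), and _swish6 is the
# hard-swish nonlinearity h * relu6(h + 3) / 6.
import numpy as np

def scale_filters(filters, multiplier, base=8):
    # Same rounding rule as _scale_filters above.
    return max(int(int(filters) * multiplier / base + 0.5) * base, base)

def hard_swish6(h):
    # Numpy stand-in for _swish6.
    return h * np.clip(h + 3.0, 0.0, 6.0) / 6.0

assert scale_filters(72, 0.5) == 40   # 36 rounds up to the next multiple of 8
assert scale_filters(8, 0.25) == 8    # clamped at the base
assert hard_swish6(-3.0) == 0.0 and hard_swish6(6.0) == 6.0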
+ h = _separable_conv( + h, None, kernel_size, strides=strides, activation_fn=activation_fn) + if use_se: + hidden_dim = _scale_filters(h.shape[-1], 0.25) + h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) + h = _conv(h, filters, 1, activation_fn=tf.identity) + return h + + +def _inverted_bottleneck( + h, filters, activation_fn=tf.nn.relu6, + kernel_size=3, expansion=8, strides=1, use_se=False, residual=True): + """Inverted bottleneck layer.""" + with tf.variable_scope(None, default_name='IBN'): + shortcut = h + expanded_filters = int(h.shape[-1]) * expansion + if expansion <= 1: + raise ValueError('Expansion factor must be greater than 1.') + h = _conv(h, expanded_filters, 1, activation_fn=activation_fn) + # Setting filters to None will make _separable_conv a depthwise conv. + h = _separable_conv(h, None, kernel_size, strides=strides, + activation_fn=activation_fn) + if use_se: + hidden_dim = _scale_filters(expanded_filters, 0.25) + h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) + h = _conv(h, filters, 1, activation_fn=tf.identity) + if residual: + h = h + shortcut + return h + + +def _fused_conv( + h, filters, activation_fn=tf.nn.relu6, + kernel_size=3, expansion=8, strides=1, use_se=False, residual=True): + """Fused convolution layer.""" + with tf.variable_scope(None, default_name='FusedConv'): + shortcut = h + expanded_filters = int(h.shape[-1]) * expansion + if expansion <= 1: + raise ValueError('Expansion factor must be greater than 1.') + h = _conv(h, expanded_filters, kernel_size, strides=strides, + activation_fn=activation_fn) + if use_se: + hidden_dim = _scale_filters(expanded_filters, 0.25) + h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn) + h = _conv(h, filters, 1, activation_fn=tf.identity) + if residual: + h = h + shortcut + return h + + +def _tucker_conv( + h, filters, activation_fn=tf.nn.relu6, + kernel_size=3, input_rank_ratio=0.25, output_rank_ratio=0.25, + strides=1, residual=True): + """Tucker convolution layer (generalized bottleneck).""" + with tf.variable_scope(None, default_name='TuckerConv'): + shortcut = h + input_rank = _scale_filters(h.shape[-1], input_rank_ratio) + h = _conv(h, input_rank, 1, activation_fn=activation_fn) + output_rank = _scale_filters(filters, output_rank_ratio) + h = _conv(h, output_rank, kernel_size, strides=strides, + activation_fn=activation_fn) + h = _conv(h, filters, 1, activation_fn=tf.identity) + if residual: + h = h + shortcut + return h + + +def mobiledet_cpu_backbone(h, multiplier=1.0): + """Build a MobileDet CPU backbone.""" + def _scale(filters): + return _scale_filters(filters, multiplier) + ibn = functools.partial( + _inverted_bottleneck, use_se=True, activation_fn=_swish6) + + endpoints = {} + h = _conv(h, _scale(16), 3, strides=2, activation_fn=_swish6) + h = _inverted_bottleneck_no_expansion( + h, _scale(8), use_se=True, activation_fn=_swish6) + endpoints['C1'] = h + h = ibn(h, _scale(16), expansion=4, strides=2, residual=False) + endpoints['C2'] = h + h = ibn(h, _scale(32), expansion=8, strides=2, residual=False) + h = ibn(h, _scale(32), expansion=4) + h = ibn(h, _scale(32), expansion=4) + h = ibn(h, _scale(32), expansion=4) + endpoints['C3'] = h + h = ibn(h, _scale(72), kernel_size=5, expansion=8, strides=2, residual=False) + h = ibn(h, _scale(72), expansion=8) + h = ibn(h, _scale(72), kernel_size=5, expansion=4) + h = ibn(h, _scale(72), expansion=4) + h = ibn(h, _scale(72), expansion=8, residual=False) + h = ibn(h, _scale(72), expansion=8) + h = ibn(h, _scale(72), 
expansion=8) + h = ibn(h, _scale(72), expansion=8) + endpoints['C4'] = h + h = ibn(h, _scale(104), kernel_size=5, expansion=8, strides=2, residual=False) + h = ibn(h, _scale(104), kernel_size=5, expansion=4) + h = ibn(h, _scale(104), kernel_size=5, expansion=4) + h = ibn(h, _scale(104), expansion=4) + h = ibn(h, _scale(144), expansion=8, residual=False) + endpoints['C5'] = h + return endpoints + + +def mobiledet_dsp_backbone(h, multiplier=1.0): + """Build a MobileDet DSP backbone.""" + def _scale(filters): + return _scale_filters(filters, multiplier) + + ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) + fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) + tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) + + endpoints = {} + h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) + h = _inverted_bottleneck_no_expansion( + h, _scale(24), activation_fn=tf.nn.relu6) + endpoints['C1'] = h + h = fused(h, _scale(32), expansion=4, strides=2, residual=False) + h = fused(h, _scale(32), expansion=4) + h = ibn(h, _scale(32), expansion=4) + h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.75) + endpoints['C2'] = h + h = fused(h, _scale(64), expansion=8, strides=2, residual=False) + h = ibn(h, _scale(64), expansion=4) + h = fused(h, _scale(64), expansion=4) + h = fused(h, _scale(64), expansion=4) + endpoints['C3'] = h + h = fused(h, _scale(120), expansion=8, strides=2, residual=False) + h = ibn(h, _scale(120), expansion=4) + h = ibn(h, _scale(120), expansion=8) + h = ibn(h, _scale(120), expansion=8) + h = fused(h, _scale(144), expansion=8, residual=False) + h = ibn(h, _scale(144), expansion=8) + h = ibn(h, _scale(144), expansion=8) + h = ibn(h, _scale(144), expansion=8) + endpoints['C4'] = h + h = ibn(h, _scale(160), expansion=4, strides=2, residual=False) + h = ibn(h, _scale(160), expansion=4) + h = fused(h, _scale(160), expansion=4) + h = tucker(h, _scale(160), input_rank_ratio=0.75, output_rank_ratio=0.75) + h = ibn(h, _scale(240), expansion=8, residual=False) + endpoints['C5'] = h + return endpoints + + +def mobiledet_edgetpu_backbone(h, multiplier=1.0): + """Build a MobileDet EdgeTPU backbone.""" + def _scale(filters): + return _scale_filters(filters, multiplier) + + ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) + fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) + tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) + + endpoints = {} + h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) + h = tucker(h, _scale(16), + input_rank_ratio=0.25, output_rank_ratio=0.75, residual=False) + endpoints['C1'] = h + h = fused(h, _scale(16), expansion=8, strides=2, residual=False) + h = fused(h, _scale(16), expansion=4) + h = fused(h, _scale(16), expansion=8) + h = fused(h, _scale(16), expansion=4) + endpoints['C2'] = h + h = fused(h, _scale(40), expansion=8, kernel_size=5, strides=2, + residual=False) + h = fused(h, _scale(40), expansion=4) + h = fused(h, _scale(40), expansion=4) + h = fused(h, _scale(40), expansion=4) + endpoints['C3'] = h + h = ibn(h, _scale(72), expansion=8, strides=2, residual=False) + h = ibn(h, _scale(72), expansion=8) + h = fused(h, _scale(72), expansion=4) + h = fused(h, _scale(72), expansion=4) + h = ibn(h, _scale(96), expansion=8, kernel_size=5, residual=False) + h = ibn(h, _scale(96), expansion=8, kernel_size=5) + h = ibn(h, _scale(96), expansion=8) + h = ibn(h, _scale(96), expansion=8) + endpoints['C4'] = h + h = ibn(h, 
_scale(120), expansion=8, kernel_size=5, strides=2, residual=False) + h = ibn(h, _scale(120), expansion=8) + h = ibn(h, _scale(120), expansion=4, kernel_size=5) + h = ibn(h, _scale(120), expansion=8) + h = ibn(h, _scale(384), expansion=8, kernel_size=5, residual=False) + endpoints['C5'] = h + return endpoints + + +def mobiledet_gpu_backbone(h, multiplier=1.0): + """Build a MobileDet GPU backbone.""" + + def _scale(filters): + return _scale_filters(filters, multiplier) + + ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6) + fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6) + tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6) + + endpoints = {} + # block 0 + h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6) + + # block 1 + h = tucker( + h, + _scale(16), + input_rank_ratio=0.25, + output_rank_ratio=0.25, + residual=False) + endpoints['C1'] = h + + # block 2 + h = fused(h, _scale(32), expansion=8, strides=2, residual=False) + h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) + h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) + h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25) + endpoints['C2'] = h + + # block 3 + h = fused( + h, _scale(64), expansion=8, kernel_size=3, strides=2, residual=False) + h = fused(h, _scale(64), expansion=8) + h = fused(h, _scale(64), expansion=8) + h = fused(h, _scale(64), expansion=4) + endpoints['C3'] = h + + # block 4 + h = fused( + h, _scale(128), expansion=8, kernel_size=3, strides=2, residual=False) + h = fused(h, _scale(128), expansion=4) + h = fused(h, _scale(128), expansion=4) + h = fused(h, _scale(128), expansion=4) + + # block 5 + h = fused( + h, _scale(128), expansion=8, kernel_size=3, strides=1, residual=False) + h = fused(h, _scale(128), expansion=8) + h = fused(h, _scale(128), expansion=8) + h = fused(h, _scale(128), expansion=8) + endpoints['C4'] = h + + # block 6 + h = fused( + h, _scale(128), expansion=4, kernel_size=3, strides=2, residual=False) + h = fused(h, _scale(128), expansion=4) + h = fused(h, _scale(128), expansion=4) + h = fused(h, _scale(128), expansion=4) + + # block 7 + h = ibn(h, _scale(384), expansion=8, kernel_size=3, strides=1, residual=False) + endpoints['C5'] = h + return endpoints + + +class SSDMobileDetFeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor): + """Base class of SSD feature extractor using MobileDet features.""" + + def __init__(self, + backbone_fn, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobileDet'): + """MobileDet Feature Extractor for SSD Models. + + Reference: + https://arxiv.org/abs/2004.14525 + + Args: + backbone_fn: function to construct the MobileDet backbone. + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: Integer, minimum feature extractor depth (number of filters). + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the base + feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. 
+ use_depthwise: Whether to use depthwise convolutions in the SSD head. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + scope_name: scope name (string) of network variables. + """ + if use_explicit_padding: + raise NotImplementedError( + 'Explicit padding is not yet supported in MobileDet backbones.') + + super(SSDMobileDetFeatureExtractorBase, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams + ) + self._backbone_fn = backbone_fn + self._scope_name = scope_name + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. The preprocessing assumes an input + value range of [0, 255]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + padded_inputs = ops.pad_to_multiple( + preprocessed_inputs, self._pad_to_multiple) + + feature_map_layout = { + 'from_layer': ['C4', 'C5', '', '', '', ''], + # Do not specify the layer depths (number of filters) for C4 and C5, as + # their values are determined based on the backbone. 
+ 'layer_depth': [-1, -1, 512, 256, 256, 128], + 'use_depthwise': self._use_depthwise, + 'use_explicit_padding': self._use_explicit_padding, + } + + with tf.variable_scope(self._scope_name, reuse=self._reuse_weights): + with slim.arg_scope([slim.batch_norm], + is_training=self._is_training, + epsilon=0.01, decay=0.99, center=True, scale=True): + endpoints = self._backbone_fn( + padded_inputs, + multiplier=self._depth_multiplier) + + image_features = {'C4': endpoints['C4'], 'C5': endpoints['C5']} + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) + + +class SSDMobileDetCPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): + """MobileDet-CPU feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobileDetCPU'): + super(SSDMobileDetCPUFeatureExtractor, self).__init__( + backbone_fn=mobiledet_cpu_backbone, + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name) + + +class SSDMobileDetDSPFeatureExtractor(SSDMobileDetFeatureExtractorBase): + """MobileDet-DSP feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobileDetDSP'): + super(SSDMobileDetDSPFeatureExtractor, self).__init__( + backbone_fn=mobiledet_dsp_backbone, + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name) + + +class SSDMobileDetEdgeTPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): + """MobileDet-EdgeTPU feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobileDetEdgeTPU'): + super(SSDMobileDetEdgeTPUFeatureExtractor, self).__init__( + backbone_fn=mobiledet_edgetpu_backbone, + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name) + + +class SSDMobileDetGPUFeatureExtractor(SSDMobileDetFeatureExtractorBase): + 
"""MobileDet-GPU feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobileDetGPU'): + super(SSDMobileDetGPUFeatureExtractor, self).__init__( + backbone_fn=mobiledet_gpu_backbone, + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobiledet_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobiledet_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e371531b225904cd9559bd88ebf9d6deb56b869 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobiledet_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobiledet_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobiledet_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2af37554b55f68e85ddbe7587b86015e10ac65e8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobiledet_feature_extractor_tf1_test.py @@ -0,0 +1,172 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd_mobiledet_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobiledet_feature_extractor +from object_detection.utils import tf_version + +try: + from tensorflow.contrib import quantize as contrib_quantize # pylint: disable=g-import-not-at-top +except: # pylint: disable=bare-except + pass + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDMobileDetFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + feature_extractor_cls, + is_training=False, + depth_multiplier=1.0, + pad_to_multiple=1, + use_explicit_padding=False, + use_keras=False): + """Constructs a new MobileDet feature extractor. + + Args: + feature_extractor_cls: feature extractor class. + is_training: whether the network is in training mode. 
+ depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: If True, we will use 'VALID' padding for + convolutions, but prepad inputs so that the output dimensions are the + same as if 'SAME' padding were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDMobileDetFeatureExtractor object. + """ + min_depth = 32 + return feature_extractor_cls( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding) + + def test_mobiledet_cpu_returns_correct_shapes(self): + expected_feature_map_shapes = [(2, 40, 20, 72), + (2, 20, 10, 144), + (2, 10, 5, 512), + (2, 5, 3, 256), + (2, 3, 2, 256), + (2, 2, 1, 128)] + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetCPUFeatureExtractor) + image = tf.random.normal((2, 640, 320, 3)) + feature_maps = feature_extractor.extract_features(image) + + self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) + for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): + self.assertTrue(x.shape.is_compatible_with(expected_shape)) + + def test_mobiledet_dsp_returns_correct_shapes(self): + expected_feature_map_shapes = [(2, 40, 20, 144), + (2, 20, 10, 240), + (2, 10, 5, 512), + (2, 5, 3, 256), + (2, 3, 2, 256), + (2, 2, 1, 128)] + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetDSPFeatureExtractor) + image = tf.random.normal((2, 640, 320, 3)) + feature_maps = feature_extractor.extract_features(image) + + self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) + for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): + self.assertTrue(x.shape.is_compatible_with(expected_shape)) + + def test_mobiledet_edgetpu_returns_correct_shapes(self): + expected_feature_map_shapes = [(2, 40, 20, 96), + (2, 20, 10, 384), + (2, 10, 5, 512), + (2, 5, 3, 256), + (2, 3, 2, 256), + (2, 2, 1, 128)] + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetEdgeTPUFeatureExtractor) + image = tf.random.normal((2, 640, 320, 3)) + feature_maps = feature_extractor.extract_features(image) + + self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) + for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): + self.assertTrue(x.shape.is_compatible_with(expected_shape)) + + def test_mobiledet_gpu_returns_correct_shapes(self): + expected_feature_map_shapes = [(2, 40, 20, 128), (2, 20, 10, 384), + (2, 10, 5, 512), (2, 5, 3, 256), + (2, 3, 2, 256), (2, 2, 1, 128)] + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetGPUFeatureExtractor) + image = tf.random.normal((2, 640, 320, 3)) + feature_maps = feature_extractor.extract_features(image) + + self.assertEqual(len(expected_feature_map_shapes), len(feature_maps)) + for expected_shape, x in zip(expected_feature_map_shapes, feature_maps): + self.assertTrue(x.shape.is_compatible_with(expected_shape)) + + def _check_quantization(self, model_fn): + checkpoint_dir = self.get_temp_dir() + + with tf.Graph().as_default() as training_graph: + model_fn(is_training=True) + contrib_quantize.experimental_create_training_graph(training_graph) + with self.session(graph=training_graph) as sess: + 
sess.run(tf.global_variables_initializer()) + tf.train.Saver().save(sess, checkpoint_dir) + + with tf.Graph().as_default() as eval_graph: + model_fn(is_training=False) + contrib_quantize.experimental_create_eval_graph(eval_graph) + with self.session(graph=eval_graph) as sess: + tf.train.Saver().restore(sess, checkpoint_dir) + + def test_mobiledet_cpu_quantization(self): + def model_fn(is_training): + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetCPUFeatureExtractor, + is_training=is_training) + image = tf.random.normal((2, 320, 320, 3)) + feature_extractor.extract_features(image) + self._check_quantization(model_fn) + + def test_mobiledet_dsp_quantization(self): + def model_fn(is_training): + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetDSPFeatureExtractor, + is_training=is_training) + image = tf.random.normal((2, 320, 320, 3)) + feature_extractor.extract_features(image) + self._check_quantization(model_fn) + + def test_mobiledet_edgetpu_quantization(self): + def model_fn(is_training): + feature_extractor = self._create_feature_extractor( + ssd_mobiledet_feature_extractor.SSDMobileDetEdgeTPUFeatureExtractor, + is_training=is_training) + image = tf.random.normal((2, 320, 320, 3)) + feature_extractor.extract_features(image) + self._check_quantization(model_fn) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..6de4cae310e05b92032b76d63120ab4b24eadd0d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.py @@ -0,0 +1,49 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
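The `_check_quantization` helper above exercises the TF1 quantization-aware-training round trip: build a training graph, rewrite it with fake-quant ops, checkpoint it, then restore the variables into a separately rewritten eval graph. A stripped-down sketch of the same two rewrite calls, assuming a TF1 environment where `tf.contrib.quantize` is importable (the tiny conv model is illustrative only):

import tensorflow.compat.v1 as tf
from tensorflow.contrib import quantize as contrib_quantize

def tiny_model():
    images = tf.random.normal((2, 32, 32, 3))
    return tf.layers.conv2d(images, filters=8, kernel_size=3, name='conv')

with tf.Graph().as_default() as train_graph:
    tiny_model()
    # In-place rewrite that inserts fake-quant ops for training.
    contrib_quantize.experimental_create_training_graph(train_graph)

with tf.Graph().as_default() as eval_graph:
    tiny_model()
    # Inference-time rewrite; the test above additionally saves the training
    # variables and restores them into this graph.
    contrib_quantize.experimental_create_eval_graph(eval_graph)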
+# ============================================================================== +"""SSDFeatureExtractor for MobileNetEdgeTPU features.""" + +from object_detection.models import ssd_mobilenet_v3_feature_extractor +from nets.mobilenet import mobilenet_v3 + + +class SSDMobileNetEdgeTPUFeatureExtractor( + ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3FeatureExtractorBase): + """MobileNetEdgeTPU feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobilenetEdgeTPU'): + super(SSDMobileNetEdgeTPUFeatureExtractor, self).__init__( + conv_defs=mobilenet_v3.V3_EDGETPU, + from_layer=['layer_18/expansion_output', 'layer_23'], + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name + ) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2aadd7f3c9e10ff50e41846cd8192d4e454cbd83 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_testbase.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_testbase.py new file mode 100644 index 0000000000000000000000000000000000000000..ce3290f895a4a5a0701df7d8ea110280f638f61c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_testbase.py @@ -0,0 +1,112 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Base test class for ssd_mobilenet_edgetpu_feature_extractor.""" + +import abc + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test + + +class _SsdMobilenetEdgeTPUFeatureExtractorTestBase( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + """Base class for MobilenetEdgeTPU tests.""" + + @abc.abstractmethod + def _get_input_sizes(self): + """Return feature map sizes for the two inputs to SSD head.""" + pass + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 8, 8, input_feature_sizes[0]), + (2, 4, 4, input_feature_sizes[1]), + (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_keras=False) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 19, 19, input_feature_sizes[0]), + (2, 10, 10, input_feature_sizes[1]), + (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_keras=False) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 20, 20, input_feature_sizes[0]), + (2, 10, 10, input_feature_sizes[1]), + (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_has_fused_batchnorm(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue(any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py new file mode 100644 index 
0000000000000000000000000000000000000000..841fe5a148864a0d62b52fd8f6f3e0059670dd57 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py @@ -0,0 +1,65 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd_mobilenet_edgetpu_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor +from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor_testbase +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetEdgeTPUFeatureExtractorTest( + ssd_mobilenet_edgetpu_feature_extractor_testbase + ._SsdMobilenetEdgeTPUFeatureExtractorTestBase): + + def _get_input_sizes(self): + """Return first two input feature map sizes.""" + return [384, 192] + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + use_keras=False): + """Constructs a new MobileNetEdgeTPU feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return (ssd_mobilenet_edgetpu_feature_extractor + .SSDMobileNetEdgeTPUFeatureExtractor( + False, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..7fdcdac5f6e95a891ef2ed8b5a648dca351243bb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor.py @@ -0,0 +1,138 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for MobilenetV1 features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import mobilenet_v1 + + +class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV1 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """MobileNetV1 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDMobileNetV1FeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + feature_map_layout = { + 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', + '', ''][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + + with tf.variable_scope('MobilenetV1', + reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope( + is_training=None, regularize_depthwise=True)): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams + else context_manager.IdentityContextManager()): + _, image_features = mobilenet_v1.mobilenet_v1_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Conv2d_13_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa4015d59be80676ae558ab808baa58c4f1a450b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2f1d4839693c891b550e04cdaff391219c4b8cf1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf1_test.py @@ -0,0 +1,272 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for SSD Mobilenet V1 feature extractors. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V1 feature extractors in SSD. 
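For orientation, the feature-map shapes asserted in the tests that follow come from MobileNet V1's endpoint strides: Conv2d_11_pointwise sits at stride 16, Conv2d_13_pointwise at stride 32, and each extra SSD layer halves the resolution again under 'SAME' padding. A minimal sketch of that arithmetic (illustrative only, not part of the patched files):

import math

def ssd_mobilenet_v1_feature_map_sizes(image_size, num_layers=6):
    # First two maps reuse backbone endpoints at strides 16 and 32; the
    # remaining SSD layers each halve the previous resolution (rounding up).
    sizes = [math.ceil(image_size / 16), math.ceil(image_size / 32)]
    while len(sizes) < num_layers:
        sizes.append(math.ceil(sizes[-1] / 2))
    return sizes

print(ssd_mobilenet_v1_feature_map_sizes(128))  # [8, 4, 2, 1, 1, 1]
print(ssd_mobilenet_v1_feature_map_sizes(299))  # [19, 10, 5, 3, 2, 1]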
+""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v1_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV1FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + is_training=False, + use_keras=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + is_training: whether the network is in training mode. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + del use_keras + return ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=False) + + def test_extract_features_with_dynamic_image_shape(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + 
expected_feature_map_shape, + use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32), + (2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32), + (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=False) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + use_keras=False) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV1' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name, use_keras=False) + + def test_variable_count(self): + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, pad_to_multiple, use_keras=False) + self.assertEqual(len(variables), 151) + + def test_has_fused_batchnorm(self): + image_height = 40 + image_width = 40 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue( + any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 
1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, num_layers=4, + use_keras=False) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b60537b886909edbc7236f799c51733b8030380a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf2_test.py @@ -0,0 +1,248 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for SSD Mobilenet V1 feature extractors. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V1 feature extractors in SSD. +""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v1_keras_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SsdMobilenetV1FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + is_training=False, + use_keras=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + is_training: whether the network is in training mode. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. 
+ """ + del use_keras + min_depth = 32 + return (ssd_mobilenet_v1_keras_feature_extractor + .SSDMobileNetV1KerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers, + name='MobilenetV1')) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=True) + + def test_extract_features_with_dynamic_image_shape(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32), + (2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32), + (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024), + (2, 
5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=True) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + use_keras=True) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=True) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, num_layers=4, + use_keras=True) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..37f8eb837b53053a64fb41aaaf16be06388f1478 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.py @@ -0,0 +1,202 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""SSD MobilenetV1 FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import functools +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import mobilenet_v1 + + +# A modified config of mobilenet v1 that makes it more detection friendly, +def _create_modified_mobilenet_config(): + conv_defs = copy.deepcopy(mobilenet_v1.MOBILENETV1_CONV_DEFS) + conv_defs[-2] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=512) + conv_defs[-1] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=256) + return conv_defs + + +class SSDMobileNetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV1 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD FPN feature extractor based on Mobilenet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the base + feature extractor. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to MobileNet v1 layers + {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, + Conv2d_13_pointwise}, respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. 
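The fpn_min_level and fpn_max_level arguments documented above select which MobileNet V1 endpoints feed the FPN and how many extra stride-2 layers are appended; level L corresponds to stride 2**L. A small sketch of that mapping, mirroring the feature_blocks list and the bottom_up_Conv2d_* naming used further down (illustrative only, not part of the patched files):

feature_blocks = ['Conv2d_3_pointwise', 'Conv2d_5_pointwise',
                  'Conv2d_11_pointwise', 'Conv2d_13_pointwise']

def fpn_layer_plan(fpn_min_level=3, fpn_max_level=7):
    # Levels up to 5 come from backbone endpoints (feature_blocks[level - 2]);
    # higher levels are built with additional stride-2 convolutions.
    base_max = min(fpn_max_level, 5)
    plan = {level: feature_blocks[level - 2]
            for level in range(fpn_min_level, base_max + 1)}
    for level in range(base_max + 1, fpn_max_level + 1):
        plan[level] = 'bottom_up_Conv2d_{}'.format(level - base_max + 13)
    return plan

print(fpn_layer_plan())
# {3: 'Conv2d_5_pointwise', 4: 'Conv2d_11_pointwise', 5: 'Conv2d_13_pointwise',
#  6: 'bottom_up_Conv2d_14', 7: 'bottom_up_Conv2d_15'}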
+ """ + super(SSDMobileNetV1FpnFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._conv_defs = None + if self._use_depthwise: + self._conv_defs = _create_modified_mobilenet_config() + self._use_native_resize_op = use_native_resize_op + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + with tf.variable_scope('MobilenetV1', + reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope( + is_training=None, regularize_depthwise=True)): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams + else context_manager.IdentityContextManager()): + _, image_features = mobilenet_v1.mobilenet_v1_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Conv2d_13_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + conv_defs=self._conv_defs, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + + depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope('fpn', reuse=self._reuse_weights): + feature_blocks = [ + 'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise', + 'Conv2d_13_pointwise' + ] + base_fpn_max_level = min(self._fpn_max_level, 5) + feature_block_list = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_block_list.append(feature_blocks[level - 2]) + fpn_features = feature_map_generators.fpn_top_down_feature_maps( + [(key, image_features[key]) for key in feature_block_list], + depth=depth_fn(self._additional_layer_depth), + use_depthwise=self._use_depthwise, + use_explicit_padding=self._use_explicit_padding, + use_native_resize_op=self._use_native_resize_op) + feature_maps = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_{}'.format( + feature_blocks[level - 2])]) + last_feature_map = fpn_features['top_down_{}'.format( + feature_blocks[base_fpn_max_level - 2])] + # Construct coarse features + padding = 'VALID' if self._use_explicit_padding else 'SAME' + kernel_size = 3 + for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1): + if self._use_depthwise: + conv_op = functools.partial( + 
slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + if self._use_explicit_padding: + last_feature_map = ops.fixed_padding( + last_feature_map, kernel_size) + last_feature_map = conv_op( + last_feature_map, + num_outputs=depth_fn(self._additional_layer_depth), + kernel_size=[kernel_size, kernel_size], + stride=2, + padding=padding, + scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13)) + feature_maps.append(last_feature_map) + return feature_maps diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..991aced06a8216c6b7e2189b138d64665da92451 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..449b7803d390f60747f0f4d67d8b98414a7d24eb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py @@ -0,0 +1,206 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v1_fpn_feature_extractor. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V1 FPN feature extractors in SSD. +""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v1_fpn_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV1FpnFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + is_training=True, use_explicit_padding=False, + use_keras=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. 
+ Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + del use_keras + min_depth = 32 + return (ssd_mobilenet_v1_fpn_feature_extractor. + SSDMobileNetV1FpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_depthwise=True, + use_explicit_padding=use_explicit_padding)) + + def test_extract_features_returns_correct_shapes_256(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_384(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=False) + + def test_extract_features_with_dynamic_image_shape(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=False) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), + (2, 8, 8, 32), (2, 4, 4, 32), + (2, 2, 2, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + 
use_keras=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=False) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple, + use_keras=False) + + def test_preprocess_returns_correct_value_range(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple, + use_keras=False) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV1' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name, use_keras=False) + + def test_variable_count(self): + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, pad_to_multiple, use_keras=False) + self.assertEqual(len(variables), 153) + + def test_fused_batchnorm(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple, + use_keras=False) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + + self.assertTrue( + any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..307cfa8b0b5594f921fee670699cc026ec16fbce --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py @@ -0,0 +1,179 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v1_fpn_feature_extractor. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V1 FPN feature extractors in SSD. 
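The FPN tests that follow all expect one 256-channel map per pyramid level (the default additional_layer_depth), with level L at 1/2**L of the padded input resolution. A quick check of the asserted spatial sizes (illustrative only, not part of the patched files):

import math

def fpn_map_sizes(image_size, fpn_min_level=3, fpn_max_level=7):
    # Level L has an effective stride of 2**L; 'SAME' padding rounds up.
    return [math.ceil(image_size / 2**level)
            for level in range(fpn_min_level, fpn_max_level + 1)]

print(fpn_map_sizes(256))  # [32, 16, 8, 4, 2]
print(fpn_map_sizes(320))  # [40, 20, 10, 5, 3]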
+""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v1_fpn_keras_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SsdMobilenetV1FpnFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + is_training=True, use_explicit_padding=False, + use_keras=True): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + del use_keras + return (ssd_mobilenet_v1_fpn_keras_feature_extractor. + SSDMobileNetV1FpnKerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + use_explicit_padding=use_explicit_padding, + use_depthwise=True, + name='MobilenetV1_FPN')) + + def test_extract_features_returns_correct_shapes_256(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_384(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=True) + + def test_extract_features_with_dynamic_image_shape(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, 
image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=True) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), + (2, 8, 8, 32), (2, 4, 4, 32), + (2, 2, 2, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, + use_keras=True) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, + use_keras=True) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple, + use_keras=True) + + def test_preprocess_returns_correct_value_range(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple, + use_keras=True) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_keras_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..7792931875dc122ea938f8c87633e31f4adc4336 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_fpn_keras_feature_extractor.py @@ -0,0 +1,256 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""SSD Keras-based MobilenetV1 FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import mobilenet_v1 +from object_detection.models.keras_models import model_utils +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +# A modified config of mobilenet v1 that makes it more detection friendly. +def _create_modified_mobilenet_config(): + conv_def_block_12 = model_utils.ConvDefs(conv_name='conv_pw_12', filters=512) + conv_def_block_13 = model_utils.ConvDefs(conv_name='conv_pw_13', filters=256) + return [conv_def_block_12, conv_def_block_13] + + +class SSDMobileNetV1FpnKerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based MobilenetV1 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False, + name=None): + """SSD Keras based FPN feature extractor Mobilenet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to MobileNet v1 layers + {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, + Conv2d_13_pointwise}, respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: whether to use depthwise convolutions. Default is False. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. 
+ override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDMobileNetV1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._conv_defs = None + if self._use_depthwise: + self._conv_defs = _create_modified_mobilenet_config() + self._use_native_resize_op = use_native_resize_op + self._feature_blocks = [ + 'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise', + 'Conv2d_13_pointwise' + ] + self.classification_backbone = None + self._fpn_features_generator = None + self._coarse_feature_layers = [] + + def build(self, input_shape): + full_mobilenet_v1 = mobilenet_v1.mobilenet_v1( + batchnorm_training=(self._is_training and not self._freeze_batchnorm), + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + weights=None, + use_explicit_padding=self._use_explicit_padding, + alpha=self._depth_multiplier, + min_depth=self._min_depth, + conv_defs=self._conv_defs, + include_top=False) + conv2d_3_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_3_relu').output + conv2d_5_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_5_relu').output + conv2d_11_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_11_relu').output + conv2d_13_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_13_relu').output + self.classification_backbone = tf.keras.Model( + inputs=full_mobilenet_v1.inputs, + outputs=[conv2d_3_pointwise, conv2d_5_pointwise, + conv2d_11_pointwise, conv2d_13_pointwise] + ) + # pylint:disable=g-long-lambda + self._depth_fn = lambda d: max( + int(d * self._depth_multiplier), self._min_depth) + self._base_fpn_max_level = min(self._fpn_max_level, 5) + self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level + self._fpn_features_generator = ( + feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=self._num_levels, + depth=self._depth_fn(self._additional_layer_depth), + use_depthwise=self._use_depthwise, + use_explicit_padding=self._use_explicit_padding, + use_native_resize_op=self._use_native_resize_op, + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + # Construct coarse feature layers + padding = 'VALID' if self._use_explicit_padding else 'SAME' + kernel_size = 3 + stride = 2 + for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1): + coarse_feature_layers = [] + if self._use_explicit_padding: + def fixed_padding(features, kernel_size=kernel_size): + return ops.fixed_padding(features, kernel_size) + coarse_feature_layers.append(tf.keras.layers.Lambda( + fixed_padding, name='fixed_padding')) + layer_name = 'bottom_up_Conv2d_{}'.format( + i - self._base_fpn_max_level + 13) + conv_block = 
feature_map_generators.create_conv_block( + self._use_depthwise, kernel_size, padding, stride, layer_name, + self._conv_hyperparams, self._is_training, self._freeze_batchnorm, + self._depth_fn(self._additional_layer_depth)) + coarse_feature_layers.extend(conv_block) + self._coarse_feature_layers.append(coarse_feature_layers) + self.built = True + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + image_features = self.classification_backbone( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + feature_block_list = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_block_list.append(self._feature_blocks[level - 2]) + + feature_start_index = len(self._feature_blocks) - self._num_levels + fpn_input_image_features = [ + (key, image_features[feature_start_index + index]) + for index, key in enumerate(feature_block_list)] + fpn_features = self._fpn_features_generator(fpn_input_image_features) + + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_{}'.format( + self._feature_blocks[level - 2])]) + last_feature_map = fpn_features['top_down_{}'.format( + self._feature_blocks[self._base_fpn_max_level - 2])] + + for coarse_feature_layers in self._coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(last_feature_map) + return feature_maps + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map for restoring from an (object-based) checkpoint. + + Args: + feature_extractor_scope: A scope name for the feature extractor (unused). + + Returns: + A dict mapping keys to Keras models + """ + return {'feature_extractor': self.classification_backbone} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_keras_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..2f0df91540ae3598cde3d08c764b023c3c7f758e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_keras_feature_extractor.py @@ -0,0 +1,165 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for Keras MobilenetV1 features.""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import mobilenet_v1 +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +class SSDMobileNetV1KerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Feature Extractor using Keras MobilenetV1 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False, + name=None): + """Keras MobileNetV1 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. 
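As with the slim version of this extractor, the SSD head layout is fixed up front and simply truncated to num_layers: the first two entries reuse backbone endpoints (layer_depth of -1) and the rest are extra convolutions appended on top. A small sketch of that truncation, mirroring the _feature_map_layout defined in the constructor below (illustrative only, not part of the patched files):

def feature_map_layout(num_layers=6):
    return {
        'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise',
                       '', '', '', ''][:num_layers],
        'layer_depth': [-1, -1, 512, 256, 256, 128][:num_layers],
    }

print(feature_map_layout(4))
# {'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', ''],
#  'layer_depth': [-1, -1, 512, 256]}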
+ """ + super(SSDMobileNetV1KerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + self._feature_map_layout = { + 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', + '', ''][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + self.classification_backbone = None + self._feature_map_generator = None + + def build(self, input_shape): + full_mobilenet_v1 = mobilenet_v1.mobilenet_v1( + batchnorm_training=(self._is_training and not self._freeze_batchnorm), + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + weights=None, + use_explicit_padding=self._use_explicit_padding, + alpha=self._depth_multiplier, + min_depth=self._min_depth, + include_top=False) + conv2d_11_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_11_relu').output + conv2d_13_pointwise = full_mobilenet_v1.get_layer( + name='conv_pw_13_relu').output + self.classification_backbone = tf.keras.Model( + inputs=full_mobilenet_v1.inputs, + outputs=[conv2d_11_pointwise, conv2d_13_pointwise]) + self._feature_map_generator = ( + feature_map_generators.KerasMultiResolutionFeatureMaps( + feature_map_layout=self._feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + self.built = True + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + image_features = self.classification_backbone( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + feature_maps = self._feature_map_generator({ + 'Conv2d_11_pointwise': image_features[0], + 'Conv2d_13_pointwise': image_features[1]}) + + return list(feature_maps.values()) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..85f6a5594d2ecdb9989afe223aa71962103da394 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py @@ -0,0 +1,84 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for MobilenetV1 PPN features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import mobilenet_v1 + + +class SSDMobileNetV1PpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV1 PPN features.""" + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + with tf.variable_scope('MobilenetV1', + reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v1.mobilenet_v1_arg_scope( + is_training=None, regularize_depthwise=True)): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams + else context_manager.IdentityContextManager()): + _, image_features = mobilenet_v1.mobilenet_v1_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='Conv2d_13_pointwise', + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.pooling_pyramid_feature_maps( + base_feature_map_depth=0, + num_layers=6, + image_features={ + 'image_features': image_features['Conv2d_11_pointwise'] + }) + return list(feature_maps.values()) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45304ec76e8259aad2ed9a31e47b1e8d8a579ca9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b5918c0dfa9a3e3819df14f9d504dd63b8febc63 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor_tf1_test.py @@ -0,0 +1,186 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v1_ppn_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v1_ppn_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV1PpnFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + is_training=True, use_explicit_padding=False): + """Constructs a new feature extractor. 
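The pooling-pyramid generator above builds every additional map by stride-2 'SAME' pooling of the `Conv2d_11_pointwise` map (overall stride 16), which is why the PPN tests in this file expect a constant channel depth and roughly halving spatial sizes. A small sketch of that progression (the helper name and the base stride of 16 are stated assumptions):

import math

def ppn_spatial_sizes(image_size, base_stride=16, num_layers=6):
    # First map: backbone output at stride 16; each further map: ceil(size / 2).
    size = int(math.ceil(image_size / float(base_stride)))
    sizes = [size]
    for _ in range(num_layers - 1):
        size = int(math.ceil(size / 2.0))
        sizes.append(size)
    return sizes

print(ppn_spatial_sizes(320))  # [20, 10, 5, 3, 2, 1]
print(ppn_spatial_sizes(300))  # [19, 10, 5, 3, 2, 1]
print(ppn_spatial_sizes(640))  # [40, 20, 10, 5, 3, 2]

These values line up with the expected_feature_map_shape lists in the 320, 300 and 640 test cases.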
+ + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return (ssd_mobilenet_v1_ppn_feature_extractor. + SSDMobileNetV1PpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def test_extract_features_returns_correct_shapes_320(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512), + (2, 5, 5, 512), (2, 3, 3, 512), + (2, 2, 2, 512), (2, 1, 1, 512)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_300(self): + image_height = 300 + image_width = 300 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 512), + (2, 5, 5, 512), (2, 3, 3, 512), + (2, 2, 2, 512), (2, 1, 1, 512)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_640(self): + image_height = 640 + image_width = 640 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 40, 512), (2, 20, 20, 512), + (2, 10, 10, 512), (2, 5, 5, 512), + (2, 3, 3, 512), (2, 2, 2, 512)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_with_dynamic_image_shape(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512), + (2, 5, 5, 512), (2, 3, 3, 512), + (2, 2, 2, 512), (2, 1, 1, 512)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512), + (2, 5, 5, 512), (2, 3, 3, 
512), + (2, 2, 2, 512)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 16, 16, 32), (2, 8, 8, 32), + (2, 4, 4, 32), (2, 2, 2, 32), + (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_raises_error_with_invalid_image_size(self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV1' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name) + + def test_has_fused_batchnorm(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue(any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..e3a37e16ab0e3fd0dc5a5e36695affd4239e1843 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor.py @@ -0,0 +1,140 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for MobilenetV2 features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v2 + + +class SSDMobileNetV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV2 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """MobileNetV2 Feature Extractor for SSD Models. + + Mobilenet v2 (experimental), designed by sandler@. More details can be found + in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDMobileNetV2FeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
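`pad_to_multiple` zero-pads height and width up to the next multiple before the backbone runs, so a 299x299 image with pad_to_multiple=32 is processed as 320x320, which is what the pad-to-multiple test cases in this diff assume. A sketch of the arithmetic (the `padded_size` helper is illustrative):

import math

def padded_size(dim, multiple):
    # Smallest multiple of `multiple` that is >= dim.
    return int(math.ceil(dim / float(multiple))) * multiple

print(padded_size(299, 32))  # 320
print(padded_size(299, 1))   # 299  (a multiple of 1 leaves the input unchanged)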
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + feature_map_layout = { + 'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', '' + ][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_depthwise': self._use_depthwise, + 'use_explicit_padding': self._use_explicit_padding, + } + + with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \ + slim.arg_scope( + [mobilenet.depth_multiplier], min_depth=self._min_depth): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + _, image_features = mobilenet_v2.mobilenet_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='layer_19', + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f56c13ef8c650e563c3d4f6ddabc2681f6549e7c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..96f9bc26e120f2f4396968429f474406b67894ca --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf1_test.py @@ -0,0 +1,196 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for ssd_mobilenet_v2_feature_extractor.""" +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v2_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV2FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return ssd_mobilenet_v2_feature_extractor.SSDMobileNetV2FeatureExtractor( + False, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_128_explicit_padding( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32), + (2, 5, 5, 32), (2, 3, 3, 32), + (2, 2, 2, 32), (2, 1, 1, 
32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV2' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name) + + def test_variable_count(self): + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, pad_to_multiple) + self.assertEqual(len(variables), 292) + + def test_has_fused_batchnorm(self): + image_height = 40 + image_width = 40 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue(any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, num_layers=4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6d4cb5afcf7c978cc24e01d5806914c618cd7fd7 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf2_test.py @@ -0,0 +1,192 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v2_feature_extractor.""" +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v2_keras_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SsdMobilenetV2FeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + use_keras=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + use_keras: unused argument. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + del use_keras + min_depth = 32 + return (ssd_mobilenet_v2_keras_feature_extractor. + SSDMobileNetV2KerasFeatureExtractor( + is_training=False, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers, + name='MobilenetV2')) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=True) + + def test_extract_features_returns_correct_shapes_128_explicit_padding( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True, use_keras=True) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs( + self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=True) + + def 
test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=True) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32), + (2, 5, 5, 32), (2, 3, 3, 32), + (2, 2, 2, 32), (2, 1, 1, 32)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=True) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=True) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple, + use_keras=True) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV2' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, scope_name, use_keras=True) + + def test_variable_count(self): + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, pad_to_multiple, use_keras=True) + self.assertEqual(len(variables), 292) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False, num_layers=4, + use_keras=True) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..65cdcc85ab6b3e034279868342379864f8d3b5ef --- /dev/null +++ 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.py @@ -0,0 +1,199 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSD MobilenetV2 FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import functools +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v2 + + +# A modified config of mobilenet v2 that makes it more detection friendly. +def _create_modified_mobilenet_config(): + conv_defs = copy.deepcopy(mobilenet_v2.V2_DEF) + conv_defs['spec'][-1] = mobilenet.op( + slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=256) + return conv_defs + + +class SSDMobileNetV2FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV2 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD FPN feature extractor based on Mobilenet v2 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the base + feature extractor. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to MobileNet v2 layers + {layer_4, layer_7, layer_14, layer_19}, respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. 
+ use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDMobileNetV2FpnFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._conv_defs = None + if self._use_depthwise: + self._conv_defs = _create_modified_mobilenet_config() + self._use_native_resize_op = use_native_resize_op + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
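With the defaults `fpn_min_level=3` and `fpn_max_level=7`, levels 3-5 are taken from the backbone endpoints ('layer_7', 'layer_14', 'layer_19') and levels 6-7 are the coarse stride-2 convolutions added at the end of `extract_features` below. Since level l corresponds to an overall stride of 2**l, the per-level spatial sizes can be sketched as repeated ceil-halving (the function name is illustrative); the values reproduce the 320x320 and 256x256 shapes asserted in the FPN tests:

import math

def fpn_spatial_sizes(image_size, fpn_min_level=3, fpn_max_level=7):
    # Level l has overall stride 2**l; 'SAME' convolutions round odd sizes up.
    sizes = []
    for level in range(fpn_min_level, fpn_max_level + 1):
        size = image_size
        for _ in range(level):
            size = int(math.ceil(size / 2.0))
        sizes.append(size)
    return sizes

print(fpn_spatial_sizes(320))  # [40, 20, 10, 5, 3]
print(fpn_spatial_sizes(256))  # [32, 16, 8, 4, 2]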
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \ + slim.arg_scope( + [mobilenet.depth_multiplier], min_depth=self._min_depth): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + _, image_features = mobilenet_v2.mobilenet_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='layer_19', + depth_multiplier=self._depth_multiplier, + conv_defs=self._conv_defs, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope('fpn', reuse=self._reuse_weights): + feature_blocks = [ + 'layer_4', 'layer_7', 'layer_14', 'layer_19' + ] + base_fpn_max_level = min(self._fpn_max_level, 5) + feature_block_list = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_block_list.append(feature_blocks[level - 2]) + fpn_features = feature_map_generators.fpn_top_down_feature_maps( + [(key, image_features[key]) for key in feature_block_list], + depth=depth_fn(self._additional_layer_depth), + use_depthwise=self._use_depthwise, + use_explicit_padding=self._use_explicit_padding, + use_native_resize_op=self._use_native_resize_op) + feature_maps = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_{}'.format( + feature_blocks[level - 2])]) + last_feature_map = fpn_features['top_down_{}'.format( + feature_blocks[base_fpn_max_level - 2])] + # Construct coarse features + padding = 'VALID' if self._use_explicit_padding else 'SAME' + kernel_size = 3 + for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1): + if self._use_depthwise: + conv_op = functools.partial( + slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + if self._use_explicit_padding: + last_feature_map = ops.fixed_padding( + last_feature_map, kernel_size) + last_feature_map = conv_op( + last_feature_map, + num_outputs=depth_fn(self._additional_layer_depth), + kernel_size=[kernel_size, kernel_size], + stride=2, + padding=padding, + scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 19)) + feature_maps.append(last_feature_map) + return feature_maps diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..196cf27acad4a15ad722bd0475b34ca7a7dad6e9 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py new file mode 100644 index 
0000000000000000000000000000000000000000..9cdbed5fbe160baefb0afd41477748b9374e191f --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py @@ -0,0 +1,372 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v2_fpn_feature_extractor. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V2 FPN feature extractors in SSD. +""" +import unittest +from absl.testing import parameterized +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v2_fpn_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +@parameterized.parameters( + { + 'use_depthwise': False + }, + { + 'use_depthwise': True + }, +) +class SsdMobilenetV2FpnFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + is_training=True, + use_explicit_padding=False, + use_keras=False, + use_depthwise=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + use_depthwise: Whether to use depthwise convolutions. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. 
+ """ + del use_keras + min_depth = 32 + return (ssd_mobilenet_v2_fpn_feature_extractor + .SSDMobileNetV2FpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_depthwise=use_depthwise, + use_explicit_padding=use_explicit_padding)) + + def test_extract_features_returns_correct_shapes_256(self, use_depthwise): + use_keras = False + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_returns_correct_shapes_384(self, use_depthwise): + use_keras = False + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_with_dynamic_image_shape(self, + use_depthwise): + use_keras = False + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self, use_depthwise): + use_keras = False + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def 
test_extract_features_returns_correct_shapes_enforcing_min_depth( + self, use_depthwise): + use_keras = False + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), + (2, 8, 8, 32), (2, 4, 4, 32), + (2, 2, 2, 32)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_raises_error_with_invalid_image_size( + self, use_depthwise): + use_keras = False + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_preprocess_returns_correct_value_range(self, + use_depthwise): + use_keras = False + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_variables_only_created_in_scope(self, use_depthwise): + use_keras = False + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = 'MobilenetV2' + self.check_feature_extractor_variables_under_scope( + depth_multiplier, + pad_to_multiple, + scope_name, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_fused_batchnorm(self, use_depthwise): + use_keras = False + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue( + any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) + + def test_variable_count(self, use_depthwise): + use_keras = False + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + expected_variables_len = 274 + if use_depthwise: + expected_variables_len = 278 + self.assertEqual(len(variables), expected_variables_len) + + def test_get_expected_feature_map_variable_names(self, + use_depthwise): + use_keras = False + depth_multiplier = 1.0 + pad_to_multiple = 1 + + slim_expected_feature_maps_variables = set([ + # Slim Mobilenet V2 feature maps + 'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights', + 'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights', + 'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights', + 'MobilenetV2/Conv_1/weights', + # FPN layers + 
'MobilenetV2/fpn/bottom_up_Conv2d_20/weights', + 'MobilenetV2/fpn/bottom_up_Conv2d_21/weights', + 'MobilenetV2/fpn/smoothing_1/weights', + 'MobilenetV2/fpn/smoothing_2/weights', + 'MobilenetV2/fpn/projection_1/weights', + 'MobilenetV2/fpn/projection_2/weights', + 'MobilenetV2/fpn/projection_3/weights', + ]) + slim_expected_feature_maps_variables_with_depthwise = set([ + # Slim Mobilenet V2 feature maps + 'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights', + 'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights', + 'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights', + 'MobilenetV2/Conv_1/weights', + # FPN layers + 'MobilenetV2/fpn/bottom_up_Conv2d_20/pointwise_weights', + 'MobilenetV2/fpn/bottom_up_Conv2d_20/depthwise_weights', + 'MobilenetV2/fpn/bottom_up_Conv2d_21/pointwise_weights', + 'MobilenetV2/fpn/bottom_up_Conv2d_21/depthwise_weights', + 'MobilenetV2/fpn/smoothing_1/depthwise_weights', + 'MobilenetV2/fpn/smoothing_1/pointwise_weights', + 'MobilenetV2/fpn/smoothing_2/depthwise_weights', + 'MobilenetV2/fpn/smoothing_2/pointwise_weights', + 'MobilenetV2/fpn/projection_1/weights', + 'MobilenetV2/fpn/projection_2/weights', + 'MobilenetV2/fpn/projection_3/weights', + ]) + + g = tf.Graph() + with g.as_default(): + preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) + feature_extractor = self._create_feature_extractor( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + + _ = feature_extractor.extract_features(preprocessed_inputs) + expected_feature_maps_variables = slim_expected_feature_maps_variables + if use_depthwise: + expected_feature_maps_variables = ( + slim_expected_feature_maps_variables_with_depthwise) + actual_variable_set = set([ + var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + ]) + variable_intersection = expected_feature_maps_variables.intersection( + actual_variable_set) + self.assertSetEqual(expected_feature_maps_variables, + variable_intersection) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..44522ac94494430cb109e084689cc6a1a1dbeddb --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py @@ -0,0 +1,269 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_mobilenet_v2_fpn_feature_extractor. + +By using parameterized test decorator, this test serves for both Slim-based and +Keras-based Mobilenet V2 FPN feature extractors in SSD. 
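The 'enforcing_min_depth' cases in these tests rely on the depth rule the extractors use when scaling the added layers, max(int(depth * depth_multiplier), min_depth): with depth_multiplier = 0.5**12 every scaled depth falls below the floor and collapses to the 32-channel min_depth, which is what those expected shapes assert for the extra feature maps. A one-line sketch (the `scaled_depth` name is illustrative):

def scaled_depth(depth, depth_multiplier=0.5**12, min_depth=32):
    # Depth scaling with a lower bound, as in the extractors' depth functions.
    return max(int(depth * depth_multiplier), min_depth)

print([scaled_depth(d) for d in (128, 256, 512, 1280)])  # [32, 32, 32, 32]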
+""" +import unittest +from absl.testing import parameterized +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v2_fpn_keras_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +@parameterized.parameters( + { + 'use_depthwise': False, + }, + { + 'use_depthwise': True, + }, +) +class SsdMobilenetV2FpnFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + is_training=True, + use_explicit_padding=False, + use_keras=False, + use_depthwise=False): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + is_training: whether the network is in training mode. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + use_depthwise: Whether to use depthwise convolutions. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + del use_keras + min_depth = 32 + return (ssd_mobilenet_v2_fpn_keras_feature_extractor + .SSDMobileNetV2FpnKerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + name='MobilenetV2_FPN')) + + def test_extract_features_returns_correct_shapes_256(self, + use_depthwise): + use_keras = True + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_returns_correct_shapes_384(self, + use_depthwise): + use_keras = True + image_height = 320 + image_width = 320 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_with_dynamic_image_shape(self, + 
use_depthwise): + use_keras = True + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self, use_depthwise): + use_keras = True + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256), + (2, 10, 10, 256), (2, 5, 5, 256), + (2, 3, 3, 256)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth( + self, use_depthwise): + use_keras = True + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32), + (2, 8, 8, 32), (2, 4, 4, 32), + (2, 2, 2, 32)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=False, + use_keras=use_keras, + use_depthwise=use_depthwise) + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_explicit_padding=True, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_extract_features_raises_error_with_invalid_image_size( + self, use_depthwise=False): + use_keras = True + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + + def test_preprocess_returns_correct_value_range(self, + use_depthwise): + use_keras = True + image_height = 256 + image_width = 256 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, + pad_to_multiple, + use_keras=use_keras, + use_depthwise=use_depthwise) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_keras_feature_extractor.py 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..0834ea6b9db2d853b06392b48b594a7c9a5f301b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_fpn_keras_feature_extractor.py @@ -0,0 +1,243 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSD Keras-based MobilenetV2 FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import mobilenet_v2 +from object_detection.models.keras_models import model_utils +from object_detection.utils import ops +from object_detection.utils import shape_utils + +# Total number of blocks in Mobilenet_V2 base network. +NUM_LAYERS = 19 + + +# A modified config of mobilenet v2 that makes it more detection friendly. +def _create_modified_mobilenet_config(): + last_conv = model_utils.ConvDefs(conv_name='Conv_1', filters=256) + return [last_conv] + + +class SSDMobileNetV2FpnKerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based MobilenetV2 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False, + name=None): + """SSD Keras based FPN feature extractor Mobilenet v2 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. 
+ fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to MobileNet v2 layers + {layer_4, layer_7, layer_14, layer_19}, respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN construction uses feature maps starting from fpn_min_level + up to the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + use_native_resize_op: Whether to use tf.image.resize_nearest_neighbor + to do upsampling in FPN. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDMobileNetV2FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._conv_defs = None + if self._use_depthwise: + self._conv_defs = _create_modified_mobilenet_config() + self._use_native_resize_op = use_native_resize_op + self._feature_blocks = ['layer_4', 'layer_7', 'layer_14', 'layer_19'] + self.classification_backbone = None + self._fpn_features_generator = None + self._coarse_feature_layers = [] + + def build(self, input_shape): + full_mobilenet_v2 = mobilenet_v2.mobilenet_v2( + batchnorm_training=(self._is_training and not self._freeze_batchnorm), + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + weights=None, + use_explicit_padding=self._use_explicit_padding, + alpha=self._depth_multiplier, + min_depth=self._min_depth, + include_top=False) + layer_names = [layer.name for layer in full_mobilenet_v2.layers] + outputs = [] + for layer_idx in [4, 7, 14]: + add_name = 'block_{}_add'.format(layer_idx - 2) + project_name = 'block_{}_project_BN'.format(layer_idx - 2) + output_layer_name = add_name if add_name in layer_names else project_name + outputs.append(full_mobilenet_v2.get_layer(output_layer_name).output) + layer_19 = full_mobilenet_v2.get_layer(name='out_relu').output + outputs.append(layer_19) + self.classification_backbone = tf.keras.Model( + inputs=full_mobilenet_v2.inputs, + outputs=outputs) + # pylint:disable=g-long-lambda + self._depth_fn = lambda d: max( + int(d * self._depth_multiplier), self._min_depth) + self._base_fpn_max_level = min(self._fpn_max_level, 5) + self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level + self._fpn_features_generator = ( +
feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=self._num_levels, + depth=self._depth_fn(self._additional_layer_depth), + use_depthwise=self._use_depthwise, + use_explicit_padding=self._use_explicit_padding, + use_native_resize_op=self._use_native_resize_op, + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + # Construct coarse feature layers + padding = 'VALID' if self._use_explicit_padding else 'SAME' + kernel_size = 3 + stride = 2 + for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1): + coarse_feature_layers = [] + if self._use_explicit_padding: + def fixed_padding(features, kernel_size=kernel_size): + return ops.fixed_padding(features, kernel_size) + coarse_feature_layers.append(tf.keras.layers.Lambda( + fixed_padding, name='fixed_padding')) + layer_name = 'bottom_up_Conv2d_{}'.format( + i - self._base_fpn_max_level + NUM_LAYERS) + conv_block = feature_map_generators.create_conv_block( + self._use_depthwise, kernel_size, padding, stride, layer_name, + self._conv_hyperparams, self._is_training, self._freeze_batchnorm, + self._depth_fn(self._additional_layer_depth)) + coarse_feature_layers.extend(conv_block) + self._coarse_feature_layers.append(coarse_feature_layers) + self.built = True + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + image_features = self.classification_backbone( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + feature_block_list = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_block_list.append(self._feature_blocks[level - 2]) + + feature_start_index = len(self._feature_blocks) - self._num_levels + fpn_input_image_features = [ + (key, image_features[feature_start_index + index]) + for index, key in enumerate(feature_block_list)] + fpn_features = self._fpn_features_generator(fpn_input_image_features) + + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_{}'.format( + self._feature_blocks[level - 2])]) + last_feature_map = fpn_features['top_down_{}'.format( + self._feature_blocks[self._base_fpn_max_level - 2])] + + for coarse_feature_layers in self._coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(last_feature_map) + return feature_maps diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..0f79fc271d55edbc0e61384948bd816fa6f9cd3b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py @@ -0,0 +1,168 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for MobilenetV2 features.""" + +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import mobilenet_v2 +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +class SSDMobileNetV2KerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Feature Extractor using MobilenetV2 features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False, + name=None): + """MobileNetV2 Feature Extractor for SSD Models. + + Mobilenet v2 (experimental), designed by sandler@. 
More details can be found + in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor (Functions + as a width multiplier for the mobilenet_v2 network itself). + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + name: A string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDMobileNetV2KerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + self._feature_map_layout = { + 'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', '' + ][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_depthwise': self._use_depthwise, + 'use_explicit_padding': self._use_explicit_padding, + } + + self.classification_backbone = None + self.feature_map_generator = None + + def build(self, input_shape): + full_mobilenet_v2 = mobilenet_v2.mobilenet_v2( + batchnorm_training=(self._is_training and not self._freeze_batchnorm), + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + weights=None, + use_explicit_padding=self._use_explicit_padding, + alpha=self._depth_multiplier, + min_depth=self._min_depth, + include_top=False) + conv2d_11_pointwise = full_mobilenet_v2.get_layer( + name='block_13_expand_relu').output + conv2d_13_pointwise = full_mobilenet_v2.get_layer(name='out_relu').output + self.classification_backbone = tf.keras.Model( + inputs=full_mobilenet_v2.inputs, + outputs=[conv2d_11_pointwise, conv2d_13_pointwise]) + self.feature_map_generator = ( + feature_map_generators.KerasMultiResolutionFeatureMaps( + feature_map_layout=self._feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + 
self.built = True + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + image_features = self.classification_backbone( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + feature_maps = self.feature_map_generator({ + 'layer_15/expansion_output': image_features[0], + 'layer_19': image_features[1]}) + + return list(feature_maps.values()) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..be1d55a0390a218b21ba809c952d6f8ee58f995c --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.py @@ -0,0 +1,412 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""SSD MobilenetV2 NAS-FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import functools +from six.moves import range +import tensorflow.compat.v1 as tf + +import tf_slim as slim +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v2 + + +Block = collections.namedtuple( + 'Block', ['inputs', 'output_level', 'kernel_size', 'expansion_size']) + +_MNASFPN_CELL_CONFIG = [ + Block(inputs=(1, 2), output_level=4, kernel_size=3, expansion_size=256), + Block(inputs=(0, 4), output_level=3, kernel_size=3, expansion_size=128), + Block(inputs=(5, 4), output_level=4, kernel_size=3, expansion_size=128), + Block(inputs=(4, 3), output_level=5, kernel_size=5, expansion_size=128), + Block(inputs=(4, 3), output_level=6, kernel_size=3, expansion_size=96), +] + +MNASFPN_DEF = dict( + feature_levels=[3, 4, 5, 6], + spec=[_MNASFPN_CELL_CONFIG] * 4, +) + + +def _maybe_pad(feature, use_explicit_padding, kernel_size=3): + return ops.fixed_padding(feature, + kernel_size) if use_explicit_padding else feature + + +# Wrapper around mobilenet.depth_multiplier +def _apply_multiplier(d, multiplier, min_depth): + p = {'num_outputs': d} + mobilenet.depth_multiplier( + p, multiplier=multiplier, divisible_by=8, min_depth=min_depth) + return p['num_outputs'] + + +def _apply_size_dependent_ordering(input_feature, feature_level, block_level, + expansion_size, use_explicit_padding, + use_native_resize_op): + """Applies Size-Dependent-Ordering when resizing feature maps. + + See https://arxiv.org/abs/1912.01106 + + Args: + input_feature: input feature map to be resized. + feature_level: the level of the input feature. + block_level: the desired output level for the block. + expansion_size: the expansion size for the block. + use_explicit_padding: Whether to use explicit padding. + use_native_resize_op: Whether to use native resize op. + + Returns: + A transformed feature at the desired resolution and expansion size. + """ + padding = 'VALID' if use_explicit_padding else 'SAME' + if feature_level >= block_level: # Perform 1x1 then upsampling. + node = slim.conv2d( + input_feature, + expansion_size, [1, 1], + activation_fn=None, + normalizer_fn=slim.batch_norm, + padding=padding, + scope='Conv1x1') + if feature_level == block_level: + return node + scale = 2**(feature_level - block_level) + if use_native_resize_op: + input_shape = shape_utils.combined_static_and_dynamic_shape(node) + node = tf.image.resize_nearest_neighbor( + node, [input_shape[1] * scale, input_shape[2] * scale]) + else: + node = ops.nearest_neighbor_upsampling(node, scale=scale) + else: # Perform downsampling then 1x1. + stride = 2**(block_level - feature_level) + node = slim.max_pool2d( + _maybe_pad(input_feature, use_explicit_padding), [3, 3], + stride=[stride, stride], + padding=padding, + scope='Downsample') + node = slim.conv2d( + node, + expansion_size, [1, 1], + activation_fn=None, + normalizer_fn=slim.batch_norm, + padding=padding, + scope='Conv1x1') + return node + + +def _mnasfpn_cell(feature_maps, + feature_levels, + cell_spec, + output_channel=48, + use_explicit_padding=False, + use_native_resize_op=False, + multiplier_func=None): + """Create a MnasFPN cell. 
+ + Args: + feature_maps: input feature maps. + feature_levels: levels of the feature maps. + cell_spec: A list of Block configs. + output_channel: Number of features for the input, output and intermediate + feature maps. + use_explicit_padding: Whether to use explicit padding. + use_native_resize_op: Whether to use native resize op. + multiplier_func: Depth-multiplier function. If None, use identity function. + + Returns: + A transformed list of feature maps at the same resolutions as the inputs. + """ + # This is the level where multipliers are realized. + if multiplier_func is None: + multiplier_func = lambda x: x + num_outputs = len(feature_maps) + cell_features = list(feature_maps) + cell_levels = list(feature_levels) + padding = 'VALID' if use_explicit_padding else 'SAME' + for bi, block in enumerate(cell_spec): + with tf.variable_scope('block_{}'.format(bi)): + block_level = block.output_level + intermediate_feature = None + for i, inp in enumerate(block.inputs): + with tf.variable_scope('input_{}'.format(i)): + input_level = cell_levels[inp] + node = _apply_size_dependent_ordering( + cell_features[inp], input_level, block_level, + multiplier_func(block.expansion_size), use_explicit_padding, + use_native_resize_op) + # Add features incrementally to avoid producing AddN, which doesn't + # play well with TfLite. + if intermediate_feature is None: + intermediate_feature = node + else: + intermediate_feature += node + node = tf.nn.relu6(intermediate_feature) + node = slim.separable_conv2d( + _maybe_pad(node, use_explicit_padding, block.kernel_size), + multiplier_func(output_channel), + block.kernel_size, + activation_fn=None, + normalizer_fn=slim.batch_norm, + padding=padding, + scope='SepConv') + cell_features.append(node) + cell_levels.append(block_level) + + # Cell-wide residuals. + out_idx = range(len(cell_features) - num_outputs, len(cell_features)) + for in_i, out_i in enumerate(out_idx): + if cell_features[out_i].shape.as_list( + ) == cell_features[in_i].shape.as_list(): + cell_features[out_i] += cell_features[in_i] + + return cell_features[-num_outputs:] + + +def mnasfpn(feature_maps, + head_def, + output_channel=48, + use_explicit_padding=False, + use_native_resize_op=False, + multiplier_func=None): + """Create the MnasFPN head given head_def.""" + features = feature_maps + for ci, cell_spec in enumerate(head_def['spec']): + with tf.variable_scope('cell_{}'.format(ci)): + features = _mnasfpn_cell(features, head_def['feature_levels'], cell_spec, + output_channel, use_explicit_padding, + use_native_resize_op, multiplier_func) + return features + + +def training_scope(l2_weight_decay=1e-4, is_training=None): + """Arg scope for training MnasFPN.""" + with slim.arg_scope( + [slim.conv2d], + weights_initializer=tf.initializers.he_normal(), + weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \ + slim.arg_scope( + [slim.separable_conv2d], + weights_initializer=tf.initializers.truncated_normal( + stddev=0.536), # He_normal for 3x3 depthwise kernel. 
+ weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \ + slim.arg_scope([slim.batch_norm], + is_training=is_training, + epsilon=0.01, + decay=0.99, + center=True, + scale=True) as s: + return s + + + class SSDMobileNetV2MnasFPNFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using MobilenetV2 MnasFPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=6, + additional_layer_depth=48, + head_def=None, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False, + data_format='channels_last'): + """SSD MnasFPN feature extractor based on Mobilenet v2 architecture. + + See https://arxiv.org/abs/1912.01106 + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the base + feature extractor. + fpn_min_level: the highest resolution feature map to use in MnasFPN. + Currently the only valid value is 3. + fpn_max_level: the smallest resolution feature map to construct or use in + MnasFPN. Currently the only valid value is 6. + additional_layer_depth: additional feature map layer channel depth for + NAS-FPN. + head_def: A dictionary specifying the MnasFPN head architecture. Default + uses MNASFPN_DEF. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + use_native_resize_op: Whether to use native resize op. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + data_format: The ordering of the dimensions in the inputs. The valid + values are {'channels_first', 'channels_last'}. + """ + super(SSDMobileNetV2MnasFPNFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=( + override_base_feature_extractor_hyperparams)) + if fpn_min_level != 3 or fpn_max_level != 6: + raise ValueError('Min and max levels of MnasFPN must be 3 and 6 for now.') + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._fpn_layer_depth = additional_layer_depth + self._head_def = head_def if head_def else MNASFPN_DEF + self._data_format = data_format + self._use_native_resize_op = use_native_resize_op + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images.
+ """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def _verify_config(self, inputs): + """Verify the MnasFPN config and its inputs.""" + num_inputs = len(inputs) + assert len(self._head_def['feature_levels']) == num_inputs + + base_width = inputs[0].shape.as_list( + )[1] * 2**self._head_def['feature_levels'][0] + for i in range(1, num_inputs): + width = inputs[i].shape.as_list()[1] + level = self._head_def['feature_levels'][i] + expected_width = base_width // 2**level + if width != expected_width: + raise ValueError( + 'Resolution of input {} does not match its level {}.'.format( + i, level)) + + for cell_spec in self._head_def['spec']: + # The last K nodes in a cell are the inputs to the next cell. Assert that + # their feature maps are at the right level. + for i in range(num_inputs): + if cell_spec[-num_inputs + + i].output_level != self._head_def['feature_levels'][i]: + raise ValueError( + 'Mismatch between node level {} and desired output level {}.' + .format(cell_spec[-num_inputs + i].output_level, + self._head_def['feature_levels'][i])) + # Assert that each block only uses preceding blocks. + for bi, block_spec in enumerate(cell_spec): + for inp in block_spec.inputs: + if inp >= bi + num_inputs: + raise ValueError( + 'Block {} is trying to access uncreated block {}.'.format( + bi, inp)) + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v2.training_scope(is_training=None, bn_decay=0.99)), \ + slim.arg_scope( + [mobilenet.depth_multiplier], min_depth=self._min_depth): + with slim.arg_scope( + training_scope(l2_weight_decay=4e-5, + is_training=self._is_training)): + + _, image_features = mobilenet_v2.mobilenet_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + final_endpoint='layer_18', + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + + multiplier_func = functools.partial( + _apply_multiplier, + multiplier=self._depth_multiplier, + min_depth=self._min_depth) + with tf.variable_scope('MnasFPN', reuse=self._reuse_weights): + with slim.arg_scope( + training_scope(l2_weight_decay=1e-4, is_training=self._is_training)): + # Create C6 by downsampling C5. + c6 = slim.max_pool2d( + _maybe_pad(image_features['layer_18'], self._use_explicit_padding), + [3, 3], + stride=[2, 2], + padding='VALID' if self._use_explicit_padding else 'SAME', + scope='C6_downsample') + c6 = slim.conv2d( + c6, + multiplier_func(self._fpn_layer_depth), + [1, 1], + activation_fn=tf.identity, + normalizer_fn=slim.batch_norm, + weights_regularizer=None, # this 1x1 has no kernel regularizer. + padding='VALID', + scope='C6_Conv1x1') + image_features['C6'] = tf.identity(c6) # Needed for quantization.
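+ # The MnasFPN head consumes four inputs at levels 3-6: layer_7 (C3), + # layer_14 (C4) and layer_18 (C5) from the MobilenetV2 backbone, plus the + # pooled and projected C6 created above. The loop below only dumps the + # available endpoints to the log before they are assembled into + # mnasfpn_inputs.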
+ for k in sorted(image_features.keys()): + tf.logging.error('{}: {}'.format(k, image_features[k])) + + mnasfpn_inputs = [ + image_features['layer_7'], # C3 + image_features['layer_14'], # C4 + image_features['layer_18'], # C5 + image_features['C6'] # C6 + ] + self._verify_config(mnasfpn_inputs) + feature_maps = mnasfpn( + mnasfpn_inputs, + head_def=self._head_def, + output_channel=self._fpn_layer_depth, + use_explicit_padding=self._use_explicit_padding, + use_native_resize_op=self._use_native_resize_op, + multiplier_func=multiplier_func) + return feature_maps diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6937c1a985f9bfaf61635b7995b4a62f7ed137c3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..032433128de057c97a422c97e96d16bd2942f62b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py @@ -0,0 +1,87 @@ +# Lint as: python2, python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for ssd_mobilenet_v2_nas_fpn_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_mobilenet_v2_mnasfpn_feature_extractor as mnasfpn_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV2MnasFPNFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False): + min_depth = 16 + is_training = True + fpn_num_filters = 48 + return mnasfpn_feature_extractor.SSDMobileNetV2MnasFPNFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + additional_layer_depth=fpn_num_filters, + use_explicit_padding=use_explicit_padding) + + def test_extract_features_returns_correct_shapes_320_256(self): + image_height = 320 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 40, 32, 48), (2, 20, 16, 48), + (2, 10, 8, 48), (2, 5, 4, 48)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5**12 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 16), (2, 16, 16, 16), + (2, 8, 8, 16), (2, 4, 4, 16)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=False) + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_explicit_padding=True) + + def test_preprocess_returns_correct_value_range(self): + image_height = 320 + image_width = 320 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..cc85fdccb793ee8f4e1d3197a957f450fa108f75 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor.py @@ -0,0 +1,218 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SSDFeatureExtractor for MobileNetV3 features.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets.mobilenet import mobilenet +from nets.mobilenet import mobilenet_v3 + + +class SSDMobileNetV3FeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor): + """Base class of SSD feature extractor using MobilenetV3 features.""" + + def __init__(self, + conv_defs, + from_layer, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobilenetV3'): + """MobileNetV3 Feature Extractor for SSD Models. + + MobileNet v3. Details found in: + https://arxiv.org/abs/1905.02244 + + Args: + conv_defs: MobileNetV3 conv defs for backbone. + from_layer: A cell of two layer names (string) to connect to the 1st and + 2nd inputs of the SSD head. + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the base + feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + scope_name: scope name (string) of network variables. + """ + super(SSDMobileNetV3FeatureExtractorBase, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams + ) + self._conv_defs = conv_defs + self._from_layer = from_layer + self._scope_name = scope_name + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + Raises: + ValueError if conv_defs is not provided or from_layer does not meet the + size requirement. + """ + + if not self._conv_defs: + raise ValueError('Must provide backbone conv defs.') + + if len(self._from_layer) != 2: + raise ValueError('SSD input feature names are not provided.') + + preprocessed_inputs = shape_utils.check_min_image_dim( + 33, preprocessed_inputs) + + feature_map_layout = { + 'from_layer': [ + self._from_layer[0], self._from_layer[1], '', '', '', '' + ], + 'layer_depth': [-1, -1, 512, 256, 256, 128], + 'use_depthwise': self._use_depthwise, + 'use_explicit_padding': self._use_explicit_padding, + } + + with tf.variable_scope( + self._scope_name, reuse=self._reuse_weights) as scope: + with slim.arg_scope( + mobilenet_v3.training_scope(is_training=None, bn_decay=0.9997)), \ + slim.arg_scope( + [mobilenet.depth_multiplier], min_depth=self._min_depth): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + _, image_features = mobilenet_v3.mobilenet_base( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + conv_defs=self._conv_defs, + final_endpoint=self._from_layer[1], + depth_multiplier=self._depth_multiplier, + use_explicit_padding=self._use_explicit_padding, + scope=scope) + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) + + +class SSDMobileNetV3LargeFeatureExtractor(SSDMobileNetV3FeatureExtractorBase): + """Mobilenet V3-Large feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobilenetV3'): + super(SSDMobileNetV3LargeFeatureExtractor, self).__init__( + conv_defs=mobilenet_v3.V3_LARGE_DETECTION, + from_layer=['layer_14/expansion_output', 'layer_17'], + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name + ) + + +class SSDMobileNetV3SmallFeatureExtractor(SSDMobileNetV3FeatureExtractorBase): + """Mobilenet V3-Small feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False, + scope_name='MobilenetV3'): + super(SSDMobileNetV3SmallFeatureExtractor, self).__init__( + conv_defs=mobilenet_v3.V3_SMALL_DETECTION, + 
from_layer=['layer_10/expansion_output', 'layer_13'], + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams, + scope_name=scope_name + ) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee5ed2dc5f76c5f936b04e06e03d9b741dcf144e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor_testbase.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor_testbase.py new file mode 100644 index 0000000000000000000000000000000000000000..d5ba60f2efe588f5afb9f7d9a3951dd1ce4d77c5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor_testbase.py @@ -0,0 +1,112 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Base test class for ssd_mobilenet_v3_feature_extractor.""" + +import abc + +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test + + +class _SsdMobilenetV3FeatureExtractorTestBase( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + """Base class for MobilenetV3 tests.""" + + @abc.abstractmethod + def _get_input_sizes(self): + """Return feature map sizes for the two inputs to SSD head.""" + pass + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 8, 8, input_feature_sizes[0]), + (2, 4, 4, input_feature_sizes[1]), + (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_keras=False) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 19, 19, input_feature_sizes[0]), + (2, 10, 10, input_feature_sizes[1]), + (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, + image_height, + image_width, + depth_multiplier, + pad_to_multiple, + expected_feature_map_shape, + use_keras=False) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 32 + input_feature_sizes = self._get_input_sizes() + expected_feature_map_shape = [(2, 20, 20, input_feature_sizes[0]), + (2, 10, 10, input_feature_sizes[1]), + (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, + 256), + (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(4, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_has_fused_batchnorm(self): + image_height = 40 + image_width = 40 + depth_multiplier = 1 + pad_to_multiple = 1 + image_placeholder = tf.placeholder(tf.float32, + [1, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=False) + preprocessed_image = feature_extractor.preprocess(image_placeholder) + _ = feature_extractor.extract_features(preprocessed_image) + self.assertTrue(any('FusedBatchNorm' in op.type + for op in tf.get_default_graph().get_operations())) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor_tf1_test.py new file mode 100644 index 
0000000000000000000000000000000000000000..43c02490a7358820404380d20aa1d2190fce01a1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_mobilenet_v3_feature_extractor_tf1_test.py @@ -0,0 +1,105 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd_mobilenet_v3_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_mobilenet_v3_feature_extractor +from object_detection.models import ssd_mobilenet_v3_feature_extractor_testbase +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV3LargeFeatureExtractorTest( + ssd_mobilenet_v3_feature_extractor_testbase + ._SsdMobilenetV3FeatureExtractorTestBase): + + def _get_input_sizes(self): + """Return first two input feature map sizes.""" + return [672, 480] + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + use_keras=False): + """Constructs a new Mobilenet V3-Large feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. + """ + min_depth = 32 + return ( + ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3LargeFeatureExtractor( + False, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdMobilenetV3SmallFeatureExtractorTest( + ssd_mobilenet_v3_feature_extractor_testbase + ._SsdMobilenetV3FeatureExtractorTestBase): + + def _get_input_sizes(self): + """Return first two input feature map sizes.""" + return [288, 288] + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + use_keras=False): + """Constructs a new Mobilenet V3-Small feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_keras: if True builds a keras-based feature extractor, if False builds + a slim-based one. + + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. 
+ """ + min_depth = 32 + return ( + ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3SmallFeatureExtractor( + False, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_pnasnet_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_pnasnet_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..48f1dee3b4f6aceffd87b995bebb06a88b25c4ca --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_pnasnet_feature_extractor.py @@ -0,0 +1,182 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSDFeatureExtractor for PNASNet features. + +Based on PNASNet ImageNet model: https://arxiv.org/abs/1712.00559 +""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import variables_helper +try: + from nets.nasnet import pnasnet # pylint: disable=g-import-not-at-top +except: # pylint: disable=bare-except + pass + + +def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False): + """Defines the default arg scope for the PNASNet Large for object detection. + + This provides a small edit to switch batch norm training on and off. + + Args: + is_batch_norm_training: Boolean indicating whether to train with batch norm. + Default is False. + + Returns: + An `arg_scope` to use for the PNASNet Large Model. + """ + imagenet_scope = pnasnet.pnasnet_large_arg_scope() + with slim.arg_scope(imagenet_scope): + with slim.arg_scope([slim.batch_norm], + is_training=is_batch_norm_training) as sc: + return sc + + +class SSDPNASNetFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD Feature Extractor using PNASNet features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + num_layers=6, + override_base_feature_extractor_hyperparams=False): + """PNASNet Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. 
+ conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + use_depthwise: Whether to use depthwise convolutions. + num_layers: Number of SSD layers. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDPNASNetFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + num_layers=num_layers, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + Maps pixel values to the range [-1, 1]. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + return (2.0 / 255.0) * resized_inputs - 1.0 + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + + feature_map_layout = { + 'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''][:self._num_layers], + 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers], + 'use_explicit_padding': self._use_explicit_padding, + 'use_depthwise': self._use_depthwise, + } + + with slim.arg_scope( + pnasnet_large_arg_scope_for_detection( + is_batch_norm_training=self._is_training)): + with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d], + reuse=self._reuse_weights): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + _, image_features = pnasnet.build_pnasnet_large( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), + num_classes=None, + is_training=self._is_training, + final_endpoint='Cell_11') + with tf.variable_scope('SSD_feature_maps', reuse=self._reuse_weights): + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.multi_resolution_feature_maps( + feature_map_layout=feature_map_layout, + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + insert_1x1_conv=True, + image_features=image_features) + + return list(feature_maps.values()) + + def restore_from_classification_checkpoint_fn(self, feature_extractor_scope): + """Returns a map of variables to load from a foreign checkpoint. + + Note that this overrides the default implementation in + ssd_meta_arch.SSDFeatureExtractor which does not work for PNASNet + checkpoints. + + Args: + feature_extractor_scope: A scope name for the first stage feature + extractor. + + Returns: + A dict mapping variable names (to load from a checkpoint) to variables in + the model graph. 
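The PNASNet extractor's preprocess() above maps pixel values from [0, 255] into [-1, 1], which is the property checked by the range test in the accompanying test file. A small NumPy sketch of that scaling (illustrative pixel values only):

import numpy as np

# (2.0 / 255.0) * x - 1.0 sends 0 -> -1, 127.5 -> 0, 255 -> +1.
pixels = np.array([0.0, 127.5, 255.0])
preprocessed = (2.0 / 255.0) * pixels - 1.0
assert np.allclose(preprocessed, [-1.0, 0.0, 1.0])
assert np.all(np.abs(preprocessed) <= 1.0)  # the property the unit test asserts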
+ """ + variables_to_restore = {} + for variable in variables_helper.get_global_variables_safely(): + if variable.op.name.startswith(feature_extractor_scope): + var_name = variable.op.name.replace(feature_extractor_scope + '/', '') + var_name += '/ExponentialMovingAverage' + variables_to_restore[var_name] = variable + return variables_to_restore diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_pnasnet_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_pnasnet_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b4bb3356ae6eceab38d161e3c904691181f30f3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_pnasnet_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_pnasnet_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_pnasnet_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d5f5bff92d9f7da6fbf8243dd3dc1dff0bc9e628 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_pnasnet_feature_extractor_tf1_test.py @@ -0,0 +1,108 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for ssd_pnas_feature_extractor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.models import ssd_pnasnet_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SsdPnasNetFeatureExtractorTest( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + num_layers=6, + is_training=True): + """Constructs a new feature extractor. + + Args: + depth_multiplier: float depth multiplier for feature extractor + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + num_layers: number of SSD layers. + is_training: whether the network is in training mode. + Returns: + an ssd_meta_arch.SSDFeatureExtractor object. 
+ """ + min_depth = 32 + return ssd_pnasnet_feature_extractor.SSDPNASNetFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding, + num_layers=num_layers) + + def test_extract_features_returns_correct_shapes_128(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 2160), (2, 4, 4, 4320), + (2, 2, 2, 512), (2, 1, 1, 256), + (2, 1, 1, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_299(self): + image_height = 299 + image_width = 299 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 2160), (2, 10, 10, 4320), + (2, 5, 5, 512), (2, 3, 3, 256), + (2, 2, 2, 256), (2, 1, 1, 128)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = np.random.rand(2, image_height, image_width, 3) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) + + def test_extract_features_with_fewer_layers(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 8, 8, 2160), (2, 4, 4, 4320), + (2, 2, 2, 512), (2, 1, 1, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, num_layers=4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..fc1827a1a10e3c99562a33d6b64d29c50a09d3bd --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py @@ -0,0 +1,391 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SSD Feature Pyramid Network (FPN) feature extractors based on Resnet v1. + +See https://arxiv.org/abs/1708.02002 for details. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import resnet_v1 + + +class SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD FPN feature extractor based on Resnet v1 architecture.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + resnet_base_fn, + resnet_scope_name, + fpn_scope_name, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD FPN feature extractor based on Resnet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + resnet_base_fn: base resnet network to use. + resnet_scope_name: scope name under which to construct resnet + fpn_scope_name: scope name under which to construct the feature pyramid + network. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. UNUSED currently. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + + Raises: + ValueError: On supplying invalid arguments for unused arguments. 
+ """ + super(SSDResnetV1FpnFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams_fn=conv_hyperparams_fn, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + if self._use_explicit_padding is True: + raise ValueError('Explicit padding is not a valid option.') + self._resnet_base_fn = resnet_base_fn + self._resnet_scope_name = resnet_scope_name + self._fpn_scope_name = fpn_scope_name + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._use_native_resize_op = use_native_resize_op + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def _filter_features(self, image_features): + # TODO(rathodv): Change resnet endpoint to strip scope prefixes instead + # of munging the scope here. + filtered_image_features = dict({}) + for key, feature in image_features.items(): + feature_name = key.split('/')[-1] + if feature_name in ['block1', 'block2', 'block3', 'block4']: + filtered_image_features[feature_name] = feature + return filtered_image_features + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 129, preprocessed_inputs) + + with tf.variable_scope( + self._resnet_scope_name, reuse=self._reuse_weights) as scope: + with slim.arg_scope(resnet_v1.resnet_arg_scope()): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + _, image_features = self._resnet_base_fn( + inputs=ops.pad_to_multiple(preprocessed_inputs, + self._pad_to_multiple), + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + store_non_strided_activations=True, + min_base_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + scope=scope) + image_features = self._filter_features(image_features) + depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) + with slim.arg_scope(self._conv_hyperparams_fn()): + with tf.variable_scope(self._fpn_scope_name, + reuse=self._reuse_weights): + base_fpn_max_level = min(self._fpn_max_level, 5) + feature_block_list = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_block_list.append('block{}'.format(level - 1)) + fpn_features = feature_map_generators.fpn_top_down_feature_maps( + [(key, image_features[key]) for key in feature_block_list], + depth=depth_fn(self._additional_layer_depth), + use_native_resize_op=self._use_native_resize_op) + feature_maps = [] + for level in range(self._fpn_min_level, base_fpn_max_level + 1): + feature_maps.append( + fpn_features['top_down_block{}'.format(level - 1)]) + last_feature_map = fpn_features['top_down_block{}'.format( + base_fpn_max_level - 1)] + # Construct coarse features + for i in range(base_fpn_max_level, self._fpn_max_level): + last_feature_map = slim.conv2d( + last_feature_map, + num_outputs=depth_fn(self._additional_layer_depth), + kernel_size=[3, 3], + stride=2, + padding='SAME', + scope='bottom_up_block{}'.format(i)) + feature_maps.append(last_feature_map) + return feature_maps + + +class SSDResnet50V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): + """SSD Resnet50 V1 FPN feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD Resnet50 V1 FPN feature extractor based on Resnet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. UNUSED currently. 
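The level bookkeeping in extract_features above is compact, so a short sketch may help: with the defaults fpn_min_level=3 and fpn_max_level=7, base_fpn_max_level is min(7, 5) = 5, Resnet blocks 2-4 feed the top-down FPN (note the level - 1 naming), and levels 6 and 7 come from two extra stride-2 3x3 convolutions. The helper name below is hypothetical; the stride numbers match the FPN test base later in this diff, where a 256x256 input yields 32, 16, 8, 4 and 2 pixel maps.

def fpn_level_layout(fpn_min_level=3, fpn_max_level=7):
    # Mirrors the bookkeeping in extract_features: which resnet blocks feed the
    # top-down path and which levels come from extra bottom_up_block convs.
    base_fpn_max_level = min(fpn_max_level, 5)
    backbone_blocks = ['block{}'.format(level - 1)
                       for level in range(fpn_min_level, base_fpn_max_level + 1)]
    coarse_levels = list(range(base_fpn_max_level + 1, fpn_max_level + 1))
    output_strides = [2 ** level for level in range(fpn_min_level, fpn_max_level + 1)]
    return backbone_blocks, coarse_levels, output_strides

blocks, coarse, strides = fpn_level_layout()
assert blocks == ['block2', 'block3', 'block4']
assert coarse == [6, 7]
assert strides == [8, 16, 32, 64, 128]  # 256 input -> 32, 16, 8, 4, 2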
+ use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDResnet50V1FpnFeatureExtractor, self).__init__( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + resnet_v1.resnet_v1_50, + 'resnet_v1_50', + 'fpn', + fpn_min_level, + fpn_max_level, + additional_layer_depth, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + use_native_resize_op=use_native_resize_op, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + +class SSDResnet101V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): + """SSD Resnet101 V1 FPN feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD Resnet101 V1 FPN feature extractor based on Resnet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. UNUSED currently. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. 
+ """ + super(SSDResnet101V1FpnFeatureExtractor, self).__init__( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + resnet_v1.resnet_v1_101, + 'resnet_v1_101', + 'fpn', + fpn_min_level, + fpn_max_level, + additional_layer_depth, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + use_native_resize_op=use_native_resize_op, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) + + +class SSDResnet152V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor): + """SSD Resnet152 V1 FPN feature extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + use_native_resize_op=False, + override_base_feature_extractor_hyperparams=False): + """SSD Resnet152 V1 FPN feature extractor based on Resnet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. UNUSED currently. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize + to do upsampling in FPN. Default is false. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. 
+ """ + super(SSDResnet152V1FpnFeatureExtractor, self).__init__( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + resnet_v1.resnet_v1_152, + 'resnet_v1_152', + 'fpn', + fpn_min_level, + fpn_max_level, + additional_layer_depth, + reuse_weights=reuse_weights, + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + use_native_resize_op=use_native_resize_op, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..029f0c84024a2d1ab8b253f40488aceb2336ca89 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_testbase.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_testbase.py new file mode 100644 index 0000000000000000000000000000000000000000..1ccad530ed5f34da2bd903c23b1d974f86a9d933 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_testbase.py @@ -0,0 +1,193 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for ssd resnet v1 FPN feature extractors.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import numpy as np +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test +from object_detection.utils import test_utils + + +class SSDResnetFPNFeatureExtractorTestBase( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + """Helper test class for SSD Resnet v1 FPN feature extractors.""" + + @abc.abstractmethod + def _resnet_scope_name(self): + pass + + @abc.abstractmethod + def _fpn_scope_name(self): + return 'fpn' + + @abc.abstractmethod + def _create_feature_extractor(self, + depth_multiplier, + pad_to_multiple, + use_explicit_padding=False, + min_depth=32, + use_keras=False): + pass + + def test_extract_features_returns_correct_shapes_256(self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=self.is_tf2()) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=self.is_tf2()) + + def test_extract_features_returns_correct_shapes_with_depth_multiplier( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 0.5 + expected_num_channels = int(256 * depth_multiplier) + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 32, 32, expected_num_channels), + (2, 16, 16, expected_num_channels), + (2, 8, 8, expected_num_channels), + (2, 4, 4, expected_num_channels), + (2, 2, 2, expected_num_channels)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=self.is_tf2()) + + def test_extract_features_returns_correct_shapes_with_min_depth( + self): + image_height = 256 + image_width = 256 + depth_multiplier = 1.0 + pad_to_multiple = 1 + min_depth = 320 + expected_feature_map_shape = [(2, 32, 32, min_depth), + (2, 16, 16, min_depth), + (2, 8, 8, min_depth), + (2, 4, 4, min_depth), + (2, 2, 2, min_depth)] + + with test_utils.GraphContextOrNone() as g: + image_tensor = tf.random.uniform([2, image_height, image_width, 3]) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, min_depth=min_depth, + use_keras=self.is_tf2()) + + def graph_fn(): + if self.is_tf2(): + return feature_extractor(image_tensor) + return feature_extractor.extract_features(image_tensor) + + feature_maps = self.execute(graph_fn, [], graph=g) + for feature_map, expected_shape in zip(feature_maps, + expected_feature_map_shape): + self.assertAllEqual(feature_map.shape, expected_shape) + + def test_extract_features_returns_correct_shapes_with_pad_to_multiple( + self): + image_height = 254 + image_width = 254 + 
depth_multiplier = 1.0 + pad_to_multiple = 32 + expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256), + (2, 8, 8, 256), (2, 4, 4, 256), + (2, 2, 2, 256)] + + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape, use_keras=self.is_tf2()) + + def test_extract_features_raises_error_with_invalid_image_size( + self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple, + use_keras=self.is_tf2()) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image_np = np.random.rand(4, image_height, image_width, 3) + with test_utils.GraphContextOrNone() as g: + test_image = tf.constant(test_image_np) + feature_extractor = self._create_feature_extractor( + depth_multiplier, pad_to_multiple, use_keras=self.is_tf2()) + + def graph_fn(): + preprocessed_image = feature_extractor.preprocess(test_image) + return preprocessed_image + + preprocessed_image_out = self.execute(graph_fn, [], graph=g) + self.assertAllClose(preprocessed_image_out, + test_image_np - [[123.68, 116.779, 103.939]]) + + def test_variables_only_created_in_scope(self): + if self.is_tf2(): + self.skipTest('test_variables_only_created_in_scope is only tf1') + depth_multiplier = 1 + pad_to_multiple = 1 + scope_name = self._resnet_scope_name() + self.check_feature_extractor_variables_under_scope( + depth_multiplier, + pad_to_multiple, + scope_name, + use_keras=self.is_tf2()) + + def test_variable_count(self): + if self.is_tf2(): + self.skipTest('test_variable_count is only tf1') + depth_multiplier = 1 + pad_to_multiple = 1 + variables = self.get_feature_extractor_variables( + depth_multiplier, + pad_to_multiple, + use_keras=self.is_tf2()) + # The number of expected variables in resnet_v1_50, resnet_v1_101, + # and resnet_v1_152 is 279, 534, and 789 respectively. + expected_variables_len = 279 + scope_name = self._resnet_scope_name() + if scope_name in ('ResNet101V1_FPN', 'resnet_v1_101'): + expected_variables_len = 534 + elif scope_name in ('ResNet152V1_FPN', 'resnet_v1_152'): + expected_variables_len = 789 + self.assertEqual(len(variables), expected_variables_len) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..58952ff9486d6be3f077c9e21788ce8409806d18 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf1_test.py @@ -0,0 +1,85 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
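The depth-multiplier and min-depth expectations in the FPN test base above both follow from the depth function the extractors apply to additional_layer_depth (256): max(int(depth * depth_multiplier), min_depth). A two-line sketch with the exact numbers those tests use:

def depth_fn(depth, depth_multiplier, min_depth):
    # Rule used for the FPN / coarse feature map channel depth.
    return max(int(depth * depth_multiplier), min_depth)

assert depth_fn(256, depth_multiplier=0.5, min_depth=32) == 128   # depth-multiplier test
assert depth_fn(256, depth_multiplier=1.0, min_depth=320) == 320  # min-depth test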
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd resnet v1 FPN feature extractors.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_resnet_v1_fpn_feature_extractor +from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet50V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet50v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=False): + is_training = True + return ( + ssd_resnet_v1_fpn_feature_extractor.SSDResnet50V1FpnFeatureExtractor( + is_training, depth_multiplier, min_depth, pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def _resnet_scope_name(self): + return 'resnet_v1_50' + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet101V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet101v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=False): + is_training = True + return ( + ssd_resnet_v1_fpn_feature_extractor.SSDResnet101V1FpnFeatureExtractor( + is_training, depth_multiplier, min_depth, pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def _resnet_scope_name(self): + return 'resnet_v1_101' + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet152V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet152v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=False): + is_training = True + return ( + ssd_resnet_v1_fpn_feature_extractor.SSDResnet152V1FpnFeatureExtractor( + is_training, depth_multiplier, min_depth, pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def _resnet_scope_name(self): + return 'resnet_v1_152' + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..27c54ddd08ffa866dad4975c9bed7c629e8c46ac --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf2_test.py @@ -0,0 +1,103 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd resnet v1 FPN feature extractors.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase +from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SSDResnet50V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet50v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=True): + is_training = True + return (ssd_resnet_v1_fpn_keras_feature_extractor. + SSDResNet50V1FpnKerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + name='ResNet50V1_FPN')) + + def _resnet_scope_name(self): + return 'ResNet50V1_FPN' + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SSDResnet101V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet101v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=False): + is_training = True + return (ssd_resnet_v1_fpn_keras_feature_extractor. + SSDResNet101V1FpnKerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + name='ResNet101V1_FPN')) + + def _resnet_scope_name(self): + return 'ResNet101V1_FPN' + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class SSDResnet152V1FeatureExtractorTest( + ssd_resnet_v1_fpn_feature_extractor_testbase. + SSDResnetFPNFeatureExtractorTestBase): + """SSDResnet152v1Fpn feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False, min_depth=32, + use_keras=False): + is_training = True + return (ssd_resnet_v1_fpn_keras_feature_extractor. 
+ SSDResNet152V1FpnKerasFeatureExtractor( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=self._build_conv_hyperparams( + add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + name='ResNet152V1_FPN')) + + def _resnet_scope_name(self): + return 'ResNet152V1_FPN' + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_keras_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_keras_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..0ac929cc6349a21b541f20adb624ad157d4f4a63 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_fpn_keras_feature_extractor.py @@ -0,0 +1,457 @@ +# Lint as: python2, python3 +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""SSD Keras-based ResnetV1 FPN Feature Extractor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.models.keras_models import resnet_v1 +from object_detection.utils import ops +from object_detection.utils import shape_utils + +_RESNET_MODEL_OUTPUT_LAYERS = { + 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block6_out', 'conv5_block3_out'], + 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block23_out', 'conv5_block3_out'], + 'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out', + 'conv4_block36_out', 'conv5_block3_out'], +} + + +class SSDResNetV1FpnKerasFeatureExtractor( + ssd_meta_arch.SSDKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based ResnetV1 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + resnet_v1_base_model, + resnet_v1_base_model_name, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=False, + name=None): + """SSD Keras based FPN feature extractor Resnet v1 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. 
+ conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + resnet_v1_base_model: base resnet v1 network to use. One of + the resnet_v1.resnet_v1_{50,101,152} models. + resnet_v1_base_model_name: model name under which to construct resnet v1. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4} + respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: whether to use explicit padding when extracting + features. Default is None, as it's an invalid option and not implemented + in this feature extractor. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. 
+ """ + super(SSDResNetV1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + if self._use_explicit_padding: + raise ValueError('Explicit padding is not a valid option.') + if self._use_depthwise: + raise ValueError('Depthwise is not a valid option.') + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._additional_layer_depth = additional_layer_depth + self._resnet_v1_base_model = resnet_v1_base_model + self._resnet_v1_base_model_name = resnet_v1_base_model_name + self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] + self.classification_backbone = None + self._fpn_features_generator = None + self._coarse_feature_layers = [] + + def build(self, input_shape): + full_resnet_v1_model = self._resnet_v1_base_model( + batchnorm_training=(self._is_training and not self._freeze_batchnorm), + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + depth_multiplier=self._depth_multiplier, + min_depth=self._min_depth, + classes=None, + weights=None, + include_top=False) + output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name] + outputs = [full_resnet_v1_model.get_layer(output_layer_name).output + for output_layer_name in output_layers] + self.classification_backbone = tf.keras.Model( + inputs=full_resnet_v1_model.inputs, + outputs=outputs) + # pylint:disable=g-long-lambda + self._depth_fn = lambda d: max( + int(d * self._depth_multiplier), self._min_depth) + self._base_fpn_max_level = min(self._fpn_max_level, 5) + self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level + self._fpn_features_generator = ( + feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=self._num_levels, + depth=self._depth_fn(self._additional_layer_depth), + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + # Construct coarse feature layers + depth = self._depth_fn(self._additional_layer_depth) + for i in range(self._base_fpn_max_level, self._fpn_max_level): + layers = [] + layer_name = 'bottom_up_block{}'.format(i) + layers.append( + tf.keras.layers.Conv2D( + depth, + [3, 3], + padding='SAME', + strides=2, + name=layer_name + '_conv', + **self._conv_hyperparams.params())) + layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name=layer_name + '_batchnorm')) + layers.append( + self._conv_hyperparams.build_activation_layer( + name=layer_name)) + self._coarse_feature_layers.append(layers) + self.built = True + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. 
+ + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def _extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + """ + preprocessed_inputs = shape_utils.check_min_image_dim( + 129, preprocessed_inputs) + + image_features = self.classification_backbone( + ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple)) + + feature_block_list = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_block_list.append('block{}'.format(level - 1)) + feature_block_map = dict( + list(zip(self._resnet_block_names, image_features))) + fpn_input_image_features = [ + (feature_block, feature_block_map[feature_block]) + for feature_block in feature_block_list] + fpn_features = self._fpn_features_generator(fpn_input_image_features) + + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) + last_feature_map = fpn_features['top_down_block{}'.format( + self._base_fpn_max_level - 1)] + + for coarse_feature_layers in self._coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(last_feature_map) + return feature_maps + + +class SSDResNet50V1FpnKerasFeatureExtractor( + SSDResNetV1FpnKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based ResnetV1-50 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=False, + name='ResNet50V1_FPN'): + """SSD Keras based FPN feature extractor ResnetV1-50 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. 
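Both the slim and the Keras FPN extractors in this diff share the same VGG-style preprocessing: per-channel mean subtraction, applied only when the input has three channels. A NumPy sketch of that step (the test base earlier asserts exactly this behaviour):

import numpy as np

channel_means = [123.68, 116.779, 103.939]  # per-channel means subtracted by preprocess()
images = np.tile(np.array(channel_means), (2, 128, 128, 1))
preprocessed = images - [[channel_means]]   # broadcasts over batch, height and width
assert preprocessed.shape == (2, 128, 128, 3)
assert np.allclose(preprocessed, 0.0)       # an image equal to the means maps to zero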
+ use_explicit_padding: whether to use explicit padding when extracting + features. Default is None, as it's an invalid option and not implemented + in this feature extractor. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. + """ + super(SSDResNet50V1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + resnet_v1_base_model=resnet_v1.resnet_v1_50, + resnet_v1_base_model_name='resnet_v1_50', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDResNet101V1FpnKerasFeatureExtractor( + SSDResNetV1FpnKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based ResnetV1-101 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=None, + use_depthwise=None, + override_base_feature_extractor_hyperparams=False, + name='ResNet101V1_FPN'): + """SSD Keras based FPN feature extractor ResnetV1-101 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: whether to use explicit padding when extracting + features. Default is None, as it's an invalid option and not implemented + in this feature extractor. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. 
+ """ + super(SSDResNet101V1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + resnet_v1_base_model=resnet_v1.resnet_v1_101, + resnet_v1_base_model_name='resnet_v1_101', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) + + +class SSDResNet152V1FpnKerasFeatureExtractor( + SSDResNetV1FpnKerasFeatureExtractor): + """SSD Feature Extractor using Keras-based ResnetV1-152 FPN features.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams, + freeze_batchnorm, + inplace_batchnorm_update, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=None, + override_base_feature_extractor_hyperparams=False, + name='ResNet152V1_FPN'): + """SSD Keras based FPN feature extractor ResnetV1-152 architecture. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + freeze_batchnorm: whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + fpn_min_level: the minimum level in feature pyramid networks. + fpn_max_level: the maximum level in feature pyramid networks. + additional_layer_depth: additional feature map layer channel depth. + reuse_weights: whether to reuse variables. Default is None. + use_explicit_padding: whether to use explicit padding when extracting + features. Default is None, as it's an invalid option and not implemented + in this feature extractor. + use_depthwise: Whether to use depthwise convolutions. UNUSED currently. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + name: a string name scope to assign to the model. If 'None', Keras + will auto-generate one from the class name. 
+ """ + super(SSDResNet152V1FpnKerasFeatureExtractor, self).__init__( + is_training=is_training, + depth_multiplier=depth_multiplier, + min_depth=min_depth, + pad_to_multiple=pad_to_multiple, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + resnet_v1_base_model=resnet_v1.resnet_v1_152, + resnet_v1_base_model_name='resnet_v1_152', + use_explicit_padding=use_explicit_padding, + use_depthwise=use_depthwise, + override_base_feature_extractor_hyperparams= + override_base_feature_extractor_hyperparams, + name=name) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb34e261e83122a2b59bdca2ed96363f67fd632 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.py @@ -0,0 +1,284 @@ +# Lint as: python2, python3 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""SSD feature extractors based on Resnet v1 and PPN architectures.""" + +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.meta_architectures import ssd_meta_arch +from object_detection.models import feature_map_generators +from object_detection.utils import context_manager +from object_detection.utils import ops +from object_detection.utils import shape_utils +from nets import resnet_v1 + + +class _SSDResnetPpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): + """SSD feature extractor based on resnet architecture and PPN.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + resnet_base_fn, + resnet_scope_name, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + base_feature_map_depth=1024, + num_layers=6, + override_base_feature_extractor_hyperparams=False, + use_bounded_activations=False): + """Resnet based PPN Feature Extractor for SSD Models. + + See go/pooling-pyramid for more details about PPN. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + resnet_base_fn: base resnet network to use. + resnet_scope_name: scope name to construct resnet + reuse_weights: Whether to reuse variables. Default is None. 
+ use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + base_feature_map_depth: Depth of the base feature before the max pooling. + num_layers: Number of layers used to make predictions. They are pooled + from the base feature. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + use_bounded_activations: Whether or not to use bounded activations for + resnet v1 bottleneck residual unit. Bounded activations better lend + themselves to quantized inference. + """ + super(_SSDResnetPpnFeatureExtractor, self).__init__( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams) + self._resnet_base_fn = resnet_base_fn + self._resnet_scope_name = resnet_scope_name + self._base_feature_map_depth = base_feature_map_depth + self._num_layers = num_layers + self._use_bounded_activations = use_bounded_activations + + def _filter_features(self, image_features): + # TODO(rathodv): Change resnet endpoint to strip scope prefixes instead + # of munging the scope here. + filtered_image_features = dict({}) + for key, feature in image_features.items(): + feature_name = key.split('/')[-1] + if feature_name in ['block2', 'block3', 'block4']: + filtered_image_features[feature_name] = feature + return filtered_image_features + + def preprocess(self, resized_inputs): + """SSD preprocessing. + + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge. + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs + + def extract_features(self, preprocessed_inputs): + """Extract features from preprocessed inputs. + + Args: + preprocessed_inputs: a [batch, height, width, channels] float tensor + representing a batch of images. + + Returns: + feature_maps: a list of tensors where the ith tensor has shape + [batch, height_i, width_i, depth_i] + + Raises: + ValueError: depth multiplier is not supported. 
+ """ + if self._depth_multiplier != 1.0: + raise ValueError('Depth multiplier not supported.') + + preprocessed_inputs = shape_utils.check_min_image_dim( + 129, preprocessed_inputs) + + with tf.variable_scope( + self._resnet_scope_name, reuse=self._reuse_weights) as scope: + with slim.arg_scope(resnet_v1.resnet_arg_scope()): + with (slim.arg_scope(self._conv_hyperparams_fn()) + if self._override_base_feature_extractor_hyperparams else + context_manager.IdentityContextManager()): + with slim.arg_scope( + [resnet_v1.bottleneck], + use_bounded_activations=self._use_bounded_activations): + _, activations = self._resnet_base_fn( + inputs=ops.pad_to_multiple(preprocessed_inputs, + self._pad_to_multiple), + num_classes=None, + is_training=None, + global_pool=False, + output_stride=None, + store_non_strided_activations=True, + scope=scope) + + with slim.arg_scope(self._conv_hyperparams_fn()): + feature_maps = feature_map_generators.pooling_pyramid_feature_maps( + base_feature_map_depth=self._base_feature_map_depth, + num_layers=self._num_layers, + image_features={ + 'image_features': self._filter_features(activations)['block3'] + }) + return list(feature_maps.values()) + + +class SSDResnet50V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): + """PPN Resnet50 v1 Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False): + """Resnet50 v1 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDResnet50V1PpnFeatureExtractor, self).__init__( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams_fn, resnet_v1.resnet_v1_50, 'resnet_v1_50', + reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams=( + override_base_feature_extractor_hyperparams)) + + +class SSDResnet101V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): + """PPN Resnet101 v1 Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False): + """Resnet101 v1 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. 
+ conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. + """ + super(SSDResnet101V1PpnFeatureExtractor, self).__init__( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams_fn, resnet_v1.resnet_v1_101, 'resnet_v1_101', + reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams=( + override_base_feature_extractor_hyperparams)) + + +class SSDResnet152V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor): + """PPN Resnet152 v1 Feature Extractor.""" + + def __init__(self, + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + conv_hyperparams_fn, + reuse_weights=None, + use_explicit_padding=False, + use_depthwise=False, + override_base_feature_extractor_hyperparams=False): + """Resnet152 v1 Feature Extractor for SSD Models. + + Args: + is_training: whether the network is in training mode. + depth_multiplier: float depth multiplier for feature extractor. + min_depth: minimum feature extractor depth. + pad_to_multiple: the nearest multiple to zero pad the input height and + width dimensions to. + conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d + and separable_conv2d ops in the layers that are added on top of the + base feature extractor. + reuse_weights: Whether to reuse variables. Default is None. + use_explicit_padding: Whether to use explicit padding when extracting + features. Default is False. + use_depthwise: Whether to use depthwise convolutions. Default is False. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams_fn`. 
+ """ + super(SSDResnet152V1PpnFeatureExtractor, self).__init__( + is_training, depth_multiplier, min_depth, pad_to_multiple, + conv_hyperparams_fn, resnet_v1.resnet_v1_152, 'resnet_v1_152', + reuse_weights, use_explicit_padding, use_depthwise, + override_base_feature_extractor_hyperparams=( + override_base_feature_extractor_hyperparams)) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ced0f805cd794372db757cb2bf75131715684c6b Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_testbase.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_testbase.py new file mode 100644 index 0000000000000000000000000000000000000000..ba80c6627a0711d54ffdf63a58e35ba6431c9f62 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_testbase.py @@ -0,0 +1,82 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for ssd resnet v1 feature extractors.""" +import abc +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_feature_extractor_test + + +class SSDResnetPpnFeatureExtractorTestBase( + ssd_feature_extractor_test.SsdFeatureExtractorTestBase): + """Helper test class for SSD Resnet PPN feature extractors.""" + + @abc.abstractmethod + def _scope_name(self): + pass + + def test_extract_features_returns_correct_shapes_289(self): + image_height = 289 + image_width = 289 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 1024), (2, 10, 10, 1024), + (2, 5, 5, 1024), (2, 3, 3, 1024), + (2, 2, 2, 1024), (2, 1, 1, 1024)] + self.check_extract_features_returns_correct_shape( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self): + image_height = 289 + image_width = 289 + depth_multiplier = 1.0 + pad_to_multiple = 1 + expected_feature_map_shape = [(2, 19, 19, 1024), (2, 10, 10, 1024), + (2, 5, 5, 1024), (2, 3, 3, 1024), + (2, 2, 2, 1024), (2, 1, 1, 1024)] + self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( + 2, image_height, image_width, depth_multiplier, pad_to_multiple, + expected_feature_map_shape) + + def test_extract_features_raises_error_with_invalid_image_size(self): + image_height = 32 + image_width = 32 + depth_multiplier = 1.0 + pad_to_multiple = 1 + self.check_extract_features_raises_error_with_invalid_image_size( + image_height, image_width, depth_multiplier, pad_to_multiple) + + def test_preprocess_returns_correct_value_range(self): + image_height = 128 + image_width = 128 + depth_multiplier = 1 + pad_to_multiple = 1 + test_image = tf.constant(np.random.rand(4, image_height, image_width, 3)) + feature_extractor = self._create_feature_extractor(depth_multiplier, + pad_to_multiple) + preprocessed_image = feature_extractor.preprocess(test_image) + with self.test_session() as sess: + test_image_out, preprocessed_image_out = sess.run( + [test_image, preprocessed_image]) + self.assertAllClose(preprocessed_image_out, + test_image_out - [[123.68, 116.779, 103.939]]) + + def test_variables_only_created_in_scope(self): + depth_multiplier = 1 + pad_to_multiple = 1 + self.check_feature_extractor_variables_under_scope( + depth_multiplier, pad_to_multiple, self._scope_name()) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..bb95cb53f3905ef9288ade7600005c1ba9372be5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_tf1_test.py @@ -0,0 +1,93 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for ssd resnet v1 feature extractors.""" +import unittest +import tensorflow.compat.v1 as tf + +from object_detection.models import ssd_resnet_v1_ppn_feature_extractor +from object_detection.models import ssd_resnet_v1_ppn_feature_extractor_testbase +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet50V1PpnFeatureExtractorTest( + ssd_resnet_v1_ppn_feature_extractor_testbase. + SSDResnetPpnFeatureExtractorTestBase): + """SSDResnet50v1 feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False): + min_depth = 32 + is_training = True + return ssd_resnet_v1_ppn_feature_extractor.SSDResnet50V1PpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding) + + def _scope_name(self): + return 'resnet_v1_50' + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet101V1PpnFeatureExtractorTest( + ssd_resnet_v1_ppn_feature_extractor_testbase. + SSDResnetPpnFeatureExtractorTestBase): + """SSDResnet101v1 feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False): + min_depth = 32 + is_training = True + return ( + ssd_resnet_v1_ppn_feature_extractor.SSDResnet101V1PpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def _scope_name(self): + return 'resnet_v1_101' + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class SSDResnet152V1PpnFeatureExtractorTest( + ssd_resnet_v1_ppn_feature_extractor_testbase. 
+ SSDResnetPpnFeatureExtractorTestBase): + """SSDResnet152v1 feature extractor test.""" + + def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, + use_explicit_padding=False): + min_depth = 32 + is_training = True + return ( + ssd_resnet_v1_ppn_feature_extractor.SSDResnet152V1PpnFeatureExtractor( + is_training, + depth_multiplier, + min_depth, + pad_to_multiple, + self.conv_hyperparams_fn, + use_explicit_padding=use_explicit_padding)) + + def _scope_name(self): + return 'resnet_v1_152' + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/packages/tf1/setup.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/packages/tf1/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..a40a368a6f5fddbccfc13b4d76f38a49d3c1c8d3 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/packages/tf1/setup.py @@ -0,0 +1,27 @@ +"""Setup script for object_detection with TF1.0.""" +import os +from setuptools import find_packages +from setuptools import setup + +REQUIRED_PACKAGES = ['pillow', 'lxml', 'matplotlib', 'Cython', + 'contextlib2', 'tf-slim', 'six', 'pycocotools', 'lvis', + 'scipy', 'pandas'] + +setup( + name='object_detection', + version='0.1', + install_requires=REQUIRED_PACKAGES, + include_package_data=True, + packages=( + [p for p in find_packages() if p.startswith('object_detection')] + + find_packages(where=os.path.join('.', 'slim'))), + package_dir={ + 'datasets': os.path.join('slim', 'datasets'), + 'nets': os.path.join('slim', 'nets'), + 'preprocessing': os.path.join('slim', 'preprocessing'), + 'deployment': os.path.join('slim', 'deployment'), + 'scripts': os.path.join('slim', 'scripts'), + }, + description='Tensorflow Object Detection Library with TF1.0', + python_requires='>3.6', +) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/packages/tf2/setup.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/packages/tf2/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..3f9f0e35363cde03bee00641f3fb53ccc85c55ad --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/packages/tf2/setup.py @@ -0,0 +1,44 @@ +"""Setup script for object_detection with TF2.0.""" +import os +from setuptools import find_packages +from setuptools import setup + +# Note: adding apache-beam to required packages causes conflict with +# tf-models-offical requirements. These packages request for incompatible +# oauth2client package. 
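For context, a minimal usage sketch rather than code added by this diff: once one of these setup.py scripts has been installed (for TF2 the documented flow is to copy packages/tf2/setup.py into the research/ directory and run python -m pip install .), the object_detection library is typically driven through its config utilities and model builder. The pipeline.config path below is a placeholder assumption, not a file introduced by this commit.

# Hypothetical usage of the installed object_detection package; the
# 'pipeline.config' path is a placeholder, not part of this commit.
from object_detection.builders import model_builder
from object_detection.utils import config_util

configs = config_util.get_configs_from_pipeline_file('pipeline.config')
# Build a detection model from the 'model' section of the pipeline config.
detection_model = model_builder.build(
    model_config=configs['model'], is_training=False)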
+REQUIRED_PACKAGES = [ + # Required for apache-beam with PY3 + 'avro-python3', + 'apache-beam', + 'pillow', + 'lxml', + 'matplotlib', + 'Cython', + 'contextlib2', + 'tf-slim', + 'six', + 'pycocotools', + 'lvis', + 'scipy', + 'pandas', + 'tf-models-official' +] + +setup( + name='object_detection', + version='0.1', + install_requires=REQUIRED_PACKAGES, + include_package_data=True, + packages=( + [p for p in find_packages() if p.startswith('object_detection')] + + find_packages(where=os.path.join('.', 'slim'))), + package_dir={ + 'datasets': os.path.join('slim', 'datasets'), + 'nets': os.path.join('slim', 'nets'), + 'preprocessing': os.path.join('slim', 'preprocessing'), + 'deployment': os.path.join('slim', 'deployment'), + 'scripts': os.path.join('slim', 'scripts'), + }, + description='Tensorflow Object Detection Library', + python_requires='>3.6', +) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c54453f30b7080fa56906a7f7ac945f90a3d44a Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5073ec2cf95cfb4465bc48d2bd0383076efa54e6 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/convolutional_box_predictor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/convolutional_box_predictor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13a5bf6807f0a202b9c91c40a118b18f46897fdd Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/convolutional_box_predictor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/convolutional_keras_box_predictor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/convolutional_keras_box_predictor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..252de3cdb9de11f2240bb284e750eb7bc6711766 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/convolutional_keras_box_predictor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/mask_rcnn_box_predictor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/mask_rcnn_box_predictor.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a3ef0ac4dbf88a35334d560ef2333ad60badcad6 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/mask_rcnn_box_predictor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/mask_rcnn_keras_box_predictor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/mask_rcnn_keras_box_predictor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8137b9ec0d9aa73036c672a384a62de7a99d4c3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/mask_rcnn_keras_box_predictor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/rfcn_box_predictor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/rfcn_box_predictor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff29ba5efe582b5be6453883c220779269c923fa Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/rfcn_box_predictor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/rfcn_keras_box_predictor.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/rfcn_keras_box_predictor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06ee5b831ff6ed9900ab52a720a7458fa80278d5 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/__pycache__/rfcn_keras_box_predictor.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_box_predictor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..44b47533091db0a8967f7009cd6095074fe2f202 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_box_predictor.py @@ -0,0 +1,421 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Convolutional Box Predictors with and without weight sharing.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import functools +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf +import tf_slim as slim +from object_detection.core import box_predictor +from object_detection.utils import shape_utils +from object_detection.utils import static_shape + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class _NoopVariableScope(object): + """A dummy class that does not push any scope.""" + + def __enter__(self): + return None + + def __exit__(self, exc_type, exc_value, traceback): + return False + + +class ConvolutionalBoxPredictor(box_predictor.BoxPredictor): + """Convolutional Box Predictor. + + Optionally add an intermediate 1x1 convolutional layer after features and + predict in parallel branches box_encodings and + class_predictions_with_background. + + Currently this box predictor assumes that predictions are "shared" across + classes --- that is each anchor makes box predictions which do not depend + on class. + """ + + def __init__(self, + is_training, + num_classes, + box_prediction_head, + class_prediction_head, + other_heads, + conv_hyperparams_fn, + num_layers_before_predictor, + min_depth, + max_depth): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + box_prediction_head: The head that predicts the boxes. + class_prediction_head: The head that predicts the classes. + other_heads: A dictionary mapping head names to convolutional + head classes. + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + min_depth: Minimum feature depth prior to predicting box encodings + and class predictions. + max_depth: Maximum feature depth prior to predicting box encodings + and class predictions. If max_depth is set to 0, no additional + feature map will be inserted before location and class predictions. + + Raises: + ValueError: if min_depth > max_depth. + """ + super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes) + self._box_prediction_head = box_prediction_head + self._class_prediction_head = class_prediction_head + self._other_heads = other_heads + self._conv_hyperparams_fn = conv_hyperparams_fn + self._min_depth = min_depth + self._max_depth = max_depth + self._num_layers_before_predictor = num_layers_before_predictor + + @property + def num_classes(self): + return self._num_classes + + def _predict(self, image_features, num_predictions_per_location_list): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. 
+ num_predictions_per_location_list: A list of integers representing the + number of box predictions to be made per spatial location for each + feature map. + + Returns: + A dictionary containing: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. Each entry in the + list corresponds to a feature map in the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + (optional) Predictions from other heads. + """ + predictions = { + BOX_ENCODINGS: [], + CLASS_PREDICTIONS_WITH_BACKGROUND: [], + } + for head_name in self._other_heads.keys(): + predictions[head_name] = [] + # TODO(rathodv): Come up with a better way to generate scope names + # in box predictor once we have time to retrain all models in the zoo. + # The following lines create scope names to be backwards compatible with the + # existing checkpoints. + box_predictor_scopes = [_NoopVariableScope()] + if len(image_features) > 1: + box_predictor_scopes = [ + tf.variable_scope('BoxPredictor_{}'.format(i)) + for i in range(len(image_features)) + ] + for (image_feature, + num_predictions_per_location, box_predictor_scope) in zip( + image_features, num_predictions_per_location_list, + box_predictor_scopes): + net = image_feature + with box_predictor_scope: + with slim.arg_scope(self._conv_hyperparams_fn()): + with slim.arg_scope([slim.dropout], is_training=self._is_training): + # Add additional conv layers before the class predictor. + features_depth = static_shape.get_depth(image_feature.get_shape()) + depth = max(min(features_depth, self._max_depth), self._min_depth) + tf.logging.info('depth of additional conv before box predictor: {}'. + format(depth)) + if depth > 0 and self._num_layers_before_predictor > 0: + for i in range(self._num_layers_before_predictor): + net = slim.conv2d( + net, + depth, [1, 1], + reuse=tf.AUTO_REUSE, + scope='Conv2d_%d_1x1_%d' % (i, depth)) + sorted_keys = sorted(self._other_heads.keys()) + sorted_keys.append(BOX_ENCODINGS) + sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND) + for head_name in sorted_keys: + if head_name == BOX_ENCODINGS: + head_obj = self._box_prediction_head + elif head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: + head_obj = self._class_prediction_head + else: + head_obj = self._other_heads[head_name] + prediction = head_obj.predict( + features=net, + num_predictions_per_location=num_predictions_per_location) + predictions[head_name].append(prediction) + return predictions + + +# TODO(rathodv): Replace with slim.arg_scope_func_key once its available +# externally. +def _arg_scope_func_key(op): + """Returns a key that can be used to index arg_scope dictionary.""" + return getattr(op, '_key_op', str(op)) + + +# TODO(rathodv): Merge the implementation with ConvolutionalBoxPredictor above +# since they are very similar. +class WeightSharedConvolutionalBoxPredictor(box_predictor.BoxPredictor): + """Convolutional Box Predictor with weight sharing. + + Defines the box predictor as defined in + https://arxiv.org/abs/1708.02002. This class differs from + ConvolutionalBoxPredictor in that it shares weights and biases while + predicting from different feature maps. 
However, batch_norm parameters are not + shared because the statistics of the activations vary among the different + feature maps. + + Also note that separate multi-layer towers are constructed for the box + encoding and class predictors respectively. + """ + + def __init__(self, + is_training, + num_classes, + box_prediction_head, + class_prediction_head, + other_heads, + conv_hyperparams_fn, + depth, + num_layers_before_predictor, + kernel_size=3, + apply_batch_norm=False, + share_prediction_tower=False, + use_depthwise=False): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + box_prediction_head: The head that predicts the boxes. + class_prediction_head: The head that predicts the classes. + other_heads: A dictionary mapping head names to convolutional + head classes. + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + depth: depth of conv layers. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + kernel_size: Size of final convolution kernel. + apply_batch_norm: Whether to apply batch normalization to conv layers in + this predictor. + share_prediction_tower: Whether to share the multi-layer tower among box + prediction head, class prediction head and other heads. + use_depthwise: Whether to use depthwise separable conv2d instead of + regular conv2d. + """ + super(WeightSharedConvolutionalBoxPredictor, self).__init__(is_training, + num_classes) + self._box_prediction_head = box_prediction_head + self._class_prediction_head = class_prediction_head + self._other_heads = other_heads + self._conv_hyperparams_fn = conv_hyperparams_fn + self._depth = depth + self._num_layers_before_predictor = num_layers_before_predictor + self._kernel_size = kernel_size + self._apply_batch_norm = apply_batch_norm + self._share_prediction_tower = share_prediction_tower + self._use_depthwise = use_depthwise + + @property + def num_classes(self): + return self._num_classes + + def _insert_additional_projection_layer(self, image_feature, + inserted_layer_counter, + target_channel): + if inserted_layer_counter < 0: + return image_feature, inserted_layer_counter + image_feature = slim.conv2d( + image_feature, + target_channel, [1, 1], + stride=1, + padding='SAME', + activation_fn=None, + normalizer_fn=(tf.identity if self._apply_batch_norm else None), + scope='ProjectionLayer/conv2d_{}'.format( + inserted_layer_counter)) + if self._apply_batch_norm: + image_feature = slim.batch_norm( + image_feature, + scope='ProjectionLayer/conv2d_{}/BatchNorm'.format( + inserted_layer_counter)) + inserted_layer_counter += 1 + return image_feature, inserted_layer_counter + + def _compute_base_tower(self, tower_name_scope, image_feature, feature_index): + net = image_feature + for i in range(self._num_layers_before_predictor): + if self._use_depthwise: + conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + net = conv_op( + net, + self._depth, [self._kernel_size, self._kernel_size], + stride=1, + padding='SAME', + activation_fn=None, + normalizer_fn=(tf.identity if self._apply_batch_norm else None), + scope='{}/conv2d_{}'.format(tower_name_scope, i)) + if 
self._apply_batch_norm: + net = slim.batch_norm( + net, + scope='{}/conv2d_{}/BatchNorm/feature_{}'. + format(tower_name_scope, i, feature_index)) + net = tf.nn.relu6(net) + return net + + def _predict_head(self, head_name, head_obj, image_feature, box_tower_feature, + feature_index, num_predictions_per_location): + if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: + tower_name_scope = 'ClassPredictionTower' + else: + tower_name_scope = head_name + 'PredictionTower' + if self._share_prediction_tower: + head_tower_feature = box_tower_feature + else: + head_tower_feature = self._compute_base_tower( + tower_name_scope=tower_name_scope, + image_feature=image_feature, + feature_index=feature_index) + return head_obj.predict( + features=head_tower_feature, + num_predictions_per_location=num_predictions_per_location) + + def _predict(self, image_features, num_predictions_per_location_list): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels] containing features for a batch of images. Note that + when not all tensors in the list have the same number of channels, an + additional projection layer will be added on top the tensor to generate + feature map with number of channels consitent with the majority. + num_predictions_per_location_list: A list of integers representing the + number of box predictions to be made per spatial location for each + feature map. Note that all values must be the same since the weights are + shared. + + Returns: + A dictionary containing: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, code_size] representing the location of + the objects. Each entry in the list corresponds to a feature map in + the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + (optional) Predictions from other heads. + E.g., mask_predictions: A list of float tensors of shape + [batch_size, num_anchord_i, num_classes, mask_height, mask_width]. + + + Raises: + ValueError: If the num predictions per locations differs between the + feature maps. + """ + if len(set(num_predictions_per_location_list)) > 1: + raise ValueError('num predictions per location must be same for all' + 'feature maps, found: {}'.format( + num_predictions_per_location_list)) + feature_channels = [ + shape_utils.get_dim_as_int(image_feature.shape[3]) + for image_feature in image_features + ] + has_different_feature_channels = len(set(feature_channels)) > 1 + if has_different_feature_channels: + inserted_layer_counter = 0 + target_channel = max(set(feature_channels), key=feature_channels.count) + tf.logging.info('Not all feature maps have the same number of ' + 'channels, found: {}, appending additional projection ' + 'layers to bring all feature maps to uniformly have {} ' + 'channels.'.format(feature_channels, target_channel)) + else: + # Place holder variables if has_different_feature_channels is False. 
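+      # A negative inserted_layer_counter is the sentinel checked at the top of
+      # _insert_additional_projection_layer (it returns the feature map
+      # unchanged), so no 1x1 projection layers are created when every input
+      # feature map already has the same channel depth.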
+ target_channel = -1 + inserted_layer_counter = -1 + predictions = { + BOX_ENCODINGS: [], + CLASS_PREDICTIONS_WITH_BACKGROUND: [], + } + for head_name in self._other_heads.keys(): + predictions[head_name] = [] + for feature_index, (image_feature, + num_predictions_per_location) in enumerate( + zip(image_features, + num_predictions_per_location_list)): + with tf.variable_scope('WeightSharedConvolutionalBoxPredictor', + reuse=tf.AUTO_REUSE): + with slim.arg_scope(self._conv_hyperparams_fn()): + # TODO(wangjiang) Pass is_training to the head class directly. + with slim.arg_scope([slim.dropout], is_training=self._is_training): + (image_feature, + inserted_layer_counter) = self._insert_additional_projection_layer( + image_feature, inserted_layer_counter, target_channel) + if self._share_prediction_tower: + box_tower_scope = 'PredictionTower' + else: + box_tower_scope = 'BoxPredictionTower' + box_tower_feature = self._compute_base_tower( + tower_name_scope=box_tower_scope, + image_feature=image_feature, + feature_index=feature_index) + box_encodings = self._box_prediction_head.predict( + features=box_tower_feature, + num_predictions_per_location=num_predictions_per_location) + predictions[BOX_ENCODINGS].append(box_encodings) + sorted_keys = sorted(self._other_heads.keys()) + sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND) + for head_name in sorted_keys: + if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: + head_obj = self._class_prediction_head + else: + head_obj = self._other_heads[head_name] + prediction = self._predict_head( + head_name=head_name, + head_obj=head_obj, + image_feature=image_feature, + box_tower_feature=box_tower_feature, + feature_index=feature_index, + num_predictions_per_location=num_predictions_per_location) + predictions[head_name].append(prediction) + return predictions + + diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_box_predictor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_box_predictor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b1373c699906e17533f98c2c8f988111a4b1e87 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_box_predictor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_box_predictor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_box_predictor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3236615dfb60bc848ec271fc5173b9c4169feb93 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_box_predictor_tf1_test.py @@ -0,0 +1,932 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.convolutional_box_predictor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import unittest +from absl.testing import parameterized +import numpy as np +from six.moves import range +from six.moves import zip +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.predictors import convolutional_box_predictor as box_predictor +from object_detection.predictors.heads import box_head +from object_detection.predictors.heads import class_head +from object_detection.predictors.heads import mask_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ConvolutionalBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.build(conv_hyperparams, is_training=True) + + def test_get_boxes_for_five_aspect_ratios_per_location(self): + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, objectness_predictions) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) + + def test_get_boxes_for_one_aspect_ratio_per_location(self): + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[1], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, objectness_predictions) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = 
self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) + + def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( + self): + num_classes_without_background = 6 + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], + num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 320, num_classes_without_background+1]) + + def test_get_predictions_with_feature_maps_of_dynamic_shape( + self): + image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + init_op = tf.global_variables_initializer() + + resolution = 32 + expected_num_anchors = resolution*resolution*5 + with self.test_session() as sess: + sess.run(init_op) + (box_encodings_shape, + objectness_predictions_shape) = sess.run( + [tf.shape(box_encodings), tf.shape(objectness_predictions)], + feed_dict={image_features: + np.random.rand(4, resolution, resolution, 64)}) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) + self.assertAllEqual(objectness_predictions_shape, + [4, expected_num_anchors, 1]) + expected_variable_set = set([ + 'BoxPredictor/Conv2d_0_1x1_32/biases', + 'BoxPredictor/Conv2d_0_1x1_32/weights', + 'BoxPredictor/BoxEncodingPredictor/biases', + 'BoxPredictor/BoxEncodingPredictor/weights', + 'BoxPredictor/ClassPredictor/biases', + 'BoxPredictor/ClassPredictor/weights']) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_use_depthwise_convolution(self): + image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + 
max_depth=32, + num_layers_before_predictor=1, + dropout_keep_prob=0.8, + kernel_size=3, + box_code_size=4, + use_dropout=True, + use_depthwise=True)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + init_op = tf.global_variables_initializer() + + resolution = 32 + expected_num_anchors = resolution*resolution*5 + with self.test_session() as sess: + sess.run(init_op) + (box_encodings_shape, + objectness_predictions_shape) = sess.run( + [tf.shape(box_encodings), tf.shape(objectness_predictions)], + feed_dict={image_features: + np.random.rand(4, resolution, resolution, 64)}) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) + self.assertAllEqual(objectness_predictions_shape, + [4, expected_num_anchors, 1]) + expected_variable_set = set([ + 'BoxPredictor/Conv2d_0_1x1_32/biases', + 'BoxPredictor/Conv2d_0_1x1_32/weights', + 'BoxPredictor/BoxEncodingPredictor_depthwise/biases', + 'BoxPredictor/BoxEncodingPredictor_depthwise/depthwise_weights', + 'BoxPredictor/BoxEncodingPredictor/biases', + 'BoxPredictor/BoxEncodingPredictor/weights', + 'BoxPredictor/ClassPredictor_depthwise/biases', + 'BoxPredictor/ClassPredictor_depthwise/depthwise_weights', + 'BoxPredictor/ClassPredictor/biases', + 'BoxPredictor/ClassPredictor/weights']) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_no_dangling_outputs(self): + image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) + conv_box_predictor = ( + box_predictor_builder.build_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + dropout_keep_prob=0.8, + kernel_size=3, + box_code_size=4, + use_dropout=True, + use_depthwise=True)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + + bad_dangling_ops = [] + types_safe_to_dangle = set(['Assign', 'Mul', 'Const']) + for op in tf.get_default_graph().get_operations(): + if (not op.outputs) or (not op.outputs[0].consumers()): + if 'BoxPredictor' in op.name: + if op.type not in types_safe_to_dangle: + bad_dangling_ops.append(op) + + self.assertEqual(bad_dangling_ops, []) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + batch_norm { + train: true, + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.build(conv_hyperparams, is_training=True) + + def _build_conv_arg_scope_no_batch_norm(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: 
RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + random_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.build(conv_hyperparams, is_training=True) + + def test_get_boxes_for_five_aspect_ratios_per_location(self): + + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, objectness_predictions) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = self.execute( + graph_fn, [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 4]) + self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) + + def test_bias_predictions_to_background_with_sigmoid_score_conversion(self): + + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=True, + num_classes=2, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + class_prediction_bias_init=-4.6, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + class_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (tf.nn.sigmoid(class_predictions),) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + class_predictions = self.execute(graph_fn, [image_features]) + self.assertAlmostEqual(np.mean(class_predictions), 0.01, places=3) + + def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( + self): + + num_classes_without_background = 6 + def graph_fn(image_features): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], + num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, class_predictions_with_background) + + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 320, num_classes_without_background+1]) + + def test_get_multi_class_predictions_from_two_feature_maps( + self): + + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + 
conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features1, image_features2]) + self.assertAllEqual(box_encodings.shape, [4, 640, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 640, num_classes_without_background+1]) + + def test_get_multi_class_predictions_from_feature_maps_of_different_depth( + self): + + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2, image_features3): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2, image_features3], + num_predictions_per_location=[5, 5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features3 = np.random.rand(4, 8, 8, 32).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features1, image_features2, image_features3]) + self.assertAllEqual(box_encodings.shape, [4, 960, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 960, num_classes_without_background+1]) + + def test_predictions_multiple_feature_maps_share_weights_separate_batchnorm( + self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 
32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Box prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_multiple_feature_maps_share_weights_without_batchnorm( + self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + apply_batch_norm=False)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Box prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 
'BoxPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/biases'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/biases'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_multiple_feature_maps_share_weights_with_depthwise( + self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + apply_batch_norm=False, + use_depthwise=True)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Box prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/biases'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/biases'), + 
('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/biases'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/depthwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/pointwise_weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_no_batchnorm_params_when_batchnorm_is_not_configured(self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_conv_arg_scope_no_batch_norm(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + apply_batch_norm=False)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Box prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/biases'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/biases'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_share_weights_share_tower_separate_batchnorm( + self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + 
conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + share_prediction_tower=True)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Shared prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_share_weights_share_tower_without_batchnorm( + self): + num_classes_without_background = 6 + def graph_fn(image_features1, image_features2): + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + share_prediction_tower=True, + apply_batch_norm=False)) + box_predictions = conv_box_predictor.predict( + [image_features1, image_features2], + num_predictions_per_location=[5, 5], + scope='BoxPredictor') + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + with self.test_session(graph=tf.Graph()): + graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), + tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) + actual_variable_set = set( + [var.op.name for var in tf.trainable_variables()]) + expected_variable_set = set([ + # Shared prediction tower + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/biases'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/weights'), + 
('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/biases'), + # Box prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictor/biases'), + # Class prediction head + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/weights'), + ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictor/biases')]) + + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_get_predictions_with_feature_maps_of_dynamic_shape( + self): + image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) + conv_box_predictor = ( + box_predictor_builder.build_weight_shared_convolutional_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + box_predictions = conv_box_predictor.predict( + [image_features], num_predictions_per_location=[5], + scope='BoxPredictor') + box_encodings = tf.concat(box_predictions[box_predictor.BOX_ENCODINGS], + axis=1) + objectness_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + init_op = tf.global_variables_initializer() + + resolution = 32 + expected_num_anchors = resolution*resolution*5 + with self.test_session() as sess: + sess.run(init_op) + (box_encodings_shape, + objectness_predictions_shape) = sess.run( + [tf.shape(box_encodings), tf.shape(objectness_predictions)], + feed_dict={image_features: + np.random.rand(4, resolution, resolution, 64)}) + self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 4]) + self.assertAllEqual(objectness_predictions_shape, + [4, expected_num_anchors, 1]) + + def test_other_heads_predictions(self): + box_code_size = 4 + num_classes_without_background = 3 + other_head_name = 'Mask' + mask_height = 5 + mask_width = 5 + num_predictions_per_location = 5 + + def graph_fn(image_features): + box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( + box_code_size) + class_prediction_head = class_head.WeightSharedConvolutionalClassHead( + num_classes_without_background + 1) + other_heads = { + other_head_name: + mask_head.WeightSharedConvolutionalMaskHead( + num_classes_without_background, + mask_height=mask_height, + mask_width=mask_width) + } + conv_box_predictor = box_predictor.WeightSharedConvolutionalBoxPredictor( + is_training=False, + num_classes=num_classes_without_background, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + other_heads=other_heads, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + depth=32, + num_layers_before_predictor=2) + box_predictions = conv_box_predictor.predict( + [image_features], + num_predictions_per_location=[num_predictions_per_location], + scope='BoxPredictor') + for key, value in box_predictions.items(): + box_predictions[key] = tf.concat(value, axis=1) + assert len(box_predictions) == 3 + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + box_predictions[other_head_name]) + + batch_size = 4 + feature_ht = 8 + feature_wt = 8 + image_features = np.random.rand(batch_size, feature_ht, feature_wt, + 64).astype(np.float32) + (box_encodings, class_predictions, other_head_predictions) = self.execute( + graph_fn, [image_features]) + num_anchors = feature_ht * feature_wt * 
num_predictions_per_location + self.assertAllEqual(box_encodings.shape, + [batch_size, num_anchors, box_code_size]) + self.assertAllEqual( + class_predictions.shape, + [batch_size, num_anchors, num_classes_without_background + 1]) + self.assertAllEqual(other_head_predictions.shape, [ + batch_size, num_anchors, num_classes_without_background, mask_height, + mask_width + ]) + + + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_keras_box_predictor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_keras_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..fc72fb04c2d47301b1ac5fc185ca98c6b00073c0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_keras_box_predictor.py @@ -0,0 +1,486 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Convolutional Box Predictors with and without weight sharing.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.core import box_predictor +from object_detection.utils import shape_utils +from object_detection.utils import static_shape + +keras = tf.keras.layers + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class _NoopVariableScope(object): + """A dummy class that does not push any scope.""" + + def __enter__(self): + return None + + def __exit__(self, exc_type, exc_value, traceback): + return False + + +class ConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): + """Convolutional Keras Box Predictor. + + Optionally add an intermediate 1x1 convolutional layer after features and + predict in parallel branches box_encodings and + class_predictions_with_background. + + Currently this box predictor assumes that predictions are "shared" across + classes --- that is each anchor makes box predictions which do not depend + on class. + """ + + def __init__(self, + is_training, + num_classes, + box_prediction_heads, + class_prediction_heads, + other_heads, + conv_hyperparams, + num_layers_before_predictor, + min_depth, + max_depth, + freeze_batchnorm, + inplace_batchnorm_update, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. 
Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + box_prediction_heads: A list of heads that predict the boxes. + class_prediction_heads: A list of heads that predict the classes. + other_heads: A dictionary mapping head names to lists of convolutional + heads. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + min_depth: Minimum feature depth prior to predicting box encodings + and class predictions. + max_depth: Maximum feature depth prior to predicting box encodings + and class predictions. If max_depth is set to 0, no additional + feature map will be inserted before location and class predictions. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if min_depth > max_depth. + """ + super(ConvolutionalBoxPredictor, self).__init__( + is_training, num_classes, freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + name=name) + if min_depth > max_depth: + raise ValueError('min_depth should be less than or equal to max_depth') + if len(box_prediction_heads) != len(class_prediction_heads): + raise ValueError('All lists of heads must be the same length.') + for other_head_list in other_heads.values(): + if len(box_prediction_heads) != len(other_head_list): + raise ValueError('All lists of heads must be the same length.') + + self._prediction_heads = { + BOX_ENCODINGS: box_prediction_heads, + CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_heads, + } + + if other_heads: + self._prediction_heads.update(other_heads) + + # We generate a consistent ordering for the prediction head names, + # So that all workers build the model in the exact same order + self._sorted_head_names = sorted(self._prediction_heads.keys()) + + self._conv_hyperparams = conv_hyperparams + self._min_depth = min_depth + self._max_depth = max_depth + self._num_layers_before_predictor = num_layers_before_predictor + + self._shared_nets = [] + + def build(self, input_shapes): + """Creates the variables of the layer.""" + if len(input_shapes) != len(self._prediction_heads[BOX_ENCODINGS]): + raise ValueError('This box predictor was constructed with %d heads,' + 'but there are %d inputs.' % + (len(self._prediction_heads[BOX_ENCODINGS]), + len(input_shapes))) + for stack_index, input_shape in enumerate(input_shapes): + net = [] + + # Add additional conv layers before the class predictor. 
+ features_depth = static_shape.get_depth(input_shape) + depth = max(min(features_depth, self._max_depth), self._min_depth) + tf.logging.info( + 'depth of additional conv before box predictor: {}'.format(depth)) + + if depth > 0 and self._num_layers_before_predictor > 0: + for i in range(self._num_layers_before_predictor): + net.append(keras.Conv2D(depth, [1, 1], + name='SharedConvolutions_%d/Conv2d_%d_1x1_%d' + % (stack_index, i, depth), + padding='SAME', + **self._conv_hyperparams.params())) + net.append(self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_norm' + % (stack_index, i, depth))) + net.append(self._conv_hyperparams.build_activation_layer( + name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_activation' + % (stack_index, i, depth), + )) + # Until certain bugs are fixed in checkpointable lists, + # this net must be appended only once it's been filled with layers + self._shared_nets.append(net) + self.built = True + + def _predict(self, image_features, **kwargs): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + **kwargs: Unused Keyword args + + Returns: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. Each entry in the + list corresponds to a feature map in the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + """ + predictions = collections.defaultdict(list) + + for (index, net) in enumerate(image_features): + + # Apply shared conv layers before the head predictors. + for layer in self._shared_nets[index]: + net = layer(net) + + for head_name in self._sorted_head_names: + head_obj = self._prediction_heads[head_name][index] + prediction = head_obj(net) + predictions[head_name].append(prediction) + + return predictions + + +class WeightSharedConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): + """Convolutional Box Predictor with weight sharing based on Keras. + + Defines the box predictor as defined in + https://arxiv.org/abs/1708.02002. This class differs from + ConvolutionalBoxPredictor in that it shares weights and biases while + predicting from different feature maps. However, batch_norm parameters are not + shared because the statistics of the activations vary among the different + feature maps. + + Also note that separate multi-layer towers are constructed for the box + encoding and class predictors respectively. + """ + + def __init__(self, + is_training, + num_classes, + box_prediction_head, + class_prediction_head, + other_heads, + conv_hyperparams, + depth, + num_layers_before_predictor, + freeze_batchnorm, + inplace_batchnorm_update, + kernel_size=3, + apply_batch_norm=False, + share_prediction_tower=False, + use_depthwise=False, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. 
Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + box_prediction_head: The head that predicts the boxes. + class_prediction_head: The head that predicts the classes. + other_heads: A dictionary mapping head names to convolutional + head classes. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + depth: depth of conv layers. + num_layers_before_predictor: Number of the additional conv layers before + the predictor. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + inplace_batchnorm_update: Whether to update batch norm moving average + values inplace. When this is false train op must add a control + dependency on tf.graphkeys.UPDATE_OPS collection in order to update + batch norm statistics. + kernel_size: Size of final convolution kernel. + apply_batch_norm: Whether to apply batch normalization to conv layers in + this predictor. + share_prediction_tower: Whether to share the multi-layer tower among box + prediction head, class prediction head and other heads. + use_depthwise: Whether to use depthwise separable conv2d instead of + regular conv2d. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + """ + super(WeightSharedConvolutionalBoxPredictor, self).__init__( + is_training, num_classes, freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=inplace_batchnorm_update, + name=name) + + self._box_prediction_head = box_prediction_head + self._prediction_heads = { + CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_head, + } + if other_heads: + self._prediction_heads.update(other_heads) + # We generate a consistent ordering for the prediction head names, + # so that all workers build the model in the exact same order. + self._sorted_head_names = sorted(self._prediction_heads.keys()) + + self._conv_hyperparams = conv_hyperparams + self._depth = depth + self._num_layers_before_predictor = num_layers_before_predictor + self._kernel_size = kernel_size + self._apply_batch_norm = apply_batch_norm + self._share_prediction_tower = share_prediction_tower + self._use_depthwise = use_depthwise + + # Additional projection layers to bring all feature maps to uniform + # channels. + self._additional_projection_layers = [] + # The base tower layers for each head. + self._base_tower_layers_for_heads = { + BOX_ENCODINGS: [], + CLASS_PREDICTIONS_WITH_BACKGROUND: [], + } + for head_name in other_heads.keys(): + self._base_tower_layers_for_heads[head_name] = [] + + # A dict maps the tower_name_scope of each head to the shared conv layers in + # the base tower for different feature map levels. 
+ self._head_scope_conv_layers = {} + + def _insert_additional_projection_layer( + self, inserted_layer_counter, target_channel): + projection_layers = [] + if inserted_layer_counter >= 0: + use_bias = False if (self._apply_batch_norm and not + self._conv_hyperparams.force_use_bias()) else True + projection_layers.append(keras.Conv2D( + target_channel, [1, 1], strides=1, padding='SAME', + name='ProjectionLayer/conv2d_{}'.format(inserted_layer_counter), + **self._conv_hyperparams.params(use_bias=use_bias))) + if self._apply_batch_norm: + projection_layers.append(self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='ProjectionLayer/conv2d_{}/BatchNorm'.format( + inserted_layer_counter))) + inserted_layer_counter += 1 + return inserted_layer_counter, projection_layers + + def _compute_base_tower(self, tower_name_scope, feature_index): + conv_layers = [] + batch_norm_layers = [] + activation_layers = [] + use_bias = False if (self._apply_batch_norm and not + self._conv_hyperparams.force_use_bias()) else True + for additional_conv_layer_idx in range(self._num_layers_before_predictor): + layer_name = '{}/conv2d_{}'.format( + tower_name_scope, additional_conv_layer_idx) + if tower_name_scope not in self._head_scope_conv_layers: + if self._use_depthwise: + kwargs = self._conv_hyperparams.params(use_bias=use_bias) + # Both the regularizer and initializer apply to the depthwise layer, + # so we remap the kernel_* to depthwise_* here. + kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] + kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] + conv_layers.append( + tf.keras.layers.SeparableConv2D( + self._depth, [self._kernel_size, self._kernel_size], + padding='SAME', + name=layer_name, + **kwargs)) + else: + conv_layers.append( + tf.keras.layers.Conv2D( + self._depth, + [self._kernel_size, self._kernel_size], + padding='SAME', + name=layer_name, + **self._conv_hyperparams.params(use_bias=use_bias))) + # Each feature gets a separate batchnorm parameter even though they share + # the same convolution weights. + if self._apply_batch_norm: + batch_norm_layers.append(self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='{}/conv2d_{}/BatchNorm/feature_{}'.format( + tower_name_scope, additional_conv_layer_idx, feature_index))) + activation_layers.append(self._conv_hyperparams.build_activation_layer( + name='{}/conv2d_{}/activation_{}'.format( + tower_name_scope, additional_conv_layer_idx, feature_index))) + + # Set conv layers as the shared conv layers for different feature maps with + # the same tower_name_scope. 
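# --- Editorial sketch, not part of the patch above --------------------------
# The comments above describe the weight-sharing scheme: the same conv layer
# objects are reused across feature-map levels, while batch norm is built once
# per level because activation statistics differ between levels. A minimal
# tf.keras illustration of that pattern, assuming TF2 eager execution (layer
# and variable names here are hypothetical, chosen only for the example):
import tensorflow as tf2  # plain TF2 API, distinct from the compat.v1 import

shared_conv = tf2.keras.layers.Conv2D(32, 3, padding='same', name='tower_conv')
per_level_bn = [tf2.keras.layers.BatchNormalization(name='bn_feature_%d' % i)
                for i in range(2)]
feature_maps = [tf2.random.uniform([4, 32, 32, 3]),
                tf2.random.uniform([4, 16, 16, 3])]
# Both levels share tower_conv's kernel; each level has its own beta/gamma.
outputs = [per_level_bn[i](shared_conv(fmap))
           for i, fmap in enumerate(feature_maps)]
# -----------------------------------------------------------------------------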
+ if tower_name_scope in self._head_scope_conv_layers: + conv_layers = self._head_scope_conv_layers[tower_name_scope] + + # Stack the base_tower_layers in the order of conv_layer, batch_norm_layer + # and activation_layer + base_tower_layers = [] + for i in range(self._num_layers_before_predictor): + base_tower_layers.extend([conv_layers[i]]) + if self._apply_batch_norm: + base_tower_layers.extend([batch_norm_layers[i]]) + base_tower_layers.extend([activation_layers[i]]) + return conv_layers, base_tower_layers + + def build(self, input_shapes): + """Creates the variables of the layer.""" + feature_channels = [ + shape_utils.get_dim_as_int(input_shape[3]) + for input_shape in input_shapes + ] + has_different_feature_channels = len(set(feature_channels)) > 1 + if has_different_feature_channels: + inserted_layer_counter = 0 + target_channel = max(set(feature_channels), key=feature_channels.count) + tf.logging.info('Not all feature maps have the same number of ' + 'channels, found: {}, appending additional projection ' + 'layers to bring all feature maps to uniformly have {} ' + 'channels.'.format(feature_channels, target_channel)) + else: + # Place holder variables if has_different_feature_channels is False. + target_channel = -1 + inserted_layer_counter = -1 + + def _build_layers(tower_name_scope, feature_index): + conv_layers, base_tower_layers = self._compute_base_tower( + tower_name_scope=tower_name_scope, feature_index=feature_index) + if tower_name_scope not in self._head_scope_conv_layers: + self._head_scope_conv_layers[tower_name_scope] = conv_layers + return base_tower_layers + + for feature_index in range(len(input_shapes)): + # Additional projection layers should not be shared as input channels + # (and thus weight shapes) are different + inserted_layer_counter, projection_layers = ( + self._insert_additional_projection_layer( + inserted_layer_counter, target_channel)) + self._additional_projection_layers.append(projection_layers) + + if self._share_prediction_tower: + box_tower_scope = 'PredictionTower' + else: + box_tower_scope = 'BoxPredictionTower' + # For box tower base + box_tower_layers = _build_layers(box_tower_scope, feature_index) + self._base_tower_layers_for_heads[BOX_ENCODINGS].append(box_tower_layers) + + for head_name in self._sorted_head_names: + if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: + tower_name_scope = 'ClassPredictionTower' + else: + tower_name_scope = '{}PredictionTower'.format(head_name) + box_tower_layers = _build_layers(tower_name_scope, feature_index) + self._base_tower_layers_for_heads[head_name].append(box_tower_layers) + + self.built = True + + def _predict(self, image_features, **kwargs): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + **kwargs: Unused Keyword args + + Returns: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. Each entry in the + list corresponds to a feature map in the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. 
+ """ + predictions = collections.defaultdict(list) + + def _apply_layers(base_tower_layers, image_feature): + for layer in base_tower_layers: + image_feature = layer(image_feature) + return image_feature + + for (index, image_feature) in enumerate(image_features): + # Apply additional projection layers to image features + for layer in self._additional_projection_layers[index]: + image_feature = layer(image_feature) + + # Apply box tower layers. + box_tower_feature = _apply_layers( + self._base_tower_layers_for_heads[BOX_ENCODINGS][index], + image_feature) + box_encodings = self._box_prediction_head(box_tower_feature) + predictions[BOX_ENCODINGS].append(box_encodings) + + for head_name in self._sorted_head_names: + head_obj = self._prediction_heads[head_name] + if self._share_prediction_tower: + head_tower_feature = box_tower_feature + else: + head_tower_feature = _apply_layers( + self._base_tower_layers_for_heads[head_name][index], + image_feature) + prediction = head_obj(head_tower_feature) + predictions[head_name].append(prediction) + return predictions diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_keras_box_predictor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_keras_box_predictor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c6750601a91b9197c8e9bae657b1700d2648bf8 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_keras_box_predictor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_keras_box_predictor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_keras_box_predictor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..180a6e94643a80ac04ee12dfacb5bc6d04e09ec8 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/convolutional_keras_box_predictor_tf2_test.py @@ -0,0 +1,952 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.convolutional_keras_box_predictor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.predictors import convolutional_keras_box_predictor as box_predictor +from object_detection.predictors.heads import keras_box_head +from object_detection.predictors.heads import keras_class_head +from object_detection.predictors.heads import keras_mask_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ConvolutionalKerasBoxPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_get_boxes_for_five_aspect_ratios_per_location(self): + conv_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4 + )) + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, objectness_predictions) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) + + def test_get_boxes_for_one_aspect_ratio_per_location(self): + conv_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[1], + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4 + )) + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, objectness_predictions) + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, 
[4, 64, 1]) + + def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( + self): + num_classes_without_background = 6 + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + conv_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4 + )) + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 320, num_classes_without_background+1]) + + def test_get_predictions_with_feature_maps_of_dynamic_shape( + self): + tf.keras.backend.clear_session() + conv_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=1, + box_code_size=4 + )) + variables = [] + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return box_encodings, objectness_predictions + resolution = 32 + expected_num_anchors = resolution*resolution*5 + box_encodings, objectness_predictions = self.execute( + graph_fn, [np.random.rand(4, resolution, resolution, 64)]) + + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + self.assertAllEqual(box_encodings.shape, [4, expected_num_anchors, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, + [4, expected_num_anchors, 1]) + expected_variable_set = set([ + 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias', + 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel', + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias', + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel', + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias', + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel']) + self.assertEqual(expected_variable_set, actual_variable_set) + self.assertEqual(conv_box_predictor._sorted_head_names, + ['box_encodings', 'class_predictions_with_background']) + + def test_use_depthwise_convolution(self): + tf.keras.backend.clear_session() + conv_box_predictor = ( + box_predictor_builder.build_convolutional_keras_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + 
inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + min_depth=0, + max_depth=32, + num_layers_before_predictor=1, + use_dropout=True, + dropout_keep_prob=0.8, + kernel_size=3, + box_code_size=4, + use_depthwise=True + )) + variables = [] + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return box_encodings, objectness_predictions + + resolution = 32 + expected_num_anchors = resolution*resolution*5 + box_encodings, objectness_predictions = self.execute( + graph_fn, [np.random.rand(4, resolution, resolution, 64)]) + + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + self.assertAllEqual(box_encodings.shape, [4, expected_num_anchors, 1, 4]) + self.assertAllEqual(objectness_predictions.shape, + [4, expected_num_anchors, 1]) + expected_variable_set = set([ + 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias', + 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel', + + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/' + 'bias', + + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor_depthwise/' + 'depthwise_kernel', + + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias', + 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel', + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/bias', + + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor_depthwise/' + 'depthwise_kernel', + + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias', + 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel']) + self.assertEqual(expected_variable_set, actual_variable_set) + self.assertEqual(conv_box_predictor._sorted_head_names, + ['box_encodings', 'class_predictions_with_background']) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class WeightSharedConvolutionalKerasBoxPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self, add_batch_norm=True): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: RELU_6 + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + stddev: 0.01 + mean: 0.0 + } + } + """ + if add_batch_norm: + batch_norm_proto = """ + batch_norm { + train: true, + } + """ + conv_hyperparams_text_proto += batch_norm_proto + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + # pylint: disable=line-too-long + def test_get_boxes_for_five_aspect_ratios_per_location(self): + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=0, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + objectness_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, objectness_predictions) + 
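# --- Editorial sketch, not part of the patch above --------------------------
# Two bits of arithmetic behind the expectations in these Keras tests:
#   * an 8x8 feature map with 5 predictions per location yields
#     8 * 8 * 5 = 320 anchors, hence the [4, 320, 4] / [4, 320, 1] shapes;
#   * a class_prediction_bias_init of -4.6 pushes the post-sigmoid score to
#     roughly sigmoid(-4.6) ~= 0.01, which the bias-to-background test checks.
# A self-contained check of both (the helper name is hypothetical):
import math

def expected_num_anchors(height, width, preds_per_location):
  return height * width * preds_per_location

assert expected_num_anchors(8, 8, 5) == 320
assert abs(1.0 / (1.0 + math.exp(4.6)) - 0.01) < 1e-3
# -----------------------------------------------------------------------------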
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, objectness_predictions) = self.execute( + graph_fn, [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 4]) + self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) + + def test_bias_predictions_to_background_with_sigmoid_score_conversion(self): + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=True, + num_classes=2, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + depth=32, + num_layers_before_predictor=1, + class_prediction_bias_init=-4.6, + box_code_size=4)) + + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + class_predictions = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (tf.nn.sigmoid(class_predictions),) + + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + class_predictions = self.execute(graph_fn, [image_features]) + self.assertAlmostEqual(np.mean(class_predictions), 0.01, places=3) + + def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( + self): + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5], + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat(box_predictions[ + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) + return (box_encodings, class_predictions_with_background) + + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features]) + self.assertAllEqual(box_encodings.shape, [4, 320, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 320, num_classes_without_background+1]) + + def test_get_multi_class_predictions_from_two_feature_maps( + self): + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor([image_features1, image_features2]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features1, 
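A quick back-of-the-envelope check on the 0.01 assertion above (editorial sketch, not patch code): with class_prediction_bias_init=-4.6 and small truncated-normal kernels, the class logits start out close to the bias, so the mean sigmoid score is roughly sigmoid(-4.6):

import math

prior_logit = -4.6  # class_prediction_bias_init used in the sigmoid-score test above
background_prior = 1.0 / (1.0 + math.exp(-prior_logit))
print(background_prior)  # ~0.00995, which assertAlmostEqual(..., 0.01, places=3) accepts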
image_features2]) + self.assertAllEqual(box_encodings.shape, [4, 640, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 640, num_classes_without_background+1]) + + def test_get_multi_class_predictions_from_feature_maps_of_different_depth( + self): + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5, 5], + depth=32, + num_layers_before_predictor=1, + box_code_size=4)) + + def graph_fn(image_features1, image_features2, image_features3): + box_predictions = conv_box_predictor( + [image_features1, image_features2, image_features3]) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) + image_features3 = np.random.rand(4, 8, 8, 32).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features1, image_features2, image_features3]) + self.assertAllEqual(box_encodings.shape, [4, 960, 4]) + self.assertAllEqual(class_predictions_with_background.shape, + [4, 960, num_classes_without_background+1]) + + def test_predictions_multiple_feature_maps_share_weights_separate_batchnorm( + self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor([image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Box prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 
'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_multiple_feature_maps_share_weights_without_batchnorm( + self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + 
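Editorial note on reading the expected variable set above: each prediction-tower convolution contributes exactly one kernel, shared by both input feature maps, while the BatchNorm variables are duplicated per feature level, e.g.:

# BoxPredictionTower/conv2d_0/kernel                          <- one copy, shared across feature maps
# BoxPredictionTower/conv2d_0/BatchNorm/feature_0/{beta, moving_mean, moving_variance}
# BoxPredictionTower/conv2d_0/BatchNorm/feature_1/{beta, moving_mean, moving_variance}

i.e. the weights are tied while each feature level keeps its own normalization statistics; the next test shows that with apply_batch_norm=False those statistics are replaced by a single bias per convolution.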
apply_batch_norm=False)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor([image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Box prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/bias'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/bias'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_multiple_feature_maps_share_weights_with_depthwise( + self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + apply_batch_norm=False, + use_depthwise=True)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor([image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Box prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/pointwise_kernel'), + 
('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/pointwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/bias'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/pointwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/pointwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/pointwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/bias'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/depthwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/pointwise_kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_no_batchnorm_params_when_batchnorm_is_not_configured(self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + apply_batch_norm=False)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor( + [image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Box prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'BoxPredictionTower/conv2d_1/bias'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 
'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'ClassPredictionTower/conv2d_1/bias'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_share_weights_share_tower_separate_batchnorm( + self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + share_prediction_tower=True)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor( + [image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Shared prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/BatchNorm/feature_1/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_0/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_1/beta'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_0/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_1/moving_mean'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_0/moving_variance'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/BatchNorm/feature_1/moving_variance'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 
'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_predictions_share_weights_share_tower_without_batchnorm( + self): + tf.keras.backend.clear_session() + num_classes_without_background = 6 + conv_box_predictor = ( + box_predictor_builder + .build_weight_shared_convolutional_keras_box_predictor( + is_training=False, + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(add_batch_norm=False), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + num_predictions_per_location_list=[5, 5], + depth=32, + num_layers_before_predictor=2, + box_code_size=4, + share_prediction_tower=True, + apply_batch_norm=False)) + variables = [] + + def graph_fn(image_features1, image_features2): + box_predictions = conv_box_predictor( + [image_features1, image_features2]) + variables.extend(list(conv_box_predictor.variables)) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + self.execute(graph_fn, [ + np.random.rand(4, 32, 32, 3).astype(np.float32), + np.random.rand(4, 16, 16, 3).astype(np.float32) + ]) + actual_variable_set = set([var.name.split(':')[0] for var in variables]) + expected_variable_set = set([ + # Shared prediction tower + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_0/bias'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'PredictionTower/conv2d_1/bias'), + # Box prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalBoxHead/BoxPredictor/bias'), + # Class prediction head + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/kernel'), + ('WeightSharedConvolutionalBoxPredictor/' + 'WeightSharedConvolutionalClassHead/ClassPredictor/bias')]) + + self.assertEqual(expected_variable_set, actual_variable_set) + + def test_other_heads_predictions(self): + box_code_size = 4 + num_classes_without_background = 3 + other_head_name = 'Mask' + mask_height = 5 + mask_width = 5 + num_predictions_per_location = 5 + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=box_code_size, + conv_hyperparams=self._build_conv_hyperparams(), + num_predictions_per_location=num_predictions_per_location) + class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=num_classes_without_background + 1, + conv_hyperparams=self._build_conv_hyperparams(), + num_predictions_per_location=num_predictions_per_location) + other_heads = { + other_head_name: + keras_mask_head.WeightSharedConvolutionalMaskHead( + num_classes=num_classes_without_background, + conv_hyperparams=self._build_conv_hyperparams(), + 
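A short reading aid for the shared-tower expectations above (editorial, derived from the listed variable names):

# share_prediction_tower=True  ->  a single 'PredictionTower/...' stack feeds both heads
#                                  (no separate BoxPredictionTower / ClassPredictionTower);
# only WeightSharedConvolutionalBoxHead/BoxPredictor and
# WeightSharedConvolutionalClassHead/ClassPredictor keep their own weights.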
num_predictions_per_location=num_predictions_per_location, + mask_height=mask_height, + mask_width=mask_width) + } + + conv_box_predictor = box_predictor.WeightSharedConvolutionalBoxPredictor( + is_training=False, + num_classes=num_classes_without_background, + box_prediction_head=box_prediction_head, + class_prediction_head=class_prediction_head, + other_heads=other_heads, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + inplace_batchnorm_update=False, + depth=32, + num_layers_before_predictor=2) + def graph_fn(image_features): + box_predictions = conv_box_predictor([image_features]) + for key, value in box_predictions.items(): + box_predictions[key] = tf.concat(value, axis=1) + assert len(box_predictions) == 3 + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + box_predictions[other_head_name]) + + batch_size = 4 + feature_ht = 8 + feature_wt = 8 + image_features = np.random.rand(batch_size, feature_ht, feature_wt, + 64).astype(np.float32) + (box_encodings, class_predictions, other_head_predictions) = self.execute( + graph_fn, [image_features]) + num_anchors = feature_ht * feature_wt * num_predictions_per_location + self.assertAllEqual(box_encodings.shape, + [batch_size, num_anchors, box_code_size]) + self.assertAllEqual( + class_predictions.shape, + [batch_size, num_anchors, num_classes_without_background + 1]) + self.assertAllEqual(other_head_predictions.shape, [ + batch_size, num_anchors, num_classes_without_background, mask_height, + mask_width + ]) + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e85018d8092bd0ca0e78a216e2d983bc11901610 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c92b7903470a8ad2b34c670cf74727444d75c3b4 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/box_head.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/box_head.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b79d9ad553542953de7e69c1b8e95ef6173f8a69 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/box_head.cpython-36.pyc differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/class_head.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/class_head.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cfdd3b0daab514b8a0288ca4d8fc5ee50663582 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/class_head.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/head.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/head.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a306fa9cc9bfd41337d327495c1803d53d0fc07 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/head.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/keras_box_head.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/keras_box_head.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcccdcf077fae02c2db760c885822e6f3ff0065d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/keras_box_head.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/keras_class_head.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/keras_class_head.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c419aecb2d1f17720abe8d9b95644daacb15a54 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/keras_class_head.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/keras_mask_head.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/keras_mask_head.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6445fdaca9b936834b5a98429c0b12635550a658 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/keras_mask_head.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/mask_head.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/mask_head.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37e67ebd5970b4dfbfb611f551083caf955462d7 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/__pycache__/mask_head.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/box_head.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/box_head.py new file mode 100644 index 0000000000000000000000000000000000000000..6535e9b28192b05d15a202ce8b9bfef20f63ce83 --- /dev/null +++ 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/box_head.py @@ -0,0 +1,281 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Box Head. + +Contains Box prediction head classes for different meta architectures. +All the box prediction heads have a predict function that receives the +`features` as the first argument and returns `box_encodings`. +""" +import functools +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.predictors.heads import head + + +class MaskRCNNBoxHead(head.Head): + """Box prediction head. + + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + is_training, + num_classes, + fc_hyperparams_fn, + use_dropout, + dropout_keep_prob, + box_code_size, + share_box_across_classes=False): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + fc_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for fully connected ops. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + box_code_size: Size of encoding for each box. + share_box_across_classes: Whether to share boxes across classes rather + than use a different box for each class. + """ + super(MaskRCNNBoxHead, self).__init__() + self._is_training = is_training + self._num_classes = num_classes + self._fc_hyperparams_fn = fc_hyperparams_fn + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._box_code_size = box_code_size + self._share_box_across_classes = share_box_across_classes + + def predict(self, features, num_predictions_per_location=1): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, + channels] containing features for a batch of images. + num_predictions_per_location: Int containing number of predictions per + location. + + Returns: + box_encodings: A float tensor of shape + [batch_size, 1, num_classes, code_size] representing the location of the + objects. + + Raises: + ValueError: If num_predictions_per_location is not 1. 
+ """ + if num_predictions_per_location != 1: + raise ValueError('Only num_predictions_per_location=1 is supported') + spatial_averaged_roi_pooled_features = tf.reduce_mean( + features, [1, 2], keep_dims=True, name='AvgPool') + flattened_roi_pooled_features = slim.flatten( + spatial_averaged_roi_pooled_features) + if self._use_dropout: + flattened_roi_pooled_features = slim.dropout( + flattened_roi_pooled_features, + keep_prob=self._dropout_keep_prob, + is_training=self._is_training) + number_of_boxes = 1 + if not self._share_box_across_classes: + number_of_boxes = self._num_classes + + with slim.arg_scope(self._fc_hyperparams_fn()): + box_encodings = slim.fully_connected( + flattened_roi_pooled_features, + number_of_boxes * self._box_code_size, + reuse=tf.AUTO_REUSE, + activation_fn=None, + scope='BoxEncodingPredictor') + box_encodings = tf.reshape(box_encodings, + [-1, 1, number_of_boxes, self._box_code_size]) + return box_encodings + + +class ConvolutionalBoxHead(head.Head): + """Convolutional box prediction head.""" + + def __init__(self, + is_training, + box_code_size, + kernel_size, + use_depthwise=False, + box_encodings_clip_range=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + box_code_size: Size of encoding for each box. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + box_encodings_clip_range: Min and max values for clipping box_encodings. + + Raises: + ValueError: if min_depth > max_depth. + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(ConvolutionalBoxHead, self).__init__() + self._is_training = is_training + self._box_code_size = box_code_size + self._kernel_size = kernel_size + self._use_depthwise = use_depthwise + self._box_encodings_clip_range = box_encodings_clip_range + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + + Returns: + box_encodings: A float tensors of shape + [batch_size, num_anchors, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. 
+ """ + net = features + if self._use_depthwise: + box_encodings = slim.separable_conv2d( + net, None, [self._kernel_size, self._kernel_size], + padding='SAME', depth_multiplier=1, stride=1, + rate=1, scope='BoxEncodingPredictor_depthwise') + box_encodings = slim.conv2d( + box_encodings, + num_predictions_per_location * self._box_code_size, [1, 1], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope='BoxEncodingPredictor') + else: + box_encodings = slim.conv2d( + net, num_predictions_per_location * self._box_code_size, + [self._kernel_size, self._kernel_size], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope='BoxEncodingPredictor') + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + # Clipping the box encodings to make the inference graph TPU friendly. + if self._box_encodings_clip_range is not None: + box_encodings = tf.clip_by_value( + box_encodings, self._box_encodings_clip_range.min, + self._box_encodings_clip_range.max) + box_encodings = tf.reshape(box_encodings, + [batch_size, -1, 1, self._box_code_size]) + return box_encodings + + +# TODO(alirezafathi): See if possible to unify Weight Shared with regular +# convolutional box head. +class WeightSharedConvolutionalBoxHead(head.Head): + """Weight shared convolutional box prediction head. + + This head allows sharing the same set of parameters (weights) when called more + then once on different feature maps. + """ + + def __init__(self, + box_code_size, + kernel_size=3, + use_depthwise=False, + box_encodings_clip_range=None, + return_flat_predictions=True): + """Constructor. + + Args: + box_code_size: Size of encoding for each box. + kernel_size: Size of final convolution kernel. + use_depthwise: Whether to use depthwise convolutions for prediction steps. + Default is False. + box_encodings_clip_range: Min and max values for clipping box_encodings. + return_flat_predictions: If true, returns flattened prediction tensor + of shape [batch, height * width * num_predictions_per_location, + box_coder]. Otherwise returns the prediction tensor before reshaping, + whose shape is [batch, height, width, num_predictions_per_location * + num_class_slots]. + + Raises: + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(WeightSharedConvolutionalBoxHead, self).__init__() + self._box_code_size = box_code_size + self._kernel_size = kernel_size + self._use_depthwise = use_depthwise + self._box_encodings_clip_range = box_encodings_clip_range + self._return_flat_predictions = return_flat_predictions + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. + + Returns: + box_encodings: A float tensor of shape + [batch_size, num_anchors, code_size] representing the location of + the objects, or a float tensor of shape [batch, height, width, + num_predictions_per_location * box_code_size] representing grid box + location predictions if self._return_flat_predictions is False. 
+ """ + box_encodings_net = features + if self._use_depthwise: + conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + box_encodings = conv_op( + box_encodings_net, + num_predictions_per_location * self._box_code_size, + [self._kernel_size, self._kernel_size], + activation_fn=None, stride=1, padding='SAME', + normalizer_fn=None, + scope='BoxPredictor') + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + # Clipping the box encodings to make the inference graph TPU friendly. + if self._box_encodings_clip_range is not None: + box_encodings = tf.clip_by_value( + box_encodings, self._box_encodings_clip_range.min, + self._box_encodings_clip_range.max) + if self._return_flat_predictions: + box_encodings = tf.reshape(box_encodings, + [batch_size, -1, self._box_code_size]) + return box_encodings diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/box_head.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/box_head.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59837ddbd53c2256c53df9d371a82bfd0ad02ec1 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/box_head.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/box_head_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/box_head_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ab534a2bd029abed5f39e232d023a27dd2e9a361 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/box_head_tf1_test.py @@ -0,0 +1,132 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.heads.box_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import box_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class MaskRCNNBoxHeadTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + box_prediction_head = box_head.MaskRCNNBoxHead( + is_training=False, + num_classes=20, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=True, + dropout_keep_prob=0.5, + box_code_size=4, + share_box_across_classes=False) + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = box_prediction_head.predict( + features=roi_pooled_features, num_predictions_per_location=1) + self.assertAllEqual([64, 1, 20, 4], prediction.get_shape().as_list()) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ConvolutionalBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + box_prediction_head = box_head.ConvolutionalBoxHead( + is_training=True, + box_code_size=4, + kernel_size=3) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 1, 4], box_encodings.get_shape().as_list()) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( + box_code_size=4) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head.predict( + features=image_feature, + 
num_predictions_per_location=1) + self.assertAllEqual([64, 323, 4], box_encodings.get_shape().as_list()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/class_head.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/class_head.py new file mode 100644 index 0000000000000000000000000000000000000000..604859313de84a783953e67dbe47e301a740cb96 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/class_head.py @@ -0,0 +1,315 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Class Head. + +Contains Class prediction head classes for different meta architectures. +All the class prediction heads have a predict function that receives the +`features` as the first argument and returns class predictions with background. +""" +import functools +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.predictors.heads import head + + +class MaskRCNNClassHead(head.Head): + """Mask RCNN class prediction head. + + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + is_training, + num_class_slots, + fc_hyperparams_fn, + use_dropout, + dropout_keep_prob, + scope='ClassPredictor'): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + fc_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for fully connected ops. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + scope: Scope name for the convolution operation. + """ + super(MaskRCNNClassHead, self).__init__() + self._is_training = is_training + self._num_class_slots = num_class_slots + self._fc_hyperparams_fn = fc_hyperparams_fn + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._scope = scope + + def predict(self, features, num_predictions_per_location=1): + """Predicts boxes and class scores. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing features for a batch of images. + num_predictions_per_location: Int containing number of predictions per + location. + + Returns: + class_predictions_with_background: A float tensor of shape + [batch_size, 1, num_class_slots] representing the class predictions for + the proposals. + + Raises: + ValueError: If num_predictions_per_location is not 1. 
+ """ + if num_predictions_per_location != 1: + raise ValueError('Only num_predictions_per_location=1 is supported') + spatial_averaged_roi_pooled_features = tf.reduce_mean( + features, [1, 2], keep_dims=True, name='AvgPool') + flattened_roi_pooled_features = slim.flatten( + spatial_averaged_roi_pooled_features) + if self._use_dropout: + flattened_roi_pooled_features = slim.dropout( + flattened_roi_pooled_features, + keep_prob=self._dropout_keep_prob, + is_training=self._is_training) + + with slim.arg_scope(self._fc_hyperparams_fn()): + class_predictions_with_background = slim.fully_connected( + flattened_roi_pooled_features, + self._num_class_slots, + reuse=tf.AUTO_REUSE, + activation_fn=None, + scope=self._scope) + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [-1, 1, self._num_class_slots]) + return class_predictions_with_background + + +class ConvolutionalClassHead(head.Head): + """Convolutional class prediction head.""" + + def __init__(self, + is_training, + num_class_slots, + use_dropout, + dropout_keep_prob, + kernel_size, + apply_sigmoid_to_scores=False, + class_prediction_bias_init=0.0, + use_depthwise=False, + scope='ClassPredictor'): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + apply_sigmoid_to_scores: if True, apply the sigmoid on the output + class_predictions. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + scope: Scope name for the convolution operation. + + Raises: + ValueError: if min_depth > max_depth. + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(ConvolutionalClassHead, self).__init__() + self._is_training = is_training + self._num_class_slots = num_class_slots + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._kernel_size = kernel_size + self._apply_sigmoid_to_scores = apply_sigmoid_to_scores + self._class_prediction_bias_init = class_prediction_bias_init + self._use_depthwise = use_depthwise + self._scope = scope + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. + + Returns: + class_predictions_with_background: A float tensors of shape + [batch_size, num_anchors, num_class_slots] representing the class + predictions for the proposals. 
+ """ + net = features + if self._use_dropout: + net = slim.dropout(net, keep_prob=self._dropout_keep_prob) + if self._use_depthwise: + depthwise_scope = self._scope + '_depthwise' + class_predictions_with_background = slim.separable_conv2d( + net, None, [self._kernel_size, self._kernel_size], + padding='SAME', depth_multiplier=1, stride=1, + rate=1, scope=depthwise_scope) + class_predictions_with_background = slim.conv2d( + class_predictions_with_background, + num_predictions_per_location * self._num_class_slots, [1, 1], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope=self._scope) + else: + class_predictions_with_background = slim.conv2d( + net, + num_predictions_per_location * self._num_class_slots, + [self._kernel_size, self._kernel_size], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope=self._scope, + biases_initializer=tf.constant_initializer( + self._class_prediction_bias_init)) + if self._apply_sigmoid_to_scores: + class_predictions_with_background = tf.sigmoid( + class_predictions_with_background) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size, -1, self._num_class_slots]) + return class_predictions_with_background + + +# TODO(alirezafathi): See if possible to unify Weight Shared with regular +# convolutional class head. +class WeightSharedConvolutionalClassHead(head.Head): + """Weight shared convolutional class prediction head. + + This head allows sharing the same set of parameters (weights) when called more + then once on different feature maps. + """ + + def __init__(self, + num_class_slots, + kernel_size=3, + class_prediction_bias_init=0.0, + use_dropout=False, + dropout_keep_prob=0.8, + use_depthwise=False, + score_converter_fn=tf.identity, + return_flat_predictions=True, + scope='ClassPredictor'): + """Constructor. + + Args: + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + kernel_size: Size of final convolution kernel. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_dropout: Whether to apply dropout to class prediction head. + dropout_keep_prob: Probability of keeping activiations. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + score_converter_fn: Callable elementwise nonlinearity (that takes tensors + as inputs and returns tensors). + return_flat_predictions: If true, returns flattened prediction tensor + of shape [batch, height * width * num_predictions_per_location, + box_coder]. Otherwise returns the prediction tensor before reshaping, + whose shape is [batch, height, width, num_predictions_per_location * + num_class_slots]. + scope: Scope name for the convolution operation. + + Raises: + ValueError: if use_depthwise is True and kernel_size is 1. 
+ """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(WeightSharedConvolutionalClassHead, self).__init__() + self._num_class_slots = num_class_slots + self._kernel_size = kernel_size + self._class_prediction_bias_init = class_prediction_bias_init + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._use_depthwise = use_depthwise + self._score_converter_fn = score_converter_fn + self._return_flat_predictions = return_flat_predictions + self._scope = scope + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. + + Returns: + class_predictions_with_background: A tensor of shape + [batch_size, num_anchors, num_class_slots] representing the class + predictions for the proposals, or a tensor of shape [batch, height, + width, num_predictions_per_location * num_class_slots] representing + class predictions before reshaping if self._return_flat_predictions is + False. + """ + class_predictions_net = features + if self._use_dropout: + class_predictions_net = slim.dropout( + class_predictions_net, keep_prob=self._dropout_keep_prob) + if self._use_depthwise: + conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) + else: + conv_op = slim.conv2d + class_predictions_with_background = conv_op( + class_predictions_net, + num_predictions_per_location * self._num_class_slots, + [self._kernel_size, self._kernel_size], + activation_fn=None, stride=1, padding='SAME', + normalizer_fn=None, + biases_initializer=tf.constant_initializer( + self._class_prediction_bias_init), + scope=self._scope) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + class_predictions_with_background = self._score_converter_fn( + class_predictions_with_background) + if self._return_flat_predictions: + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size, -1, self._num_class_slots]) + return class_predictions_with_background diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/class_head.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/class_head.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6b4289305c3cb09dfad2db64c58f6d27e51a1a3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/class_head.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/class_head_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/class_head_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3dc8fb120cb9a4c19ff2d595d31dc3645f6e06d0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/class_head_tf1_test.py @@ -0,0 +1,199 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.predictors.heads.class_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import class_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class MaskRCNNClassHeadTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + class_prediction_head = class_head.MaskRCNNClassHead( + is_training=False, + num_class_slots=20, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=True, + dropout_keep_prob=0.5) + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = class_prediction_head.predict( + features=roi_pooled_features, num_predictions_per_location=1) + self.assertAllEqual([64, 1, 20], prediction.get_shape().as_list()) + + def test_scope_name(self): + expected_var_names = set([ + """ClassPredictor/weights""", + """ClassPredictor/biases""" + ]) + + g = tf.Graph() + with g.as_default(): + class_prediction_head = class_head.MaskRCNNClassHead( + is_training=True, + num_class_slots=20, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=True, + dropout_keep_prob=0.5) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + actual_variable_set = set([ + var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + ]) + self.assertSetEqual(expected_var_names, actual_variable_set) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ConvolutionalClassPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + class_prediction_head = class_head.ConvolutionalClassHead( + is_training=True, + num_class_slots=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3) + image_feature = 
tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 20], + class_predictions.get_shape().as_list()) + + def test_scope_name(self): + expected_var_names = set([ + """ClassPredictor/weights""", + """ClassPredictor/biases""" + ]) + g = tf.Graph() + with g.as_default(): + class_prediction_head = class_head.ConvolutionalClassHead( + is_training=True, + num_class_slots=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + actual_variable_set = set([ + var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + ]) + self.assertSetEqual(expected_var_names, actual_variable_set) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class WeightSharedConvolutionalClassPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + class_prediction_head = ( + class_head.WeightSharedConvolutionalClassHead(num_class_slots=20)) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list()) + + def test_scope_name(self): + expected_var_names = set([ + """ClassPredictor/weights""", + """ClassPredictor/biases""" + ]) + g = tf.Graph() + with g.as_default(): + class_prediction_head = class_head.WeightSharedConvolutionalClassHead( + num_class_slots=20) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + actual_variable_set = set([ + var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + ]) + self.assertSetEqual(expected_var_names, actual_variable_set) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/head.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/head.py new file mode 100644 index 0000000000000000000000000000000000000000..7dc2a9492f2229a874446c26334e5ed5840e722e --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/head.py @@ -0,0 +1,81 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Base head class. + +All the different kinds of prediction heads in different models will inherit +from this class. What is in common between all head classes is that they have a +`predict` function that receives `features` as its first argument. + +How to add a new prediction head to an existing meta architecture? +For example, how can we add a `3d shape` prediction head to Mask RCNN? + +We have to take the following steps to add a new prediction head to an +existing meta arch: +(a) Add a class for predicting the head. This class should inherit from the +`Head` class below and have a `predict` function that receives the features +and predicts the output. The output is always a tf.float32 tensor. +(b) Add the head to the meta architecture. For example in case of Mask RCNN, +go to box_predictor_builder and put in the logic for adding the new head to the +Mask RCNN box predictor. +(c) Add the logic for computing the loss for the new head. +(d) Add the necessary metrics for the new head. +(e) (optional) Add visualization for the new head. +""" +from abc import abstractmethod + +import tensorflow.compat.v1 as tf + + +class Head(object): + """Mask RCNN head base class.""" + + def __init__(self): + """Constructor.""" + pass + + @abstractmethod + def predict(self, features, num_predictions_per_location): + """Returns the head's predictions. + + Args: + features: A float tensor of features. + num_predictions_per_location: Int containing number of predictions per + location. + + Returns: + A tf.float32 tensor. + """ + pass + + +class KerasHead(tf.keras.layers.Layer): + """Keras head base class.""" + + def call(self, features): + """The Keras model call will delegate to the `_predict` method.""" + return self._predict(features) + + @abstractmethod + def _predict(self, features): + """Returns the head's predictions. + + Args: + features: A float tensor of features. + + Returns: + A tf.float32 tensor. + """ + pass diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/head.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/head.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d91b4ee4a2836b7faf17061a3a6503e5e92dd64 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/head.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_box_head.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_box_head.py new file mode 100644 index 0000000000000000000000000000000000000000..b8def7fc1b01291d92ce545c8c3c29d9a24c646a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_box_head.py @@ -0,0 +1,333 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
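# Illustrative sketch, not part of the vendored head.py above: step (a) of the
# "add a new prediction head" recipe in its module docstring, shown for the
# hypothetical 3d-shape example it mentions. The names ShapeCodeHead and
# shape_code_size are invented for illustration only.
import tensorflow.compat.v1 as tf
from object_detection.predictors.heads import head

class ShapeCodeHead(head.KerasHead):
  """Toy Keras head that regresses a fixed-size shape code per location."""

  def __init__(self, shape_code_size, num_predictions_per_location, name=None):
    super(ShapeCodeHead, self).__init__(name=name)
    self._shape_code_size = shape_code_size
    self._conv = tf.keras.layers.Conv2D(
        num_predictions_per_location * shape_code_size, [3, 3],
        padding='SAME', name='ShapeCodePredictor')

  def _predict(self, features):
    # [batch, height, width, channels] -> [batch, num_predictions, code_size].
    predictions = self._conv(features)
    batch_size = tf.shape(features)[0]
    return tf.reshape(predictions, [batch_size, -1, self._shape_code_size])

# Steps (b)-(e) of the recipe (builder wiring, loss, metrics, visualization)
# live outside this file and are not sketched here.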
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Box Head. + +Contains Box prediction head classes for different meta architectures. +All the box prediction heads have a _predict function that receives the +`features` as the first argument and returns `box_encodings`. +""" +import tensorflow.compat.v1 as tf + +from object_detection.predictors.heads import head + + +class ConvolutionalBoxHead(head.KerasHead): + """Convolutional box prediction head.""" + + def __init__(self, + is_training, + box_code_size, + kernel_size, + num_predictions_per_location, + conv_hyperparams, + freeze_batchnorm, + use_depthwise=False, + box_encodings_clip_range=None, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + box_code_size: Size of encoding for each box. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + box_encodings_clip_range: Min and max values for clipping box_encodings. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if min_depth > max_depth. + ValueError: if use_depthwise is True and kernel_size is 1. 
+ """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(ConvolutionalBoxHead, self).__init__(name=name) + self._is_training = is_training + self._box_code_size = box_code_size + self._kernel_size = kernel_size + self._num_predictions_per_location = num_predictions_per_location + self._use_depthwise = use_depthwise + self._box_encodings_clip_range = box_encodings_clip_range + + self._box_encoder_layers = [] + + if self._use_depthwise: + self._box_encoder_layers.append( + tf.keras.layers.DepthwiseConv2D( + [self._kernel_size, self._kernel_size], + padding='SAME', + depth_multiplier=1, + strides=1, + dilation_rate=1, + name='BoxEncodingPredictor_depthwise', + **conv_hyperparams.params())) + self._box_encoder_layers.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name='BoxEncodingPredictor_depthwise_batchnorm')) + self._box_encoder_layers.append( + conv_hyperparams.build_activation_layer( + name='BoxEncodingPredictor_depthwise_activation')) + self._box_encoder_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._box_code_size, [1, 1], + name='BoxEncodingPredictor', + **conv_hyperparams.params(use_bias=True))) + else: + self._box_encoder_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._box_code_size, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='BoxEncodingPredictor', + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + box_encodings: A float tensor of shape + [batch_size, num_anchors, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. + """ + box_encodings = features + for layer in self._box_encoder_layers: + box_encodings = layer(box_encodings) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + # Clipping the box encodings to make the inference graph TPU friendly. + if self._box_encodings_clip_range is not None: + box_encodings = tf.clip_by_value( + box_encodings, self._box_encodings_clip_range.min, + self._box_encodings_clip_range.max) + box_encodings = tf.reshape(box_encodings, + [batch_size, -1, 1, self._box_code_size]) + return box_encodings + + +class MaskRCNNBoxHead(head.KerasHead): + """Box prediction head. + + This is a piece of Mask RCNN which is responsible for predicting + just the box encodings. + + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + is_training, + num_classes, + fc_hyperparams, + freeze_batchnorm, + use_dropout, + dropout_keep_prob, + box_code_size, + share_box_across_classes=False, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for fully connected dense ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 
1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + box_code_size: Size of encoding for each box. + share_box_across_classes: Whether to share boxes across classes rather + than use a different box for each class. + name: A string name scope to assign to the box head. If `None`, Keras + will auto-generate one from the class name. + """ + super(MaskRCNNBoxHead, self).__init__(name=name) + self._is_training = is_training + self._num_classes = num_classes + self._fc_hyperparams = fc_hyperparams + self._freeze_batchnorm = freeze_batchnorm + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._box_code_size = box_code_size + self._share_box_across_classes = share_box_across_classes + + self._box_encoder_layers = [tf.keras.layers.Flatten()] + + if self._use_dropout: + self._box_encoder_layers.append( + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + + self._number_of_boxes = 1 + if not self._share_box_across_classes: + self._number_of_boxes = self._num_classes + + self._box_encoder_layers.append( + tf.keras.layers.Dense(self._number_of_boxes * self._box_code_size, + name='BoxEncodingPredictor_dense')) + self._box_encoder_layers.append( + fc_hyperparams.build_batch_norm(training=(is_training and + not freeze_batchnorm), + name='BoxEncodingPredictor_batchnorm')) + + def _predict(self, features): + """Predicts box encodings. + + Args: + features: A float tensor of shape [batch_size, height, width, + channels] containing features for a batch of images. + + Returns: + box_encodings: A float tensor of shape + [batch_size, 1, num_classes, code_size] representing the location of the + objects. + """ + spatial_averaged_roi_pooled_features = tf.reduce_mean( + features, [1, 2], keep_dims=True, name='AvgPool') + net = spatial_averaged_roi_pooled_features + for layer in self._box_encoder_layers: + net = layer(net) + box_encodings = tf.reshape(net, + [-1, 1, + self._number_of_boxes, + self._box_code_size]) + return box_encodings + + +# TODO(b/128922690): Unify the implementations of ConvolutionalBoxHead +# and WeightSharedConvolutionalBoxHead +class WeightSharedConvolutionalBoxHead(head.KerasHead): + """Weight shared convolutional box prediction head based on Keras. + + This head allows sharing the same set of parameters (weights) when called more + then once on different feature maps. + """ + + def __init__(self, + box_code_size, + num_predictions_per_location, + conv_hyperparams, + kernel_size=3, + use_depthwise=False, + box_encodings_clip_range=None, + return_flat_predictions=True, + name=None): + """Constructor. + + Args: + box_code_size: Size of encoding for each box. + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + kernel_size: Size of final convolution kernel. + use_depthwise: Whether to use depthwise convolutions for prediction steps. + Default is False. + box_encodings_clip_range: Min and max values for clipping box_encodings. 
+ return_flat_predictions: If true, returns flattened prediction tensor + of shape [batch, height * width * num_predictions_per_location, + box_coder]. Otherwise returns the prediction tensor before reshaping, + whose shape is [batch, height, width, num_predictions_per_location * + num_class_slots]. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(WeightSharedConvolutionalBoxHead, self).__init__(name=name) + self._box_code_size = box_code_size + self._kernel_size = kernel_size + self._num_predictions_per_location = num_predictions_per_location + self._use_depthwise = use_depthwise + self._box_encodings_clip_range = box_encodings_clip_range + self._return_flat_predictions = return_flat_predictions + + self._box_encoder_layers = [] + + if self._use_depthwise: + self._box_encoder_layers.append( + tf.keras.layers.SeparableConv2D( + num_predictions_per_location * self._box_code_size, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='BoxPredictor', + **conv_hyperparams.params(use_bias=True))) + else: + self._box_encoder_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._box_code_size, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='BoxPredictor', + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + box_encodings: A float tensor of shape + [batch_size, num_anchors, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. + """ + box_encodings = features + for layer in self._box_encoder_layers: + box_encodings = layer(box_encodings) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + # Clipping the box encodings to make the inference graph TPU friendly. + if self._box_encodings_clip_range is not None: + box_encodings = tf.clip_by_value( + box_encodings, self._box_encodings_clip_range.min, + self._box_encodings_clip_range.max) + if self._return_flat_predictions: + box_encodings = tf.reshape(box_encodings, + [batch_size, -1, self._box_code_size]) + return box_encodings diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_box_head.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_box_head.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51988d49ac0d78c46823c995b410739b9b7234e7 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_box_head.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_box_head_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_box_head_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e9e8b8dcc3aa07ce6917a881c42cf51db7318576 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_box_head_tf2_test.py @@ -0,0 +1,199 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
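# Illustrative sketch, not part of the vendored keras_box_head.py above: the
# box_encodings_clip_range argument only needs an object exposing .min and
# .max (see the tf.clip_by_value call in _predict above). A small namedtuple
# is enough; the name BoxEncodingsClipRange here is assumed for illustration.
import collections

BoxEncodingsClipRange = collections.namedtuple(
    'BoxEncodingsClipRange', ['min', 'max'])

# e.g. keep encodings in a TPU-friendly range:
# WeightSharedConvolutionalBoxHead(..., box_encodings_clip_range=
#                                  BoxEncodingsClipRange(min=-10.0, max=10.0))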
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for object_detection.predictors.heads.box_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import keras_box_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ConvolutionalKerasBoxHeadTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.ConvolutionalBoxHead( + is_training=True, + box_code_size=4, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=False) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head(image_feature) + return box_encodings + box_encodings = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 1, 4], box_encodings.shape) + + def test_prediction_size_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.ConvolutionalBoxHead( + is_training=True, + box_code_size=4, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head(image_feature) + return box_encodings + box_encodings = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 1, 4], box_encodings.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MaskRCNNKerasBoxHeadTest(test_case.TestCase): + + def _build_fc_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def test_prediction_size(self): + box_prediction_head = keras_box_head.MaskRCNNBoxHead( + is_training=False, + num_classes=20, + 
fc_hyperparams=self._build_fc_hyperparams(), + freeze_batchnorm=False, + use_dropout=True, + dropout_keep_prob=0.5, + box_code_size=4, + share_box_across_classes=False) + def graph_fn(): + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = box_prediction_head(roi_pooled_features) + return prediction + prediction = self.execute(graph_fn, []) + self.assertAllEqual([64, 1, 20, 4], prediction.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class WeightSharedConvolutionalKerasBoxHead(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=4, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=False) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head(image_feature) + return box_encodings + box_encodings = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 4], box_encodings.shape) + + def test_prediction_size_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=4, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_encodings = box_prediction_head(image_feature) + return box_encodings + box_encodings = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 4], box_encodings.shape) + + def test_variable_count_depth_wise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=4, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=True) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_prediction_head(image_feature) + self.assertEqual(len(box_prediction_head.variables), 3) + + def test_variable_count_depth_wise_False(self): + conv_hyperparams = self._build_conv_hyperparams() + box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead( + box_code_size=4, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=False) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + box_prediction_head(image_feature) + self.assertEqual(len(box_prediction_head.variables), 2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_class_head.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_class_head.py new file mode 100644 index 0000000000000000000000000000000000000000..988ebb2ee720f5db137ade0aef9919a942a57a5b --- /dev/null +++ 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_class_head.py @@ -0,0 +1,351 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Class Head. + +Contains Class prediction head classes for different meta architectures. +All the class prediction heads have a predict function that receives the +`features` as the first argument and returns class predictions with background. +""" +import tensorflow.compat.v1 as tf + +from object_detection.predictors.heads import head + + +class ConvolutionalClassHead(head.KerasHead): + """Convolutional class prediction head.""" + + def __init__(self, + is_training, + num_class_slots, + use_dropout, + dropout_keep_prob, + kernel_size, + num_predictions_per_location, + conv_hyperparams, + freeze_batchnorm, + class_prediction_bias_init=0.0, + use_depthwise=False, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if min_depth > max_depth. + ValueError: if use_depthwise is True and kernel_size is 1. 
+ """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(ConvolutionalClassHead, self).__init__(name=name) + self._is_training = is_training + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._kernel_size = kernel_size + self._class_prediction_bias_init = class_prediction_bias_init + self._use_depthwise = use_depthwise + self._num_class_slots = num_class_slots + + self._class_predictor_layers = [] + + if self._use_dropout: + self._class_predictor_layers.append( + # The Dropout layer's `training` parameter for the call method must + # be set implicitly by the Keras set_learning_phase. The object + # detection training code takes care of this. + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + if self._use_depthwise: + self._class_predictor_layers.append( + tf.keras.layers.DepthwiseConv2D( + [self._kernel_size, self._kernel_size], + padding='SAME', + depth_multiplier=1, + strides=1, + dilation_rate=1, + name='ClassPredictor_depthwise', + **conv_hyperparams.params())) + self._class_predictor_layers.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name='ClassPredictor_depthwise_batchnorm')) + self._class_predictor_layers.append( + conv_hyperparams.build_activation_layer( + name='ClassPredictor_depthwise_activation')) + self._class_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._num_class_slots, [1, 1], + name='ClassPredictor', + **conv_hyperparams.params(use_bias=True))) + else: + self._class_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._num_class_slots, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='ClassPredictor', + bias_initializer=tf.constant_initializer( + self._class_prediction_bias_init), + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + class_predictions_with_background: A float tensor of shape + [batch_size, num_anchors, num_class_slots] representing the class + predictions for the proposals. + """ + class_predictions_with_background = features + for layer in self._class_predictor_layers: + class_predictions_with_background = layer( + class_predictions_with_background) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size, -1, self._num_class_slots]) + return class_predictions_with_background + + +class MaskRCNNClassHead(head.KerasHead): + """Mask RCNN class prediction head. + + This is a piece of Mask RCNN which is responsible for predicting + just the class scores of boxes. + + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + is_training, + num_class_slots, + fc_hyperparams, + freeze_batchnorm, + use_dropout, + dropout_keep_prob, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for fully connected dense ops. 
+ freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + name: A string name scope to assign to the class head. If `None`, Keras + will auto-generate one from the class name. + """ + super(MaskRCNNClassHead, self).__init__(name=name) + self._is_training = is_training + self._freeze_batchnorm = freeze_batchnorm + self._num_class_slots = num_class_slots + self._fc_hyperparams = fc_hyperparams + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + + self._class_predictor_layers = [tf.keras.layers.Flatten()] + + if self._use_dropout: + self._class_predictor_layers.append( + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + + self._class_predictor_layers.append( + tf.keras.layers.Dense(self._num_class_slots, + name='ClassPredictor_dense')) + self._class_predictor_layers.append( + fc_hyperparams.build_batch_norm(training=(is_training and + not freeze_batchnorm), + name='ClassPredictor_batchnorm')) + + def _predict(self, features): + """Predicts the class scores for boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing features for a batch of images. + + Returns: + class_predictions_with_background: A float tensor of shape + [batch_size, 1, num_class_slots] representing the class predictions for + the proposals. + """ + spatial_averaged_roi_pooled_features = tf.reduce_mean( + features, [1, 2], keep_dims=True, name='AvgPool') + net = spatial_averaged_roi_pooled_features + for layer in self._class_predictor_layers: + net = layer(net) + class_predictions_with_background = tf.reshape( + net, + [-1, 1, self._num_class_slots]) + return class_predictions_with_background + + +class WeightSharedConvolutionalClassHead(head.KerasHead): + """Weight shared convolutional class prediction head. + + This head allows sharing the same set of parameters (weights) when called more + then once on different feature maps. + """ + + def __init__(self, + num_class_slots, + num_predictions_per_location, + conv_hyperparams, + kernel_size=3, + class_prediction_bias_init=0.0, + use_dropout=False, + dropout_keep_prob=0.8, + use_depthwise=False, + score_converter_fn=tf.identity, + return_flat_predictions=True, + name=None): + """Constructor. + + Args: + num_class_slots: number of class slots. Note that num_class_slots may or + may not include an implicit background category. + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + kernel_size: Size of final convolution kernel. + class_prediction_bias_init: constant value to initialize bias of the last + conv2d layer before class prediction. + use_dropout: Whether to apply dropout to class prediction head. + dropout_keep_prob: Probability of keeping activiations. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. 
+ score_converter_fn: Callable elementwise nonlinearity (that takes tensors + as inputs and returns tensors). + return_flat_predictions: If true, returns flattened prediction tensor + of shape [batch, height * width * num_predictions_per_location, + box_coder]. Otherwise returns the prediction tensor before reshaping, + whose shape is [batch, height, width, num_predictions_per_location * + num_class_slots]. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if use_depthwise is True and kernel_size is 1. + """ + if use_depthwise and (kernel_size == 1): + raise ValueError('Should not use 1x1 kernel when using depthwise conv') + + super(WeightSharedConvolutionalClassHead, self).__init__(name=name) + self._num_class_slots = num_class_slots + self._kernel_size = kernel_size + self._class_prediction_bias_init = class_prediction_bias_init + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._use_depthwise = use_depthwise + self._score_converter_fn = score_converter_fn + self._return_flat_predictions = return_flat_predictions + + self._class_predictor_layers = [] + + if self._use_dropout: + self._class_predictor_layers.append( + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + if self._use_depthwise: + self._class_predictor_layers.append( + tf.keras.layers.SeparableConv2D( + num_predictions_per_location * self._num_class_slots, + [self._kernel_size, self._kernel_size], + padding='SAME', + depth_multiplier=1, + strides=1, + name='ClassPredictor', + bias_initializer=tf.constant_initializer( + self._class_prediction_bias_init), + **conv_hyperparams.params(use_bias=True))) + else: + self._class_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * self._num_class_slots, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='ClassPredictor', + bias_initializer=tf.constant_initializer( + self._class_prediction_bias_init), + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + class_predictions_with_background: A float tensor of shape + [batch_size, num_anchors, num_class_slots] representing the class + predictions for the proposals. 
+ """ + class_predictions_with_background = features + for layer in self._class_predictor_layers: + class_predictions_with_background = layer( + class_predictions_with_background) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + class_predictions_with_background = self._score_converter_fn( + class_predictions_with_background) + if self._return_flat_predictions: + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size, -1, self._num_class_slots]) + return class_predictions_with_background diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_class_head.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_class_head.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f9cda0c18e5325bf4b2b909ed69b140601ed0f5 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_class_head.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_class_head_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_class_head_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..aa890ce522defb6ec4c97965846e8f20529bc24b --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_class_head_tf2_test.py @@ -0,0 +1,203 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.heads.class_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import keras_class_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ConvolutionalKerasClassPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = keras_class_head.ConvolutionalClassHead( + is_training=True, + num_class_slots=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=False) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head(image_feature,) + return class_predictions + class_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20], class_predictions.shape) + + def test_prediction_size_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = keras_class_head.ConvolutionalClassHead( + is_training=True, + num_class_slots=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head(image_feature,) + return class_predictions + class_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20], class_predictions.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MaskRCNNClassHeadTest(test_case.TestCase): + + def _build_fc_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def test_prediction_size(self): + class_prediction_head = keras_class_head.MaskRCNNClassHead( + is_training=False, + num_class_slots=20, + fc_hyperparams=self._build_fc_hyperparams(), + freeze_batchnorm=False, + use_dropout=True, + dropout_keep_prob=0.5) + def graph_fn(): + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = class_prediction_head(roi_pooled_features) + return prediction + prediction = self.execute(graph_fn, []) + self.assertAllEqual([64, 1, 20], 
prediction.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class WeightSharedConvolutionalKerasClassPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=20, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=False) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head(image_feature) + return class_predictions + class_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20], class_predictions.shape) + + def test_prediction_size_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=20, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_predictions = class_prediction_head(image_feature) + return class_predictions + class_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20], class_predictions.shape) + + def test_variable_count_depth_wise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = ( + keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=20, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=True)) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_prediction_head(image_feature) + self.assertEqual(len(class_prediction_head.variables), 3) + + def test_variable_count_depth_wise_False(self): + conv_hyperparams = self._build_conv_hyperparams() + class_prediction_head = ( + keras_class_head.WeightSharedConvolutionalClassHead( + num_class_slots=20, + conv_hyperparams=conv_hyperparams, + num_predictions_per_location=1, + use_depthwise=False)) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + class_prediction_head(image_feature) + self.assertEqual(len(class_prediction_head.variables), 2) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_mask_head.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_mask_head.py new file mode 100644 index 0000000000000000000000000000000000000000..3b65cc4b6588457908cfcb4c97efb8a1e5313096 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_mask_head.py @@ -0,0 +1,447 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
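# Illustrative note, not part of the vendored test file above: the variable
# counts asserted there follow from the layer types the weight-shared heads
# build. tf.keras.layers.SeparableConv2D owns three variables (depthwise
# kernel, pointwise kernel, bias), while tf.keras.layers.Conv2D owns two
# (kernel, bias), hence 3 for use_depthwise=True and 2 otherwise.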
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Keras Mask Heads. + +Contains Mask prediction head classes for different meta architectures. +All the mask prediction heads have a predict function that receives the +`features` as the first argument and returns `mask_predictions`. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +from six.moves import range +import tensorflow.compat.v1 as tf + +from object_detection.predictors.heads import head +from object_detection.utils import ops +from object_detection.utils import shape_utils + + +class ConvolutionalMaskHead(head.KerasHead): + """Convolutional class prediction head.""" + + def __init__(self, + is_training, + num_classes, + use_dropout, + dropout_keep_prob, + kernel_size, + num_predictions_per_location, + conv_hyperparams, + freeze_batchnorm, + use_depthwise=False, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=False, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: Number of classes. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Bool. Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. + mask_height: Desired output mask height. The default value is 7. + mask_width: Desired output mask width. The default value is 7. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if min_depth > max_depth. 
+ """ + super(ConvolutionalMaskHead, self).__init__(name=name) + self._is_training = is_training + self._num_classes = num_classes + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._kernel_size = kernel_size + self._num_predictions_per_location = num_predictions_per_location + self._use_depthwise = use_depthwise + self._mask_height = mask_height + self._mask_width = mask_width + self._masks_are_class_agnostic = masks_are_class_agnostic + + self._mask_predictor_layers = [] + + # Add a slot for the background class. + if self._masks_are_class_agnostic: + self._num_masks = 1 + else: + self._num_masks = self._num_classes + + num_mask_channels = self._num_masks * self._mask_height * self._mask_width + + if self._use_dropout: + self._mask_predictor_layers.append( + # The Dropout layer's `training` parameter for the call method must + # be set implicitly by the Keras set_learning_phase. The object + # detection training code takes care of this. + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + if self._use_depthwise: + self._mask_predictor_layers.append( + tf.keras.layers.DepthwiseConv2D( + [self._kernel_size, self._kernel_size], + padding='SAME', + depth_multiplier=1, + strides=1, + dilation_rate=1, + name='MaskPredictor_depthwise', + **conv_hyperparams.params())) + self._mask_predictor_layers.append( + conv_hyperparams.build_batch_norm( + training=(is_training and not freeze_batchnorm), + name='MaskPredictor_depthwise_batchnorm')) + self._mask_predictor_layers.append( + conv_hyperparams.build_activation_layer( + name='MaskPredictor_depthwise_activation')) + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * num_mask_channels, [1, 1], + name='MaskPredictor', + **conv_hyperparams.params(use_bias=True))) + else: + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * num_mask_channels, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='MaskPredictor', + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + mask_predictions: A float tensors of shape + [batch_size, num_anchors, num_masks, mask_height, mask_width] + representing the mask predictions for the proposals. + """ + mask_predictions = features + for layer in self._mask_predictor_layers: + mask_predictions = layer(mask_predictions) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + mask_predictions = tf.reshape( + mask_predictions, + [batch_size, -1, self._num_masks, self._mask_height, self._mask_width]) + return mask_predictions + + +class MaskRCNNMaskHead(head.KerasHead): + """Mask RCNN mask prediction head. + + This is a piece of Mask RCNN which is responsible for predicting + just the pixelwise foreground scores for regions within the boxes. + + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + is_training, + num_classes, + freeze_batchnorm, + conv_hyperparams, + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False, + convolve_then_upsample=False, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the Mask head is in training mode. + num_classes: number of classes. 
Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + mask_height: Desired output mask height. The default value is 14. + mask_width: Desired output mask width. The default value is 14. + mask_prediction_num_conv_layers: Number of convolution layers applied to + the image_features in mask prediction branch. + mask_prediction_conv_depth: The depth for the first conv2d_transpose op + applied to the image_features in the mask prediction branch. If set + to 0, the depth of the convolution layers will be automatically chosen + based on the number of object classes and the number of channels in the + image features. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + convolve_then_upsample: Whether to apply convolutions on mask features + before upsampling using nearest neighbor resizing. Otherwise, mask + features are resized to [`mask_height`, `mask_width`] using bilinear + resizing before applying convolutions. + name: A string name scope to assign to the mask head. If `None`, Keras + will auto-generate one from the class name. + """ + super(MaskRCNNMaskHead, self).__init__(name=name) + self._is_training = is_training + self._freeze_batchnorm = freeze_batchnorm + self._num_classes = num_classes + self._conv_hyperparams = conv_hyperparams + self._mask_height = mask_height + self._mask_width = mask_width + self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers + self._mask_prediction_conv_depth = mask_prediction_conv_depth + self._masks_are_class_agnostic = masks_are_class_agnostic + self._convolve_then_upsample = convolve_then_upsample + + self._mask_predictor_layers = [] + + def build(self, input_shapes): + num_conv_channels = self._mask_prediction_conv_depth + if num_conv_channels == 0: + num_feature_channels = input_shapes.as_list()[3] + num_conv_channels = self._get_mask_predictor_conv_depth( + num_feature_channels, self._num_classes) + + for i in range(self._mask_prediction_num_conv_layers - 1): + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_conv_channels, + [3, 3], + padding='SAME', + name='MaskPredictor_conv2d_{}'.format(i), + **self._conv_hyperparams.params())) + self._mask_predictor_layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='MaskPredictor_batchnorm_{}'.format(i))) + self._mask_predictor_layers.append( + self._conv_hyperparams.build_activation_layer( + name='MaskPredictor_activation_{}'.format(i))) + + if self._convolve_then_upsample: + # Replace Transposed Convolution with a Nearest Neighbor upsampling step + # followed by 3x3 convolution. 
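+      # As an illustrative example (the configuration exercised by the
+      # convolve_then_upsample unit test in this change): with
+      # mask_height = mask_width = 28 and 14x14 input features, height_scale
+      # and width_scale are both 2, so every spatial cell is tiled 2x2 by the
+      # nearest neighbor upsampling before the 3x3 convolution below.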
+ height_scale = self._mask_height // shape_utils.get_dim_as_int( + input_shapes[1]) + width_scale = self._mask_width // shape_utils.get_dim_as_int( + input_shapes[2]) + # pylint: disable=g-long-lambda + self._mask_predictor_layers.append(tf.keras.layers.Lambda( + lambda features: ops.nearest_neighbor_upsampling( + features, height_scale=height_scale, width_scale=width_scale) + )) + # pylint: enable=g-long-lambda + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_conv_channels, + [3, 3], + padding='SAME', + name='MaskPredictor_upsample_conv2d', + **self._conv_hyperparams.params())) + self._mask_predictor_layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='MaskPredictor_upsample_batchnorm')) + self._mask_predictor_layers.append( + self._conv_hyperparams.build_activation_layer( + name='MaskPredictor_upsample_activation')) + + num_masks = 1 if self._masks_are_class_agnostic else self._num_classes + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_masks, + [3, 3], + padding='SAME', + name='MaskPredictor_last_conv2d', + **self._conv_hyperparams.params(use_bias=True))) + + self.built = True + + def _get_mask_predictor_conv_depth(self, + num_feature_channels, + num_classes, + class_weight=3.0, + feature_weight=2.0): + """Computes the depth of the mask predictor convolutions. + + Computes the depth of the mask predictor convolutions given feature channels + and number of classes by performing a weighted average of the two in + log space to compute the number of convolution channels. The weights that + are used for computing the weighted average do not need to sum to 1. + + Args: + num_feature_channels: An integer containing the number of feature + channels. + num_classes: An integer containing the number of classes. + class_weight: Class weight used in computing the weighted average. + feature_weight: Feature weight used in computing the weighted average. + + Returns: + An integer containing the number of convolution channels used by mask + predictor. + """ + num_feature_channels_log = math.log(float(num_feature_channels), 2.0) + num_classes_log = math.log(float(num_classes), 2.0) + weighted_num_feature_channels_log = ( + num_feature_channels_log * feature_weight) + weighted_num_classes_log = num_classes_log * class_weight + total_weight = feature_weight + class_weight + num_conv_channels_log = round( + (weighted_num_feature_channels_log + weighted_num_classes_log) / + total_weight) + return int(math.pow(2.0, num_conv_channels_log)) + + def _predict(self, features): + """Predicts pixelwise foreground scores for regions within the boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing features for a batch of images. + + Returns: + instance_masks: A float tensor of shape + [batch_size, 1, num_classes, mask_height, mask_width]. 
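+        For example, with num_classes=20 and mask_height=mask_width=14 (the
+        configuration exercised by the MaskRCNNMaskHeadTest unit test in this
+        change), 7x7 RoI-pooled features produce a tensor of shape
+        [batch_size, 1, 20, 14, 14].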
+ """ + if not self._convolve_then_upsample: + features = tf.image.resize_bilinear( + features, [self._mask_height, self._mask_width], + align_corners=True) + + mask_predictions = features + for layer in self._mask_predictor_layers: + mask_predictions = layer(mask_predictions) + return tf.expand_dims( + tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), + axis=1, + name='MaskPredictor') + + +class WeightSharedConvolutionalMaskHead(head.KerasHead): + """Weight shared convolutional mask prediction head based on Keras.""" + + def __init__(self, + num_classes, + num_predictions_per_location, + conv_hyperparams, + kernel_size=3, + use_dropout=False, + dropout_keep_prob=0.8, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=False, + name=None): + """Constructor. + + Args: + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + num_predictions_per_location: Number of box predictions to be made per + spatial location. Int specifying number of boxes per location. + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + kernel_size: Size of final convolution kernel. + use_dropout: Whether to apply dropout to class prediction head. + dropout_keep_prob: Probability of keeping activiations. + mask_height: Desired output mask height. The default value is 7. + mask_width: Desired output mask width. The default value is 7. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + + Raises: + ValueError: if min_depth > max_depth. + """ + super(WeightSharedConvolutionalMaskHead, self).__init__(name=name) + self._num_classes = num_classes + self._num_predictions_per_location = num_predictions_per_location + self._kernel_size = kernel_size + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._mask_height = mask_height + self._mask_width = mask_width + self._masks_are_class_agnostic = masks_are_class_agnostic + + self._mask_predictor_layers = [] + + if self._masks_are_class_agnostic: + self._num_masks = 1 + else: + self._num_masks = self._num_classes + num_mask_channels = self._num_masks * self._mask_height * self._mask_width + + if self._use_dropout: + self._mask_predictor_layers.append( + tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) + self._mask_predictor_layers.append( + tf.keras.layers.Conv2D( + num_predictions_per_location * num_mask_channels, + [self._kernel_size, self._kernel_size], + padding='SAME', + name='MaskPredictor', + **conv_hyperparams.params(use_bias=True))) + + def _predict(self, features): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + + Returns: + mask_predictions: A tensor of shape + [batch_size, num_anchors, num_classes, mask_height, mask_width] + representing the mask predictions for the proposals. 
+ """ + mask_predictions = features + for layer in self._mask_predictor_layers: + mask_predictions = layer(mask_predictions) + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + mask_predictions = tf.reshape( + mask_predictions, + [batch_size, -1, self._num_masks, self._mask_height, self._mask_width]) + return mask_predictions diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_mask_head.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_mask_head.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e219f6183dd768c1edb1c9ac594b282ebee17bf Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_mask_head.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_mask_head_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_mask_head_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5465be06fe1fe5150c8c4c3583bfcd3be5c5d079 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keras_mask_head_tf2_test.py @@ -0,0 +1,252 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.heads.mask_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import keras_mask_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class ConvolutionalMaskPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size_use_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( + is_training=True, + num_classes=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=False, + mask_height=7, + mask_width=7) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.shape) + + def test_prediction_size_use_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( + is_training=True, + num_classes=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=True, + mask_height=7, + mask_width=7) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.shape) + + def test_class_agnostic_prediction_size_use_depthwise_false(self): + conv_hyperparams = self._build_conv_hyperparams() + mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( + is_training=True, + num_classes=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=False, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape) + + def test_class_agnostic_prediction_size_use_depthwise_true(self): + conv_hyperparams = self._build_conv_hyperparams() + mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( + is_training=True, + num_classes=20, + use_dropout=True, + 
dropout_keep_prob=0.5, + kernel_size=3, + conv_hyperparams=conv_hyperparams, + freeze_batchnorm=False, + num_predictions_per_location=1, + use_depthwise=True, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=True) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MaskRCNNMaskHeadTest(test_case.TestCase): + + def _build_conv_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def test_prediction_size(self): + mask_prediction_head = keras_mask_head.MaskRCNNMaskHead( + is_training=True, + num_classes=20, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False) + def graph_fn(): + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = mask_prediction_head(roi_pooled_features) + return prediction + prediction = self.execute(graph_fn, []) + self.assertAllEqual([64, 1, 20, 14, 14], prediction.shape) + + def test_prediction_size_with_convolve_then_upsample(self): + mask_prediction_head = keras_mask_head.MaskRCNNMaskHead( + is_training=True, + num_classes=20, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + mask_height=28, + mask_width=28, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=True, + convolve_then_upsample=True) + def graph_fn(): + roi_pooled_features = tf.random_uniform( + [64, 14, 14, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = mask_prediction_head(roi_pooled_features) + return prediction + prediction = self.execute(graph_fn, []) + self.assertAllEqual([64, 1, 1, 28, 28], prediction.shape) + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class WeightSharedConvolutionalMaskPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_prediction_size(self): + mask_prediction_head = ( + keras_mask_head.WeightSharedConvolutionalMaskHead( + num_classes=20, + num_predictions_per_location=1, + conv_hyperparams=self._build_conv_hyperparams(), + mask_height=7, + mask_width=7)) + def graph_fn(): + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head(image_feature) + return mask_predictions + mask_predictions = self.execute(graph_fn, []) + self.assertAllEqual([64, 323, 20, 7, 7], 
mask_predictions.shape)
+
+  def test_class_agnostic_prediction_size(self):
+    mask_prediction_head = (
+        keras_mask_head.WeightSharedConvolutionalMaskHead(
+            num_classes=20,
+            num_predictions_per_location=1,
+            conv_hyperparams=self._build_conv_hyperparams(),
+            mask_height=7,
+            mask_width=7,
+            masks_are_class_agnostic=True))
+    def graph_fn():
+      image_feature = tf.random_uniform(
+          [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
+      mask_predictions = mask_prediction_head(image_feature)
+      return mask_predictions
+    mask_predictions = self.execute(graph_fn, [])
+    self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.shape)
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keypoint_head.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keypoint_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..79a4d4bef3a3877b8b1ed96f6c36b9287dad3f37
--- /dev/null
+++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keypoint_head.py
@@ -0,0 +1,115 @@
+# Lint as: python2, python3
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Keypoint Head.
+
+Contains Keypoint prediction head classes for different meta architectures.
+All the keypoint prediction heads have a predict function that receives the
+`features` as the first argument and returns `keypoint_predictions`.
+Keypoints could be used to represent the human body joint locations as in
+Mask RCNN paper. Or they could be used to represent different part locations of
+objects.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from six.moves import range
+import tensorflow.compat.v1 as tf
+import tf_slim as slim
+
+from object_detection.predictors.heads import head
+
+
+class MaskRCNNKeypointHead(head.Head):
+  """Mask RCNN keypoint prediction head.
+
+  Please refer to Mask RCNN paper:
+  https://arxiv.org/abs/1703.06870
+  """
+
+  def __init__(self,
+               num_keypoints=17,
+               conv_hyperparams_fn=None,
+               keypoint_heatmap_height=56,
+               keypoint_heatmap_width=56,
+               keypoint_prediction_num_conv_layers=8,
+               keypoint_prediction_conv_depth=512):
+    """Constructor.
+
+    Args:
+      num_keypoints: (int scalar) number of keypoints.
+      conv_hyperparams_fn: A function to generate tf-slim arg_scope with
+        hyperparameters for convolution ops.
+      keypoint_heatmap_height: Desired output heatmap height. The default value
+        is 56.
+      keypoint_heatmap_width: Desired output heatmap width. The default value
+        is 56.
+      keypoint_prediction_num_conv_layers: Number of convolution layers applied
+        to the image_features in the keypoint prediction branch.
+      keypoint_prediction_conv_depth: The depth for the first conv2d_transpose
+        op applied to the image_features in the keypoint prediction branch.
If set + to 0, the depth of the convolution layers will be automatically chosen + based on the number of object classes and the number of channels in the + image features. + """ + super(MaskRCNNKeypointHead, self).__init__() + self._num_keypoints = num_keypoints + self._conv_hyperparams_fn = conv_hyperparams_fn + self._keypoint_heatmap_height = keypoint_heatmap_height + self._keypoint_heatmap_width = keypoint_heatmap_width + self._keypoint_prediction_num_conv_layers = ( + keypoint_prediction_num_conv_layers) + self._keypoint_prediction_conv_depth = keypoint_prediction_conv_depth + + def predict(self, features, num_predictions_per_location=1): + """Performs keypoint prediction. + + Args: + features: A float tensor of shape [batch_size, height, width, + channels] containing features for a batch of images. + num_predictions_per_location: Int containing number of predictions per + location. + + Returns: + instance_masks: A float tensor of shape + [batch_size, 1, num_keypoints, heatmap_height, heatmap_width]. + + Raises: + ValueError: If num_predictions_per_location is not 1. + """ + if num_predictions_per_location != 1: + raise ValueError('Only num_predictions_per_location=1 is supported') + with slim.arg_scope(self._conv_hyperparams_fn()): + net = slim.conv2d( + features, + self._keypoint_prediction_conv_depth, [3, 3], + scope='conv_1') + for i in range(1, self._keypoint_prediction_num_conv_layers): + net = slim.conv2d( + net, + self._keypoint_prediction_conv_depth, [3, 3], + scope='conv_%d' % (i + 1)) + net = slim.conv2d_transpose( + net, self._num_keypoints, [2, 2], scope='deconv1') + heatmaps_mask = tf.image.resize_bilinear( + net, [self._keypoint_heatmap_height, self._keypoint_heatmap_width], + align_corners=True, + name='upsample') + return tf.expand_dims( + tf.transpose(heatmaps_mask, perm=[0, 3, 1, 2]), + axis=1, + name='KeypointPredictor') diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keypoint_head_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keypoint_head_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..828174989133fd2ec6552ad848985719bdae35a5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/keypoint_head_tf1_test.py @@ -0,0 +1,60 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.heads.keypoint_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import keypoint_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class MaskRCNNKeypointHeadTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + keypoint_prediction_head = keypoint_head.MaskRCNNKeypointHead( + conv_hyperparams_fn=self._build_arg_scope_with_hyperparams()) + roi_pooled_features = tf.random_uniform( + [64, 14, 14, 1024], minval=-2.0, maxval=2.0, dtype=tf.float32) + prediction = keypoint_prediction_head.predict( + features=roi_pooled_features, num_predictions_per_location=1) + self.assertAllEqual([64, 1, 17, 56, 56], prediction.get_shape().as_list()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/mask_head.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/mask_head.py new file mode 100644 index 0000000000000000000000000000000000000000..ca0a694f531806982bda0f46c407babca36adeed --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/mask_head.py @@ -0,0 +1,360 @@ +# Lint as: python2, python3 +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Mask Head. + +Contains Mask prediction head classes for different meta architectures. +All the mask prediction heads have a predict function that receives the +`features` as the first argument and returns `mask_predictions`. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +from six.moves import range +import tensorflow.compat.v1 as tf +import tf_slim as slim + +from object_detection.predictors.heads import head +from object_detection.utils import ops + + +class MaskRCNNMaskHead(head.Head): + """Mask RCNN mask prediction head. 
+ + Please refer to Mask RCNN paper: + https://arxiv.org/abs/1703.06870 + """ + + def __init__(self, + num_classes, + conv_hyperparams_fn=None, + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False, + convolve_then_upsample=False): + """Constructor. + + Args: + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams_fn: A function to generate tf-slim arg_scope with + hyperparameters for convolution ops. + mask_height: Desired output mask height. The default value is 14. + mask_width: Desired output mask width. The default value is 14. + mask_prediction_num_conv_layers: Number of convolution layers applied to + the image_features in mask prediction branch. + mask_prediction_conv_depth: The depth for the first conv2d_transpose op + applied to the image_features in the mask prediction branch. If set + to 0, the depth of the convolution layers will be automatically chosen + based on the number of object classes and the number of channels in the + image features. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + convolve_then_upsample: Whether to apply convolutions on mask features + before upsampling using nearest neighbor resizing. Otherwise, mask + features are resized to [`mask_height`, `mask_width`] using bilinear + resizing before applying convolutions. + + Raises: + ValueError: conv_hyperparams_fn is None. + """ + super(MaskRCNNMaskHead, self).__init__() + self._num_classes = num_classes + self._conv_hyperparams_fn = conv_hyperparams_fn + self._mask_height = mask_height + self._mask_width = mask_width + self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers + self._mask_prediction_conv_depth = mask_prediction_conv_depth + self._masks_are_class_agnostic = masks_are_class_agnostic + self._convolve_then_upsample = convolve_then_upsample + if conv_hyperparams_fn is None: + raise ValueError('conv_hyperparams_fn is None.') + + def _get_mask_predictor_conv_depth(self, + num_feature_channels, + num_classes, + class_weight=3.0, + feature_weight=2.0): + """Computes the depth of the mask predictor convolutions. + + Computes the depth of the mask predictor convolutions given feature channels + and number of classes by performing a weighted average of the two in + log space to compute the number of convolution channels. The weights that + are used for computing the weighted average do not need to sum to 1. + + Args: + num_feature_channels: An integer containing the number of feature + channels. + num_classes: An integer containing the number of classes. + class_weight: Class weight used in computing the weighted average. + feature_weight: Feature weight used in computing the weighted average. + + Returns: + An integer containing the number of convolution channels used by mask + predictor. 
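+
+      As a worked example: with num_feature_channels=1024 (log2 = 10.0) and
+      num_classes=20 (log2 ~= 4.32), the weighted average is
+      (2.0 * 10.0 + 3.0 * 4.32) / 5.0 ~= 6.59, which rounds to 7 and gives
+      2**7 = 128 convolution channels.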
+ """ + num_feature_channels_log = math.log(float(num_feature_channels), 2.0) + num_classes_log = math.log(float(num_classes), 2.0) + weighted_num_feature_channels_log = ( + num_feature_channels_log * feature_weight) + weighted_num_classes_log = num_classes_log * class_weight + total_weight = feature_weight + class_weight + num_conv_channels_log = round( + (weighted_num_feature_channels_log + weighted_num_classes_log) / + total_weight) + return int(math.pow(2.0, num_conv_channels_log)) + + def predict(self, features, num_predictions_per_location=1): + """Performs mask prediction. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing features for a batch of images. + num_predictions_per_location: Int containing number of predictions per + location. + + Returns: + instance_masks: A float tensor of shape + [batch_size, 1, num_classes, mask_height, mask_width]. + + Raises: + ValueError: If num_predictions_per_location is not 1. + """ + if num_predictions_per_location != 1: + raise ValueError('Only num_predictions_per_location=1 is supported') + num_conv_channels = self._mask_prediction_conv_depth + if num_conv_channels == 0: + num_feature_channels = features.get_shape().as_list()[3] + num_conv_channels = self._get_mask_predictor_conv_depth( + num_feature_channels, self._num_classes) + with slim.arg_scope(self._conv_hyperparams_fn()): + if not self._convolve_then_upsample: + features = tf.image.resize_bilinear( + features, [self._mask_height, self._mask_width], + align_corners=True) + for _ in range(self._mask_prediction_num_conv_layers - 1): + features = slim.conv2d( + features, + num_outputs=num_conv_channels, + kernel_size=[3, 3]) + if self._convolve_then_upsample: + # Replace Transposed Convolution with a Nearest Neighbor upsampling step + # followed by 3x3 convolution. + height_scale = self._mask_height // features.shape[1].value + width_scale = self._mask_width // features.shape[2].value + features = ops.nearest_neighbor_upsampling( + features, height_scale=height_scale, width_scale=width_scale) + features = slim.conv2d( + features, + num_outputs=num_conv_channels, + kernel_size=[3, 3]) + + num_masks = 1 if self._masks_are_class_agnostic else self._num_classes + mask_predictions = slim.conv2d( + features, + num_outputs=num_masks, + activation_fn=None, + normalizer_fn=None, + kernel_size=[3, 3]) + return tf.expand_dims( + tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), + axis=1, + name='MaskPredictor') + + +class ConvolutionalMaskHead(head.Head): + """Convolutional class prediction head.""" + + def __init__(self, + is_training, + num_classes, + use_dropout, + dropout_keep_prob, + kernel_size, + use_depthwise=False, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=False): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: Number of classes. + use_dropout: Option to use dropout or not. Note that a single dropout + op is applied here prior to both box and class predictions, which stands + in contrast to the ConvolutionalBoxPredictor below. + dropout_keep_prob: Keep probability for dropout. + This is only used if use_dropout is True. + kernel_size: Size of final convolution kernel. If the + spatial resolution of the feature map is smaller than the kernel size, + then the kernel size is automatically set to be + min(feature_width, feature_height). + use_depthwise: Whether to use depthwise convolutions for prediction + steps. Default is False. 
+ mask_height: Desired output mask height. The default value is 7. + mask_width: Desired output mask width. The default value is 7. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. + + Raises: + ValueError: if min_depth > max_depth. + """ + super(ConvolutionalMaskHead, self).__init__() + self._is_training = is_training + self._num_classes = num_classes + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._kernel_size = kernel_size + self._use_depthwise = use_depthwise + self._mask_height = mask_height + self._mask_width = mask_width + self._masks_are_class_agnostic = masks_are_class_agnostic + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. + + Returns: + mask_predictions: A float tensors of shape + [batch_size, num_anchors, num_masks, mask_height, mask_width] + representing the mask predictions for the proposals. + """ + image_feature = features + # Add a slot for the background class. + if self._masks_are_class_agnostic: + num_masks = 1 + else: + num_masks = self._num_classes + num_mask_channels = num_masks * self._mask_height * self._mask_width + net = image_feature + if self._use_dropout: + net = slim.dropout(net, keep_prob=self._dropout_keep_prob) + if self._use_depthwise: + mask_predictions = slim.separable_conv2d( + net, None, [self._kernel_size, self._kernel_size], + padding='SAME', depth_multiplier=1, stride=1, + rate=1, scope='MaskPredictor_depthwise') + mask_predictions = slim.conv2d( + mask_predictions, + num_predictions_per_location * num_mask_channels, + [1, 1], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope='MaskPredictor') + else: + mask_predictions = slim.conv2d( + net, + num_predictions_per_location * num_mask_channels, + [self._kernel_size, self._kernel_size], + activation_fn=None, + normalizer_fn=None, + normalizer_params=None, + scope='MaskPredictor') + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + mask_predictions = tf.reshape( + mask_predictions, + [batch_size, -1, num_masks, self._mask_height, self._mask_width]) + return mask_predictions + + +# TODO(alirezafathi): See if possible to unify Weight Shared with regular +# convolutional mask head. +class WeightSharedConvolutionalMaskHead(head.Head): + """Weight shared convolutional mask prediction head.""" + + def __init__(self, + num_classes, + kernel_size=3, + use_dropout=False, + dropout_keep_prob=0.8, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=False): + """Constructor. + + Args: + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + kernel_size: Size of final convolution kernel. + use_dropout: Whether to apply dropout to class prediction head. + dropout_keep_prob: Probability of keeping activiations. + mask_height: Desired output mask height. The default value is 7. + mask_width: Desired output mask width. The default value is 7. + masks_are_class_agnostic: Boolean determining if the mask-head is + class-agnostic or not. 
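+
+    Note (illustrative): when masks_are_class_agnostic is True a single
+    foreground mask is predicted per anchor, so predict() returns a tensor of
+    shape [batch_size, num_anchors, 1, mask_height, mask_width]; the
+    class-agnostic unit test in this change checks exactly this shape.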
+ """ + super(WeightSharedConvolutionalMaskHead, self).__init__() + self._num_classes = num_classes + self._kernel_size = kernel_size + self._use_dropout = use_dropout + self._dropout_keep_prob = dropout_keep_prob + self._mask_height = mask_height + self._mask_width = mask_width + self._masks_are_class_agnostic = masks_are_class_agnostic + + def predict(self, features, num_predictions_per_location): + """Predicts boxes. + + Args: + features: A float tensor of shape [batch_size, height, width, channels] + containing image features. + num_predictions_per_location: Number of box predictions to be made per + spatial location. + + Returns: + mask_predictions: A tensor of shape + [batch_size, num_anchors, num_classes, mask_height, mask_width] + representing the mask predictions for the proposals. + """ + mask_predictions_net = features + if self._masks_are_class_agnostic: + num_masks = 1 + else: + num_masks = self._num_classes + num_mask_channels = num_masks * self._mask_height * self._mask_width + if self._use_dropout: + mask_predictions_net = slim.dropout( + mask_predictions_net, keep_prob=self._dropout_keep_prob) + mask_predictions = slim.conv2d( + mask_predictions_net, + num_predictions_per_location * num_mask_channels, + [self._kernel_size, self._kernel_size], + activation_fn=None, stride=1, padding='SAME', + normalizer_fn=None, + scope='MaskPredictor') + batch_size = features.get_shape().as_list()[0] + if batch_size is None: + batch_size = tf.shape(features)[0] + mask_predictions = tf.reshape( + mask_predictions, + [batch_size, -1, num_masks, self._mask_height, self._mask_width]) + return mask_predictions diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/mask_head.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/mask_head.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75f672416ac104eea9ad306de99f939d88e5c5c0 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/mask_head.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/mask_head_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/mask_head_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..152394836135abeaa68f32dd48275a89347d4059 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/heads/mask_head_tf1_test.py @@ -0,0 +1,190 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.heads.mask_head.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors.heads import mask_head +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class MaskRCNNMaskHeadTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + mask_prediction_head = mask_head.MaskRCNNMaskHead( + num_classes=20, + conv_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + mask_height=14, + mask_width=14, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=False) + roi_pooled_features = tf.random_uniform( + [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = mask_prediction_head.predict( + features=roi_pooled_features, num_predictions_per_location=1) + self.assertAllEqual([64, 1, 20, 14, 14], prediction.get_shape().as_list()) + + def test_prediction_size_with_convolve_then_upsample(self): + mask_prediction_head = mask_head.MaskRCNNMaskHead( + num_classes=20, + conv_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + mask_height=28, + mask_width=28, + mask_prediction_num_conv_layers=2, + mask_prediction_conv_depth=256, + masks_are_class_agnostic=True, + convolve_then_upsample=True) + roi_pooled_features = tf.random_uniform( + [64, 14, 14, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + prediction = mask_prediction_head.predict( + features=roi_pooled_features, num_predictions_per_location=1) + self.assertAllEqual([64, 1, 1, 28, 28], prediction.get_shape().as_list()) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class ConvolutionalMaskPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + mask_prediction_head = mask_head.ConvolutionalMaskHead( + is_training=True, + num_classes=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + mask_height=7, + mask_width=7) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 20, 7, 7], + mask_predictions.get_shape().as_list()) + + def test_class_agnostic_prediction_size(self): + mask_prediction_head = mask_head.ConvolutionalMaskHead( + 
is_training=True, + num_classes=20, + use_dropout=True, + dropout_keep_prob=0.5, + kernel_size=3, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=True) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 1, 7, 7], + mask_predictions.get_shape().as_list()) + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class WeightSharedConvolutionalMaskPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams( + self, op_type=hyperparams_pb2.Hyperparams.CONV): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_prediction_size(self): + mask_prediction_head = ( + mask_head.WeightSharedConvolutionalMaskHead( + num_classes=20, + mask_height=7, + mask_width=7)) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 20, 7, 7], + mask_predictions.get_shape().as_list()) + + def test_class_agnostic_prediction_size(self): + mask_prediction_head = ( + mask_head.WeightSharedConvolutionalMaskHead( + num_classes=20, + mask_height=7, + mask_width=7, + masks_are_class_agnostic=True)) + image_feature = tf.random_uniform( + [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) + mask_predictions = mask_prediction_head.predict( + features=image_feature, + num_predictions_per_location=1) + self.assertAllEqual([64, 323, 1, 7, 7], + mask_predictions.get_shape().as_list()) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_box_predictor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..26ff65daabdc8c140a42c8a0f6ed4d3cf42752c5 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_box_predictor.py @@ -0,0 +1,141 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Mask R-CNN Box Predictor.""" +from object_detection.core import box_predictor + + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class MaskRCNNBoxPredictor(box_predictor.BoxPredictor): + """Mask R-CNN Box Predictor. + + See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017). + Mask R-CNN. arXiv preprint arXiv:1703.06870. + + This is used for the second stage of the Mask R-CNN detector where proposals + cropped from an image are arranged along the batch dimension of the input + image_features tensor. Notice that locations are *not* shared across classes, + thus for each anchor, a separate prediction is made for each class. + + In addition to predicting boxes and classes, optionally this class allows + predicting masks and/or keypoints inside detection boxes. + + Currently this box predictor makes per-class predictions; that is, each + anchor makes a separate box prediction for each class. + """ + + def __init__(self, + is_training, + num_classes, + box_prediction_head, + class_prediction_head, + third_stage_heads): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + box_prediction_head: The head that predicts the boxes in second stage. + class_prediction_head: The head that predicts the classes in second stage. + third_stage_heads: A dictionary mapping head names to mask rcnn head + classes. + """ + super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes) + self._box_prediction_head = box_prediction_head + self._class_prediction_head = class_prediction_head + self._third_stage_heads = third_stage_heads + + @property + def num_classes(self): + return self._num_classes + + def get_second_stage_prediction_heads(self): + return BOX_ENCODINGS, CLASS_PREDICTIONS_WITH_BACKGROUND + + def get_third_stage_prediction_heads(self): + return sorted(self._third_stage_heads.keys()) + + def _predict(self, + image_features, + num_predictions_per_location, + prediction_stage=2): + """Optionally computes encoded object locations, confidences, and masks. + + Predicts the heads belonging to the given prediction stage. + + Args: + image_features: A list of float tensors of shape + [batch_size, height_i, width_i, channels_i] containing roi pooled + features for each image. The length of the list should be 1 otherwise + a ValueError will be raised. + num_predictions_per_location: A list of integers representing the number + of box predictions to be made per spatial location for each feature map. + Currently, this must be set to [1], or an error will be raised. + prediction_stage: Prediction stage. Acceptable values are 2 and 3. + + Returns: + A dictionary containing the predicted tensors that are listed in + self._prediction_heads. A subset of the following keys will exist in the + dictionary: + BOX_ENCODINGS: A float tensor of shape + [batch_size, 1, num_classes, code_size] representing the + location of the objects. 
+ CLASS_PREDICTIONS_WITH_BACKGROUND: A float tensor of shape + [batch_size, 1, num_classes + 1] representing the class + predictions for the proposals. + MASK_PREDICTIONS: A float tensor of shape + [batch_size, 1, num_classes, image_height, image_width] + + Raises: + ValueError: If num_predictions_per_location is not 1 or if + len(image_features) is not 1. + ValueError: if prediction_stage is not 2 or 3. + """ + if (len(num_predictions_per_location) != 1 or + num_predictions_per_location[0] != 1): + raise ValueError('Currently FullyConnectedBoxPredictor only supports ' + 'predicting a single box per class per location.') + if len(image_features) != 1: + raise ValueError('length of `image_features` must be 1. Found {}'.format( + len(image_features))) + image_feature = image_features[0] + predictions_dict = {} + + if prediction_stage == 2: + predictions_dict[BOX_ENCODINGS] = self._box_prediction_head.predict( + features=image_feature, + num_predictions_per_location=num_predictions_per_location[0]) + predictions_dict[CLASS_PREDICTIONS_WITH_BACKGROUND] = ( + self._class_prediction_head.predict( + features=image_feature, + num_predictions_per_location=num_predictions_per_location[0])) + elif prediction_stage == 3: + for prediction_head in self.get_third_stage_prediction_heads(): + head_object = self._third_stage_heads[prediction_head] + predictions_dict[prediction_head] = head_object.predict( + features=image_feature, + num_predictions_per_location=num_predictions_per_location[0]) + else: + raise ValueError('prediction_stage should be either 2 or 3.') + + return predictions_dict diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_box_predictor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_box_predictor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..786a3a88e6a14797e6cabafc11cf54bccf36fe74 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_box_predictor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_box_predictor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_box_predictor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d9a4bcbbf004dedc670956baf05615358e33e1a1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_box_predictor_tf1_test.py @@ -0,0 +1,154 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Tests for object_detection.predictors.mask_rcnn_box_predictor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.predictors import mask_rcnn_box_predictor as box_predictor +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class MaskRCNNBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.build(hyperparams, is_training=True) + + def test_get_boxes_with_five_classes(self): + def graph_fn(image_features): + mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + ) + box_predictions = mask_box_predictor.predict( + [image_features], + num_predictions_per_location=[1], + scope='BoxPredictor', + prediction_stage=2) + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) + + def test_get_boxes_with_five_classes_share_box_across_classes(self): + def graph_fn(image_features): + mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + share_box_across_classes=True + ) + box_predictions = mask_box_predictor.predict( + [image_features], + num_predictions_per_location=[1], + scope='BoxPredictor', + prediction_stage=2) + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) + + def test_value_error_on_predict_instance_masks_with_no_conv_hyperparms(self): + with self.assertRaises(ValueError): + box_predictor_builder.build_mask_rcnn_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + predict_instance_masks=True) + + def test_get_instance_masks(self): + def graph_fn(image_features): + mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( + 
is_training=False, + num_classes=5, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + conv_hyperparams_fn=self._build_arg_scope_with_hyperparams( + op_type=hyperparams_pb2.Hyperparams.CONV), + predict_instance_masks=True) + box_predictions = mask_box_predictor.predict( + [image_features], + num_predictions_per_location=[1], + scope='BoxPredictor', + prediction_stage=3) + return (box_predictions[box_predictor.MASK_PREDICTIONS],) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + mask_predictions = self.execute(graph_fn, [image_features]) + self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14]) + + def test_do_not_return_instance_masks_without_request(self): + image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32) + mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4) + box_predictions = mask_box_predictor.predict( + [image_features], + num_predictions_per_location=[1], + scope='BoxPredictor', + prediction_stage=2) + self.assertEqual(len(box_predictions), 2) + self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions) + self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND + in box_predictions) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_keras_box_predictor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_keras_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..baca02edda0e21dbbc070e8c3800f898875aafe1 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_keras_box_predictor.py @@ -0,0 +1,139 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Mask R-CNN Box Predictor.""" +from object_detection.core import box_predictor + + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class MaskRCNNKerasBoxPredictor(box_predictor.KerasBoxPredictor): + """Mask R-CNN Box Predictor. + + See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017). + Mask R-CNN. arXiv preprint arXiv:1703.06870. + + This is used for the second stage of the Mask R-CNN detector where proposals + cropped from an image are arranged along the batch dimension of the input + image_features tensor. Notice that locations are *not* shared across classes, + thus for each anchor, a separate prediction is made for each class. 
+ + In addition to predicting boxes and classes, optionally this class allows + predicting masks and/or keypoints inside detection boxes. + + Currently this box predictor makes per-class predictions; that is, each + anchor makes a separate box prediction for each class. + """ + + def __init__(self, + is_training, + num_classes, + freeze_batchnorm, + box_prediction_head, + class_prediction_head, + third_stage_heads, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + box_prediction_head: The head that predicts the boxes in second stage. + class_prediction_head: The head that predicts the classes in second stage. + third_stage_heads: A dictionary mapping head names to mask rcnn head + classes. + name: A string name scope to assign to the model. If `None`, Keras + will auto-generate one from the class name. + """ + super(MaskRCNNKerasBoxPredictor, self).__init__( + is_training, num_classes, freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=False, name=name) + self._box_prediction_head = box_prediction_head + self._class_prediction_head = class_prediction_head + self._third_stage_heads = third_stage_heads + + @property + def num_classes(self): + return self._num_classes + + def get_second_stage_prediction_heads(self): + return BOX_ENCODINGS, CLASS_PREDICTIONS_WITH_BACKGROUND + + def get_third_stage_prediction_heads(self): + return sorted(self._third_stage_heads.keys()) + + def _predict(self, + image_features, + prediction_stage=2, + **kwargs): + """Optionally computes encoded object locations, confidences, and masks. + + Predicts the heads belonging to the given prediction stage. + + Args: + image_features: A list of float tensors of shape + [batch_size, height_i, width_i, channels_i] containing roi pooled + features for each image. The length of the list should be 1 otherwise + a ValueError will be raised. + prediction_stage: Prediction stage. Acceptable values are 2 and 3. + **kwargs: Unused Keyword args + + Returns: + A dictionary containing the predicted tensors that are listed in + self._prediction_heads. A subset of the following keys will exist in the + dictionary: + BOX_ENCODINGS: A float tensor of shape + [batch_size, 1, num_classes, code_size] representing the + location of the objects. + CLASS_PREDICTIONS_WITH_BACKGROUND: A float tensor of shape + [batch_size, 1, num_classes + 1] representing the class + predictions for the proposals. + MASK_PREDICTIONS: A float tensor of shape + [batch_size, 1, num_classes, image_height, image_width] + + Raises: + ValueError: If num_predictions_per_location is not 1 or if + len(image_features) is not 1. + ValueError: if prediction_stage is not 2 or 3. + """ + if len(image_features) != 1: + raise ValueError('length of `image_features` must be 1. 
Found {}'.format( + len(image_features))) + image_feature = image_features[0] + predictions_dict = {} + + if prediction_stage == 2: + predictions_dict[BOX_ENCODINGS] = self._box_prediction_head(image_feature) + predictions_dict[CLASS_PREDICTIONS_WITH_BACKGROUND] = ( + self._class_prediction_head(image_feature)) + elif prediction_stage == 3: + for prediction_head in self.get_third_stage_prediction_heads(): + head_object = self._third_stage_heads[prediction_head] + predictions_dict[prediction_head] = head_object(image_feature) + else: + raise ValueError('prediction_stage should be either 2 or 3.') + + return predictions_dict diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_keras_box_predictor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_keras_box_predictor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99aa0be52769258b4191ed526f2a16d26895d659 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_keras_box_predictor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_keras_box_predictor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_keras_box_predictor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a92db9e90fb8299ff449bb614886a9a5542033c3 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/mask_rcnn_keras_box_predictor_tf2_test.py @@ -0,0 +1,144 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
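
The Keras variant above differs from the TF1 predictor mainly in how heads are invoked: they are plain callables applied directly to the pooled feature, and the predictor itself is called rather than its predict method. A minimal sketch, assuming hypothetical lambda heads in place of real Keras layer heads:

# Sketch only: the lambda heads are hypothetical stand-ins for real Keras
# layer heads; the callable invocation style matches the TF2 test below.
import tensorflow.compat.v1 as tf
from object_detection.predictors import mask_rcnn_keras_box_predictor

predictor = mask_rcnn_keras_box_predictor.MaskRCNNKerasBoxPredictor(
    is_training=False,
    num_classes=5,
    freeze_batchnorm=False,
    box_prediction_head=lambda feats: tf.zeros([tf.shape(feats)[0], 1, 5, 4]),
    class_prediction_head=lambda feats: tf.zeros([tf.shape(feats)[0], 1, 6]),
    third_stage_heads={})

roi_features = tf.zeros([8, 7, 7, 1024])
stage2 = predictor([roi_features], prediction_stage=2)
# Heads are invoked as head(feature) rather than head.predict(...), and no
# num_predictions_per_location argument is required.
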
+# ============================================================================== + +"""Tests for object_detection.predictors.mask_rcnn_box_predictor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import box_predictor_builder +from object_detection.builders import hyperparams_builder +from object_detection.predictors import mask_rcnn_keras_box_predictor as box_predictor +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class MaskRCNNKerasBoxPredictorTest(test_case.TestCase): + + def _build_hyperparams(self, + op_type=hyperparams_pb2.Hyperparams.FC): + hyperparams = hyperparams_pb2.Hyperparams() + hyperparams_text_proto = """ + activation: NONE + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(hyperparams_text_proto, hyperparams) + hyperparams.op = op_type + return hyperparams_builder.KerasLayerHyperparams(hyperparams) + + def test_get_boxes_with_five_classes(self): + mask_box_predictor = ( + box_predictor_builder.build_mask_rcnn_keras_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams=self._build_hyperparams(), + freeze_batchnorm=False, + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + )) + def graph_fn(image_features): + box_predictions = mask_box_predictor( + [image_features], + prediction_stage=2) + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) + + def test_get_boxes_with_five_classes_share_box_across_classes(self): + mask_box_predictor = ( + box_predictor_builder.build_mask_rcnn_keras_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams=self._build_hyperparams(), + freeze_batchnorm=False, + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + share_box_across_classes=True + )) + def graph_fn(image_features): + + box_predictions = mask_box_predictor( + [image_features], + prediction_stage=2) + return (box_predictions[box_predictor.BOX_ENCODINGS], + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) + image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) + (box_encodings, + class_predictions_with_background) = self.execute(graph_fn, + [image_features]) + self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) + + def test_get_instance_masks(self): + mask_box_predictor = ( + box_predictor_builder.build_mask_rcnn_keras_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams=self._build_hyperparams(), + freeze_batchnorm=False, + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4, + conv_hyperparams=self._build_hyperparams( + op_type=hyperparams_pb2.Hyperparams.CONV), + predict_instance_masks=True)) + def graph_fn(image_features): + box_predictions = mask_box_predictor( + [image_features], + prediction_stage=3) + return (box_predictions[box_predictor.MASK_PREDICTIONS],) + image_features = 
np.random.rand(2, 7, 7, 3).astype(np.float32) + mask_predictions = self.execute(graph_fn, [image_features]) + self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14]) + + def test_do_not_return_instance_masks_without_request(self): + image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32) + mask_box_predictor = ( + box_predictor_builder.build_mask_rcnn_keras_box_predictor( + is_training=False, + num_classes=5, + fc_hyperparams=self._build_hyperparams(), + freeze_batchnorm=False, + use_dropout=False, + dropout_keep_prob=0.5, + box_code_size=4)) + box_predictions = mask_box_predictor( + [image_features], + prediction_stage=2) + self.assertEqual(len(box_predictions), 2) + self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions) + self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND + in box_predictions) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_box_predictor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..c5cf7acbebde3225eea1a2b0631fda208784f43d --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_box_predictor.py @@ -0,0 +1,159 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""RFCN Box Predictor.""" +import tensorflow.compat.v1 as tf +import tf_slim as slim +from object_detection.core import box_predictor +from object_detection.utils import ops + + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class RfcnBoxPredictor(box_predictor.BoxPredictor): + """RFCN Box Predictor. + + Applies a position sensitive ROI pooling on position sensitive feature maps to + predict classes and refined locations. See https://arxiv.org/abs/1605.06409 + for details. + + This is used for the second stage of the RFCN meta architecture. Notice that + locations are *not* shared across classes, thus for each anchor, a separate + prediction is made for each class. + """ + + def __init__(self, + is_training, + num_classes, + conv_hyperparams_fn, + num_spatial_bins, + depth, + crop_size, + box_code_size): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams_fn: A function to construct tf-slim arg_scope with + hyperparameters for convolutional layers. 
+ num_spatial_bins: A list of two integers `[spatial_bins_y, + spatial_bins_x]`. + depth: Target depth to reduce the input feature maps to. + crop_size: A list of two integers `[crop_height, crop_width]`. + box_code_size: Size of encoding for each box. + """ + super(RfcnBoxPredictor, self).__init__(is_training, num_classes) + self._conv_hyperparams_fn = conv_hyperparams_fn + self._num_spatial_bins = num_spatial_bins + self._depth = depth + self._crop_size = crop_size + self._box_code_size = box_code_size + + @property + def num_classes(self): + return self._num_classes + + def _predict(self, image_features, num_predictions_per_location, + proposal_boxes): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + num_predictions_per_location: A list of integers representing the number + of box predictions to be made per spatial location for each feature map. + Currently, this must be set to [1], or an error will be raised. + proposal_boxes: A float tensor of shape [batch_size, num_proposals, + box_code_size]. + + Returns: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. Each entry in the + list corresponds to a feature map in the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + + Raises: + ValueError: if num_predictions_per_location is not 1 or if + len(image_features) is not 1. + """ + if (len(num_predictions_per_location) != 1 or + num_predictions_per_location[0] != 1): + raise ValueError('Currently RfcnBoxPredictor only supports ' + 'predicting a single box per class per location.') + if len(image_features) != 1: + raise ValueError('length of `image_features` must be 1. Found {}'. + format(len(image_features))) + image_feature = image_features[0] + num_predictions_per_location = num_predictions_per_location[0] + batch_size = tf.shape(proposal_boxes)[0] + num_boxes = tf.shape(proposal_boxes)[1] + net = image_feature + with slim.arg_scope(self._conv_hyperparams_fn()): + net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth') + # Location predictions. + location_feature_map_depth = (self._num_spatial_bins[0] * + self._num_spatial_bins[1] * + self.num_classes * + self._box_code_size) + location_feature_map = slim.conv2d(net, location_feature_map_depth, + [1, 1], activation_fn=None, + scope='refined_locations') + box_encodings = ops.batch_position_sensitive_crop_regions( + location_feature_map, + boxes=proposal_boxes, + crop_size=self._crop_size, + num_spatial_bins=self._num_spatial_bins, + global_pool=True) + box_encodings = tf.squeeze(box_encodings, axis=[2, 3]) + box_encodings = tf.reshape(box_encodings, + [batch_size * num_boxes, 1, self.num_classes, + self._box_code_size]) + + # Class predictions. + total_classes = self.num_classes + 1 # Account for background class. 
+ class_feature_map_depth = (self._num_spatial_bins[0] * + self._num_spatial_bins[1] * + total_classes) + class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1], + activation_fn=None, + scope='class_predictions') + class_predictions_with_background = ( + ops.batch_position_sensitive_crop_regions( + class_feature_map, + boxes=proposal_boxes, + crop_size=self._crop_size, + num_spatial_bins=self._num_spatial_bins, + global_pool=True)) + class_predictions_with_background = tf.squeeze( + class_predictions_with_background, axis=[2, 3]) + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size * num_boxes, 1, total_classes]) + + return {BOX_ENCODINGS: [box_encodings], + CLASS_PREDICTIONS_WITH_BACKGROUND: + [class_predictions_with_background]} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_box_predictor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_box_predictor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41d6929f90666216237d87cc0e4555f2e4d8021d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_box_predictor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_box_predictor_tf1_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_box_predictor_tf1_test.py new file mode 100644 index 0000000000000000000000000000000000000000..555c4b2adeaef6142884adbc5c4e1087084fd884 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_box_predictor_tf1_test.py @@ -0,0 +1,80 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
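
The 1x1 convolution depths computed in _predict above follow directly from the position-sensitive binning. A small arithmetic check, reusing the values from the RFCN test that follows (these numbers come from that test, not from additional requirements):

# Channel-depth arithmetic for RfcnBoxPredictor, using the values from the
# TF1 test below (num_spatial_bins=[3, 3], num_classes=2, box_code_size=4).
num_spatial_bins = [3, 3]
num_classes = 2
box_code_size = 4

# 'refined_locations' 1x1 conv: one spatial-bin group per class per box coordinate.
location_feature_map_depth = (num_spatial_bins[0] * num_spatial_bins[1] *
                              num_classes * box_code_size)
assert location_feature_map_depth == 72

# 'class_predictions' 1x1 conv: one spatial-bin group per class, plus background.
total_classes = num_classes + 1
class_feature_map_depth = num_spatial_bins[0] * num_spatial_bins[1] * total_classes
assert class_feature_map_depth == 27
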
+# ============================================================================== + +"""Tests for object_detection.predictors.rfcn_box_predictor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors import rfcn_box_predictor as box_predictor +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +class RfcnBoxPredictorTest(test_case.TestCase): + + def _build_arg_scope_with_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.build(conv_hyperparams, is_training=True) + + def test_get_correct_box_encoding_and_class_prediction_shapes(self): + + def graph_fn(image_features, proposal_boxes): + rfcn_box_predictor = box_predictor.RfcnBoxPredictor( + is_training=False, + num_classes=2, + conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), + num_spatial_bins=[3, 3], + depth=4, + crop_size=[12, 12], + box_code_size=4 + ) + box_predictions = rfcn_box_predictor.predict( + [image_features], num_predictions_per_location=[1], + scope='BoxPredictor', + proposal_boxes=proposal_boxes) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + proposal_boxes = np.random.rand(4, 2, 4).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features, proposal_boxes]) + + self.assertAllEqual(box_encodings.shape, [8, 1, 2, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [8, 1, 3]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_keras_box_predictor.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_keras_box_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..094e665f69c92235fe48686014f18f71225bb796 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_keras_box_predictor.py @@ -0,0 +1,204 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""RFCN Box Predictor.""" +import tensorflow.compat.v1 as tf +from object_detection.core import box_predictor +from object_detection.utils import ops + +BOX_ENCODINGS = box_predictor.BOX_ENCODINGS +CLASS_PREDICTIONS_WITH_BACKGROUND = ( + box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) +MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS + + +class RfcnKerasBoxPredictor(box_predictor.KerasBoxPredictor): + """RFCN Box Predictor. + + Applies a position sensitive ROI pooling on position sensitive feature maps to + predict classes and refined locations. See https://arxiv.org/abs/1605.06409 + for details. + + This is used for the second stage of the RFCN meta architecture. Notice that + locations are *not* shared across classes, thus for each anchor, a separate + prediction is made for each class. + """ + + def __init__(self, + is_training, + num_classes, + conv_hyperparams, + freeze_batchnorm, + num_spatial_bins, + depth, + crop_size, + box_code_size, + name=None): + """Constructor. + + Args: + is_training: Indicates whether the BoxPredictor is in training mode. + num_classes: number of classes. Note that num_classes *does not* + include the background category, so if groundtruth labels take values + in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the + assigned classification targets can range from {0,... K}). + conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object + containing hyperparameters for convolution ops. + freeze_batchnorm: Whether to freeze batch norm parameters during + training or not. When training with a small batch size (e.g. 1), it is + desirable to freeze batch norm update and use pretrained batch norm + params. + num_spatial_bins: A list of two integers `[spatial_bins_y, + spatial_bins_x]`. + depth: Target depth to reduce the input feature maps to. + crop_size: A list of two integers `[crop_height, crop_width]`. + box_code_size: Size of encoding for each box. + name: A string name scope to assign to the box predictor. If `None`, Keras + will auto-generate one from the class name. 
+ """ + super(RfcnKerasBoxPredictor, self).__init__( + is_training, num_classes, freeze_batchnorm=freeze_batchnorm, + inplace_batchnorm_update=False, name=name) + self._freeze_batchnorm = freeze_batchnorm + self._conv_hyperparams = conv_hyperparams + self._num_spatial_bins = num_spatial_bins + self._depth = depth + self._crop_size = crop_size + self._box_code_size = box_code_size + + # Build the shared layers used for both heads + self._shared_conv_layers = [] + self._shared_conv_layers.append( + tf.keras.layers.Conv2D( + self._depth, + [1, 1], + padding='SAME', + name='reduce_depth_conv', + **self._conv_hyperparams.params())) + self._shared_conv_layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='reduce_depth_batchnorm')) + self._shared_conv_layers.append( + self._conv_hyperparams.build_activation_layer( + name='reduce_depth_activation')) + + self._box_encoder_layers = [] + location_feature_map_depth = (self._num_spatial_bins[0] * + self._num_spatial_bins[1] * + self.num_classes * + self._box_code_size) + self._box_encoder_layers.append( + tf.keras.layers.Conv2D( + location_feature_map_depth, + [1, 1], + padding='SAME', + name='refined_locations_conv', + **self._conv_hyperparams.params())) + self._box_encoder_layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='refined_locations_batchnorm')) + + self._class_predictor_layers = [] + self._total_classes = self.num_classes + 1 # Account for background class. + class_feature_map_depth = (self._num_spatial_bins[0] * + self._num_spatial_bins[1] * + self._total_classes) + self._class_predictor_layers.append( + tf.keras.layers.Conv2D( + class_feature_map_depth, + [1, 1], + padding='SAME', + name='class_predictions_conv', + **self._conv_hyperparams.params())) + self._class_predictor_layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name='class_predictions_batchnorm')) + + @property + def num_classes(self): + return self._num_classes + + def _predict(self, image_features, proposal_boxes, **kwargs): + """Computes encoded object locations and corresponding confidences. + + Args: + image_features: A list of float tensors of shape [batch_size, height_i, + width_i, channels_i] containing features for a batch of images. + proposal_boxes: A float tensor of shape [batch_size, num_proposals, + box_code_size]. + **kwargs: Unused Keyword args + + Returns: + box_encodings: A list of float tensors of shape + [batch_size, num_anchors_i, q, code_size] representing the location of + the objects, where q is 1 or the number of classes. Each entry in the + list corresponds to a feature map in the input `image_features` list. + class_predictions_with_background: A list of float tensors of shape + [batch_size, num_anchors_i, num_classes + 1] representing the class + predictions for the proposals. Each entry in the list corresponds to a + feature map in the input `image_features` list. + + Raises: + ValueError: if num_predictions_per_location is not 1 or if + len(image_features) is not 1. + """ + if len(image_features) != 1: + raise ValueError('length of `image_features` must be 1. Found {}'. + format(len(image_features))) + image_feature = image_features[0] + batch_size = tf.shape(proposal_boxes)[0] + num_boxes = tf.shape(proposal_boxes)[1] + net = image_feature + for layer in self._shared_conv_layers: + net = layer(net) + + # Location predictions. 
+ box_net = net + for layer in self._box_encoder_layers: + box_net = layer(box_net) + box_encodings = ops.batch_position_sensitive_crop_regions( + box_net, + boxes=proposal_boxes, + crop_size=self._crop_size, + num_spatial_bins=self._num_spatial_bins, + global_pool=True) + box_encodings = tf.squeeze(box_encodings, axis=[2, 3]) + box_encodings = tf.reshape(box_encodings, + [batch_size * num_boxes, 1, self.num_classes, + self._box_code_size]) + + # Class predictions. + class_net = net + for layer in self._class_predictor_layers: + class_net = layer(class_net) + class_predictions_with_background = ( + ops.batch_position_sensitive_crop_regions( + class_net, + boxes=proposal_boxes, + crop_size=self._crop_size, + num_spatial_bins=self._num_spatial_bins, + global_pool=True)) + class_predictions_with_background = tf.squeeze( + class_predictions_with_background, axis=[2, 3]) + class_predictions_with_background = tf.reshape( + class_predictions_with_background, + [batch_size * num_boxes, 1, self._total_classes]) + + return {BOX_ENCODINGS: [box_encodings], + CLASS_PREDICTIONS_WITH_BACKGROUND: + [class_predictions_with_background]} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_keras_box_predictor.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_keras_box_predictor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1015acde35ff1c83822cf89103946c9c1a20138c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_keras_box_predictor.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_keras_box_predictor_tf2_test.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_keras_box_predictor_tf2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f845068e35b37a9b0d77873fb5adbf59c78450ae --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/predictors/rfcn_keras_box_predictor_tf2_test.py @@ -0,0 +1,79 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
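
Both RFCN predictors flatten proposals onto the leading axis before the final reshape, which is why the returned tensors have a batch_size * num_boxes leading dimension. A shape walkthrough with the numbers used in the RFCN tests in this diff:

# Shape walkthrough for the RFCN predictors, using the numbers from the RFCN
# tests in this diff: 4 images, 2 proposals each, 2 classes, 4-value box codes.
batch_size, num_boxes = 4, 2
num_classes, box_code_size = 2, 4

# Position-sensitive cropping pools one vector per (image, proposal) pair, so
# proposals are flattened onto the leading axis before the final tf.reshape.
box_encodings_shape = [batch_size * num_boxes, 1, num_classes, box_code_size]
class_predictions_shape = [batch_size * num_boxes, 1, num_classes + 1]

assert box_encodings_shape == [8, 1, 2, 4]
assert class_predictions_shape == [8, 1, 3]
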
+# ============================================================================== + +"""Tests for object_detection.predictors.rfcn_box_predictor.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from object_detection.builders import hyperparams_builder +from object_detection.predictors import rfcn_keras_box_predictor as box_predictor +from object_detection.protos import hyperparams_pb2 +from object_detection.utils import test_case +from object_detection.utils import tf_version + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class RfcnKerasBoxPredictorTest(test_case.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def test_get_correct_box_encoding_and_class_prediction_shapes(self): + rfcn_box_predictor = box_predictor.RfcnKerasBoxPredictor( + is_training=False, + num_classes=2, + conv_hyperparams=self._build_conv_hyperparams(), + freeze_batchnorm=False, + num_spatial_bins=[3, 3], + depth=4, + crop_size=[12, 12], + box_code_size=4) + def graph_fn(image_features, proposal_boxes): + + box_predictions = rfcn_box_predictor( + [image_features], + proposal_boxes=proposal_boxes) + box_encodings = tf.concat( + box_predictions[box_predictor.BOX_ENCODINGS], axis=1) + class_predictions_with_background = tf.concat( + box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], + axis=1) + return (box_encodings, class_predictions_with_background) + + image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) + proposal_boxes = np.random.rand(4, 2, 4).astype(np.float32) + (box_encodings, class_predictions_with_background) = self.execute( + graph_fn, [image_features, proposal_boxes]) + + self.assertAllEqual(box_encodings.shape, [8, 1, 2, 4]) + self.assertAllEqual(class_predictions_with_background.shape, [8, 1, 3]) + + +if __name__ == '__main__': + tf.test.main() diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__init__.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__init__.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97069c16fbd1f6796da54aff182361f4ee33d881 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__init__.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__init__/py_pb2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__init__/py_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..34114e1b5b2a51b9dd8a55257253451428a577c0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__init__/py_pb2.py @@ -0,0 +1,30 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: object_detection/protos/__init__.py + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='object_detection/protos/__init__.py', + package='', + syntax='proto2', + serialized_pb=_b('\n#object_detection/protos/__init__.py') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + + +# @@protoc_insertion_point(module_scope) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/__init__.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa6cf847ab38b5552bf6f458010522001346162e Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/__init__.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/anchor_generator_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/anchor_generator_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0116fba31ecbc2bcfdae80120ea294cc08dadeaf Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/anchor_generator_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/argmax_matcher_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/argmax_matcher_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29c1004bc3560e401b851e6064ce29c9efde82ac Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/argmax_matcher_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/bipartite_matcher_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/bipartite_matcher_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a5f27e264dc2aacdec010e7dc9d0849cd5eac7d Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/bipartite_matcher_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/box_coder_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/box_coder_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c03d6db5fb78a7b45b8205c93a4f2e324af420bd Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/box_coder_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/box_predictor_pb2.cpython-36.pyc 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/box_predictor_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97bb9e2f9db1eced73644bbc550e1282d0c3edea Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/box_predictor_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/calibration_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/calibration_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f72bc6f401a690061a788cf111313d17a81bf701 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/calibration_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/center_net_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/center_net_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d69619619acc458143a6ca8aae8fd2b9c918356 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/center_net_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/eval_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/eval_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cac700f3d1311c8a422b2fed9791affece66dfd Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/eval_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/faster_rcnn_box_coder_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/faster_rcnn_box_coder_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..872e22bfa33602161cc363463d2463bc121ff193 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/faster_rcnn_box_coder_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/faster_rcnn_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/faster_rcnn_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f30d3418cd0e2c44d530453adbe14c3b5504407 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/faster_rcnn_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/flexible_grid_anchor_generator_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/flexible_grid_anchor_generator_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aabadc3e6198f2ccc0cd8a804a847f39dee7bff Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/flexible_grid_anchor_generator_pb2.cpython-36.pyc differ diff --git 
a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/fpn_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/fpn_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1b06ae13794222e550632b255888791592c0df1 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/fpn_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/graph_rewriter_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/graph_rewriter_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..248e91af1b2fa764e59ac29f1bafa01c84b335a1 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/graph_rewriter_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/grid_anchor_generator_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/grid_anchor_generator_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0581b192c9778ffa959435f4a2fdca98921e145 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/grid_anchor_generator_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/hyperparams_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/hyperparams_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9072050b74bec801b92a818bc11042f34f6f6476 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/hyperparams_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/image_resizer_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/image_resizer_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..980131362b3815fa5c6b173b966562b5ded34487 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/image_resizer_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/input_reader_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/input_reader_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e89bc8ddc4786875a6297b89e76cc440d1891b2 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/input_reader_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/keypoint_box_coder_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/keypoint_box_coder_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62e45240a27ca9da42da2001b53f2a16a210086a Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/keypoint_box_coder_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/losses_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/losses_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..249c88bf00cc9e6ac70a3d632a25c1c41123ec05 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/losses_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/matcher_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/matcher_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ac8831b1dcb9db7d5a1adef58caddab9b517808 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/matcher_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/mean_stddev_box_coder_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/mean_stddev_box_coder_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44ba87b9ebbf53cf37b8740efa053be271496370 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/mean_stddev_box_coder_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/model_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/model_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba482b57e139280350a4a1dc9f407f3fef3d6a38 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/model_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/multiscale_anchor_generator_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/multiscale_anchor_generator_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5407fb9ca0fe1f639a47b8c151b807c0a05d4c62 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/multiscale_anchor_generator_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/optimizer_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/optimizer_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3882f7e59890bc395dac738a9080186718e9bc19 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/optimizer_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/pipeline_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/pipeline_pb2.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4666ed5c9cd984d785195fc6de3c8a03d64aebb3 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/pipeline_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/post_processing_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/post_processing_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca9fa350aeb4627d1d6b69835c521af04867f0fa Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/post_processing_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/preprocessor_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/preprocessor_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b7d232b3a553bbf59b051c420cfbe5542362a87 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/preprocessor_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/region_similarity_calculator_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/region_similarity_calculator_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86e6fbd67f249fd435f67932483aab746129a881 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/region_similarity_calculator_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/square_box_coder_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/square_box_coder_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76aaf73c39e17f347e58615cbfc4b0a7e635b364 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/square_box_coder_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/ssd_anchor_generator_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/ssd_anchor_generator_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d978306df8db8d30185146ee70da77a2c325f323 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/ssd_anchor_generator_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/ssd_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/ssd_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a610790104c6ad20153149cf6efe76dcfc251299 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/ssd_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/string_int_label_map_pb2.cpython-36.pyc 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/string_int_label_map_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5ed8dd7efd41ccc3344c269c4535a7fa0987204 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/string_int_label_map_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/train_pb2.cpython-36.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/train_pb2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fe35ea37610cff0b7e17de2f89e985c14483091 Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/__pycache__/train_pb2.cpython-36.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/anchor_generator.proto b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/anchor_generator.proto new file mode 100644 index 0000000000000000000000000000000000000000..9608ca48908e6f7aff35f57042b2275c5d0ab5d4 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/anchor_generator.proto @@ -0,0 +1,19 @@ +syntax = "proto2"; + +package object_detection.protos; + +import "object_detection/protos/flexible_grid_anchor_generator.proto"; +import "object_detection/protos/grid_anchor_generator.proto"; +import "object_detection/protos/multiscale_anchor_generator.proto"; +import "object_detection/protos/ssd_anchor_generator.proto"; + +// Configuration proto for the anchor generator to use in the object detection +// pipeline. See core/anchor_generator.py for details. +message AnchorGenerator { + oneof anchor_generator_oneof { + GridAnchorGenerator grid_anchor_generator = 1; + SsdAnchorGenerator ssd_anchor_generator = 2; + MultiscaleAnchorGenerator multiscale_anchor_generator = 3; + FlexibleGridAnchorGenerator flexible_grid_anchor_generator = 4; + } +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/anchor_generator_pb2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/anchor_generator_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..bea997868735b3a279be3498101cba51654fffe0 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/anchor_generator_pb2.py @@ -0,0 +1,114 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
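The anchor_generator.proto added above wraps the four available anchor generator configurations in a single oneof, so a pipeline configuration selects exactly one generator. As a quick, hypothetical usage sketch (not part of this commit; it assumes the standard num_layers/min_scale/max_scale fields of the imported ssd_anchor_generator.proto, and the values shown are only illustrative), the generated Python module can parse a text-format AnchorGenerator message and report which branch of the oneof is set:

# Hypothetical sketch: parse a text-format AnchorGenerator config with the
# generated module added in this commit and inspect the selected oneof branch.
from google.protobuf import text_format
from object_detection.protos import anchor_generator_pb2

config_text = """
ssd_anchor_generator {
  num_layers: 6
  min_scale: 0.2
  max_scale: 0.95
}
"""
anchor_config = text_format.Parse(config_text, anchor_generator_pb2.AnchorGenerator())
# The oneof records which generator type was configured.
print(anchor_config.WhichOneof("anchor_generator_oneof"))  # -> ssd_anchor_generator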
+# source: object_detection/protos/anchor_generator.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from object_detection.protos import flexible_grid_anchor_generator_pb2 as object__detection_dot_protos_dot_flexible__grid__anchor__generator__pb2 +from object_detection.protos import grid_anchor_generator_pb2 as object__detection_dot_protos_dot_grid__anchor__generator__pb2 +from object_detection.protos import multiscale_anchor_generator_pb2 as object__detection_dot_protos_dot_multiscale__anchor__generator__pb2 +from object_detection.protos import ssd_anchor_generator_pb2 as object__detection_dot_protos_dot_ssd__anchor__generator__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='object_detection/protos/anchor_generator.proto', + package='object_detection.protos', + syntax='proto2', + serialized_pb=_b('\n.object_detection/protos/anchor_generator.proto\x12\x17object_detection.protos\x1a.object_detection.protos.WeightSharedConvolutionalBoxPredictorH\x00\x42\x15\n\x13\x62ox_predictor_oneof\"\xaf\x04\n\x19\x43onvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x14\n\tmin_depth\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\tmax_depth\x18\x03 \x01(\x05:\x01\x30\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x19\n\x0buse_dropout\x18\x05 \x01(\x08:\x04true\x12%\n\x18\x64ropout_keep_probability\x18\x06 \x01(\x02:\x03\x30.8\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x31\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12&\n\x17\x61pply_sigmoid_to_scores\x18\t \x01(\x08:\x05\x66\x61lse\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1c\n\ruse_depthwise\x18\x0b \x01(\x08:\x05\x66\x61lse\x12j\n\x18\x62ox_encodings_clip_range\x18\x0c \x01(\x0b\x32H.object_detection.protos.ConvolutionalBoxPredictor.BoxEncodingsClipRange\x1a\x31\n\x15\x42oxEncodingsClipRange\x12\x0b\n\x03min\x18\x01 \x01(\x02\x12\x0b\n\x03max\x18\x02 \x01(\x02\"\xcc\x05\n%WeightSharedConvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x10\n\x05\x64\x65pth\x18\x02 \x01(\x05:\x01\x30\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x33\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1a\n\x0buse_dropout\x18\x0b \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x0c \x01(\x02:\x03\x30.8\x12%\n\x16share_prediction_tower\x18\r \x01(\x08:\x05\x66\x61lse\x12\x1c\n\ruse_depthwise\x18\x0e \x01(\x08:\x05\x66\x61lse\x12p\n\x0fscore_converter\x18\x10 \x01(\x0e\x32M.object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter:\x08IDENTITY\x12v\n\x18\x62ox_encodings_clip_range\x18\x11 \x01(\x0b\x32T.object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange\x1a\x31\n\x15\x42oxEncodingsClipRange\x12\x0b\n\x03min\x18\x01 \x01(\x02\x12\x0b\n\x03max\x18\x02 
\x01(\x02\"+\n\x0eScoreConverter\x12\x0c\n\x08IDENTITY\x10\x00\x12\x0b\n\x07SIGMOID\x10\x01\"\xbf\x04\n\x14MaskRCNNBoxPredictor\x12<\n\x0e\x66\x63_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x1a\n\x0buse_dropout\x18\x02 \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x03 \x01(\x02:\x03\x30.5\x12\x18\n\rbox_code_size\x18\x04 \x01(\x05:\x01\x34\x12>\n\x10\x63onv_hyperparams\x18\x05 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12%\n\x16predict_instance_masks\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\'\n\x1amask_prediction_conv_depth\x18\x07 \x01(\x05:\x03\x32\x35\x36\x12 \n\x11predict_keypoints\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0bmask_height\x18\t \x01(\x05:\x02\x31\x35\x12\x16\n\nmask_width\x18\n \x01(\x05:\x02\x31\x35\x12*\n\x1fmask_prediction_num_conv_layers\x18\x0b \x01(\x05:\x01\x32\x12\'\n\x18masks_are_class_agnostic\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\'\n\x18share_box_across_classes\x18\r \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x63onvolve_then_upsample_masks\x18\x0e \x01(\x08:\x05\x66\x61lse\"\xf9\x01\n\x10RfcnBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\"\n\x17num_spatial_bins_height\x18\x02 \x01(\x05:\x01\x33\x12!\n\x16num_spatial_bins_width\x18\x03 \x01(\x05:\x01\x33\x12\x13\n\x05\x64\x65pth\x18\x04 \x01(\x05:\x04\x31\x30\x32\x34\x12\x18\n\rbox_code_size\x18\x05 \x01(\x05:\x01\x34\x12\x17\n\x0b\x63rop_height\x18\x06 \x01(\x05:\x02\x31\x32\x12\x16\n\ncrop_width\x18\x07 \x01(\x05:\x02\x31\x32') + , + dependencies=[object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + +_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER = _descriptor.EnumDescriptor( + name='ScoreConverter', + full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='IDENTITY', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SIGMOID', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1754, + serialized_end=1797, +) +_sym_db.RegisterEnumDescriptor(_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER) + + +_BOXPREDICTOR = _descriptor.Descriptor( + name='BoxPredictor', + full_name='object_detection.protos.BoxPredictor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.convolutional_box_predictor', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mask_rcnn_box_predictor', full_name='object_detection.protos.BoxPredictor.mask_rcnn_box_predictor', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rfcn_box_predictor', full_name='object_detection.protos.BoxPredictor.rfcn_box_predictor', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weight_shared_convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.weight_shared_convolutional_box_predictor', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='box_predictor_oneof', full_name='object_detection.protos.BoxPredictor.box_predictor_oneof', + index=0, containing_type=None, fields=[]), + ], + serialized_start=116, + serialized_end=516, +) + + +_CONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE = _descriptor.Descriptor( + name='BoxEncodingsClipRange', + full_name='object_detection.protos.ConvolutionalBoxPredictor.BoxEncodingsClipRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='min', full_name='object_detection.protos.ConvolutionalBoxPredictor.BoxEncodingsClipRange.min', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max', full_name='object_detection.protos.ConvolutionalBoxPredictor.BoxEncodingsClipRange.max', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1029, + serialized_end=1078, +) + +_CONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor( + name='ConvolutionalBoxPredictor', + full_name='object_detection.protos.ConvolutionalBoxPredictor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='conv_hyperparams', full_name='object_detection.protos.ConvolutionalBoxPredictor.conv_hyperparams', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='min_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.min_depth', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.max_depth', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_layers_before_predictor', full_name='object_detection.protos.ConvolutionalBoxPredictor.num_layers_before_predictor', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='use_dropout', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_dropout', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dropout_keep_probability', full_name='object_detection.protos.ConvolutionalBoxPredictor.dropout_keep_probability', index=5, + number=6, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0.8), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernel_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.kernel_size', index=6, + number=7, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='box_code_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.box_code_size', index=7, + number=8, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=4, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='apply_sigmoid_to_scores', full_name='object_detection.protos.ConvolutionalBoxPredictor.apply_sigmoid_to_scores', index=8, + number=9, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='class_prediction_bias_init', full_name='object_detection.protos.ConvolutionalBoxPredictor.class_prediction_bias_init', index=9, + number=10, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='use_depthwise', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_depthwise', index=10, + number=11, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='box_encodings_clip_range', full_name='object_detection.protos.ConvolutionalBoxPredictor.box_encodings_clip_range', index=11, + number=12, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=519, + serialized_end=1078, +) + + +_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE = _descriptor.Descriptor( + name='BoxEncodingsClipRange', + full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='min', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.min', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.max', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1029, + serialized_end=1078, +) + +_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor( + name='WeightSharedConvolutionalBoxPredictor', + full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='conv_hyperparams', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.conv_hyperparams', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_layers_before_predictor', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.num_layers_before_predictor', index=1, + number=4, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='depth', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.depth', index=2, + number=2, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernel_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.kernel_size', index=3, + number=7, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=3, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='box_code_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_code_size', index=4, + number=8, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=4, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='class_prediction_bias_init', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.class_prediction_bias_init', index=5, + number=10, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='use_dropout', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_dropout', index=6, + 
number=11, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dropout_keep_probability', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.dropout_keep_probability', index=7, + number=12, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0.8), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='share_prediction_tower', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.share_prediction_tower', index=8, + number=13, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='use_depthwise', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_depthwise', index=9, + number=14, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='score_converter', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.score_converter', index=10, + number=16, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='box_encodings_clip_range', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_encodings_clip_range', index=11, + number=17, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, ], + enum_types=[ + _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER, + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1081, + serialized_end=1797, +) + + +_MASKRCNNBOXPREDICTOR = _descriptor.Descriptor( + name='MaskRCNNBoxPredictor', + full_name='object_detection.protos.MaskRCNNBoxPredictor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='fc_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.fc_hyperparams', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='use_dropout', full_name='object_detection.protos.MaskRCNNBoxPredictor.use_dropout', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dropout_keep_probability', full_name='object_detection.protos.MaskRCNNBoxPredictor.dropout_keep_probability', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=True, 
default_value=float(0.5), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='box_code_size', full_name='object_detection.protos.MaskRCNNBoxPredictor.box_code_size', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=4, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='conv_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.conv_hyperparams', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='predict_instance_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_instance_masks', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mask_prediction_conv_depth', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_conv_depth', index=6, + number=7, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=256, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='predict_keypoints', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_keypoints', index=7, + number=8, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mask_height', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_height', index=8, + number=9, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=15, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mask_width', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_width', index=9, + number=10, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=15, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mask_prediction_num_conv_layers', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_num_conv_layers', index=10, + number=11, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=2, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='masks_are_class_agnostic', full_name='object_detection.protos.MaskRCNNBoxPredictor.masks_are_class_agnostic', index=11, + number=12, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='share_box_across_classes', full_name='object_detection.protos.MaskRCNNBoxPredictor.share_box_across_classes', index=12, + number=13, type=8, cpp_type=7, label=1, + 
has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='convolve_then_upsample_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.convolve_then_upsample_masks', index=13, + number=14, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1800, + serialized_end=2375, +) + + +_RFCNBOXPREDICTOR = _descriptor.Descriptor( + name='RfcnBoxPredictor', + full_name='object_detection.protos.RfcnBoxPredictor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='conv_hyperparams', full_name='object_detection.protos.RfcnBoxPredictor.conv_hyperparams', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_spatial_bins_height', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_height', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=3, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_spatial_bins_width', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_width', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=3, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='depth', full_name='object_detection.protos.RfcnBoxPredictor.depth', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=1024, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='box_code_size', full_name='object_detection.protos.RfcnBoxPredictor.box_code_size', index=4, + number=5, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=4, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='crop_height', full_name='object_detection.protos.RfcnBoxPredictor.crop_height', index=5, + number=6, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=12, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='crop_width', full_name='object_detection.protos.RfcnBoxPredictor.crop_width', index=6, + number=7, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=12, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2378, + serialized_end=2627, +) + 
+_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].message_type = _CONVOLUTIONALBOXPREDICTOR +_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].message_type = _MASKRCNNBOXPREDICTOR +_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].message_type = _RFCNBOXPREDICTOR +_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR +_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append( + _BOXPREDICTOR.fields_by_name['convolutional_box_predictor']) +_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'] +_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append( + _BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor']) +_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'] +_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append( + _BOXPREDICTOR.fields_by_name['rfcn_box_predictor']) +_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'] +_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append( + _BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor']) +_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'] +_CONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE.containing_type = _CONVOLUTIONALBOXPREDICTOR +_CONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS +_CONVOLUTIONALBOXPREDICTOR.fields_by_name['box_encodings_clip_range'].message_type = _CONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE +_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR +_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS +_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['score_converter'].enum_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER +_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['box_encodings_clip_range'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE +_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR +_MASKRCNNBOXPREDICTOR.fields_by_name['fc_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS +_MASKRCNNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS +_RFCNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS +DESCRIPTOR.message_types_by_name['BoxPredictor'] = _BOXPREDICTOR +DESCRIPTOR.message_types_by_name['ConvolutionalBoxPredictor'] = _CONVOLUTIONALBOXPREDICTOR +DESCRIPTOR.message_types_by_name['WeightSharedConvolutionalBoxPredictor'] = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR +DESCRIPTOR.message_types_by_name['MaskRCNNBoxPredictor'] = _MASKRCNNBOXPREDICTOR +DESCRIPTOR.message_types_by_name['RfcnBoxPredictor'] = _RFCNBOXPREDICTOR + +BoxPredictor = _reflection.GeneratedProtocolMessageType('BoxPredictor', (_message.Message,), dict( + DESCRIPTOR = _BOXPREDICTOR, + __module__ = 'object_detection.protos.box_predictor_pb2' + # 
@@protoc_insertion_point(class_scope:object_detection.protos.BoxPredictor) + )) +_sym_db.RegisterMessage(BoxPredictor) + +ConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('ConvolutionalBoxPredictor', (_message.Message,), dict( + + BoxEncodingsClipRange = _reflection.GeneratedProtocolMessageType('BoxEncodingsClipRange', (_message.Message,), dict( + DESCRIPTOR = _CONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, + __module__ = 'object_detection.protos.box_predictor_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.ConvolutionalBoxPredictor.BoxEncodingsClipRange) + )) + , + DESCRIPTOR = _CONVOLUTIONALBOXPREDICTOR, + __module__ = 'object_detection.protos.box_predictor_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.ConvolutionalBoxPredictor) + )) +_sym_db.RegisterMessage(ConvolutionalBoxPredictor) +_sym_db.RegisterMessage(ConvolutionalBoxPredictor.BoxEncodingsClipRange) + +WeightSharedConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('WeightSharedConvolutionalBoxPredictor', (_message.Message,), dict( + + BoxEncodingsClipRange = _reflection.GeneratedProtocolMessageType('BoxEncodingsClipRange', (_message.Message,), dict( + DESCRIPTOR = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, + __module__ = 'object_detection.protos.box_predictor_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange) + )) + , + DESCRIPTOR = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR, + __module__ = 'object_detection.protos.box_predictor_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor) + )) +_sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor) +_sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange) + +MaskRCNNBoxPredictor = _reflection.GeneratedProtocolMessageType('MaskRCNNBoxPredictor', (_message.Message,), dict( + DESCRIPTOR = _MASKRCNNBOXPREDICTOR, + __module__ = 'object_detection.protos.box_predictor_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.MaskRCNNBoxPredictor) + )) +_sym_db.RegisterMessage(MaskRCNNBoxPredictor) + +RfcnBoxPredictor = _reflection.GeneratedProtocolMessageType('RfcnBoxPredictor', (_message.Message,), dict( + DESCRIPTOR = _RFCNBOXPREDICTOR, + __module__ = 'object_detection.protos.box_predictor_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.RfcnBoxPredictor) + )) +_sym_db.RegisterMessage(RfcnBoxPredictor) + + +# @@protoc_insertion_point(module_scope) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/box_predictor_pb2.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/box_predictor_pb2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d6335bc2bcb04daa98876b2c098bd0f724f208c Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/box_predictor_pb2.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/calibration.proto b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/calibration.proto new file mode 100644 index 0000000000000000000000000000000000000000..6025117013fb4fbce726fe322d33a7f2e2218830 --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/calibration.proto @@ -0,0 +1,90 @@ +// These protos contain the calibration parameters necessary for 
transforming +// a model's original detection scores or logits. The parameters result from +// fitting a calibration function on the model's outputs. + +syntax = "proto2"; + +package object_detection.protos; + +// Message wrapper for various calibration configurations. +message CalibrationConfig { + oneof calibrator { + // Class-agnostic calibration via linear interpolation (usually output from + // isotonic regression). + FunctionApproximation function_approximation = 1; + + // Per-class calibration via linear interpolation. + ClassIdFunctionApproximations class_id_function_approximations = 2; + + // Class-agnostic sigmoid calibration. + SigmoidCalibration sigmoid_calibration = 3; + + // Per-class sigmoid calibration. + ClassIdSigmoidCalibrations class_id_sigmoid_calibrations = 4; + + // Temperature scaling calibration. + TemperatureScalingCalibration temperature_scaling_calibration = 5; + } +} + +// Message for class-agnostic domain/range mapping for function +// approximations. +message FunctionApproximation { + // Message mapping class labels to indices + optional XYPairs x_y_pairs = 1; +} + +// Message for class-specific domain/range mapping for function +// approximations. +message ClassIdFunctionApproximations { + // Message mapping class ids to indices. + map<int32, XYPairs> class_id_xy_pairs_map = 1; +} + +// Message for class-agnostic Sigmoid Calibration. +message SigmoidCalibration { + // Message mapping class index to Sigmoid Parameters + optional SigmoidParameters sigmoid_parameters = 1; +} + +// Message for class-specific Sigmoid Calibration. +message ClassIdSigmoidCalibrations { + // Message mapping class index to Sigmoid Parameters. + map<int32, SigmoidParameters> class_id_sigmoid_parameters_map = 1; +} + +// Message for Temperature Scaling Calibration. +message TemperatureScalingCalibration { + optional float scaler = 1; +} + +// Description of data used to fit the calibration model. CLASS_SPECIFIC +// indicates that the calibration parameters are derived from detections +// pertaining to a single class. ALL_CLASSES indicates that parameters were +// obtained by fitting a model on detections from all classes (including the +// background class). +enum TrainingDataType { + DATA_TYPE_UNKNOWN = 0; + ALL_CLASSES = 1; + CLASS_SPECIFIC = 2; +} + +// Message to store a domain/range pair for function to be approximated. +message XYPairs { + message XYPair { + optional float x = 1; + optional float y = 2; + } + + // Sequence of x/y pairs for function approximation. + repeated XYPair x_y_pair = 1; + + // Description of data used to fit the calibration model. + optional TrainingDataType training_data_type = 2; +} + +// Message defining parameters for sigmoid calibration. +message SigmoidParameters { + optional float a = 1 [default = -1.0]; + optional float b = 2 [default = 0.0]; +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/calibration_pb2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/calibration_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..3c381f2197a5a557e82a0b1e5c90b9f737e9cfdd --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/calibration_pb2.py @@ -0,0 +1,589 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT!
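The calibration.proto added above only stores the fitted calibration parameters; how they are applied to detection scores is defined elsewhere, by the calibration builder in the object_detection package. A minimal sketch, assuming the common logistic form 1 / (1 + exp(a * score + b)) for SigmoidParameters (with the proto defaults a = -1.0, b = 0.0 this reduces to the standard sigmoid), could read the parameters from a text-format CalibrationConfig like this:

# Minimal sketch (assumed logistic form, not the package's actual builder):
# read SigmoidParameters from a text-format CalibrationConfig and rescale a score.
import math
from google.protobuf import text_format
from object_detection.protos import calibration_pb2

calib = text_format.Parse(
    "sigmoid_calibration { sigmoid_parameters { a: -1.0 b: 0.0 } }",
    calibration_pb2.CalibrationConfig())
params = calib.sigmoid_calibration.sigmoid_parameters

def calibrate(score):
    # Assumed mapping; with a = -1 and b = 0 this is the standard sigmoid.
    return 1.0 / (1.0 + math.exp(params.a * score + params.b))

print(round(calibrate(2.0), 3))  # 0.881 with the default parameters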
+# source: object_detection/protos/calibration.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='object_detection/protos/calibration.proto', + package='object_detection.protos', + syntax='proto2', + serialized_pb=_b('\n)object_detection/protos/calibration.proto\x12\x17object_detection.protos\"\xe4\x03\n\x11\x43\x61librationConfig\x12P\n\x16\x66unction_approximation\x18\x01 \x01(\x0b\x32..object_detection.protos.FunctionApproximationH\x00\x12\x62\n class_id_function_approximations\x18\x02 \x01(\x0b\x32\x36.object_detection.protos.ClassIdFunctionApproximationsH\x00\x12J\n\x13sigmoid_calibration\x18\x03 \x01(\x0b\x32+.object_detection.protos.SigmoidCalibrationH\x00\x12\\\n\x1d\x63lass_id_sigmoid_calibrations\x18\x04 \x01(\x0b\x32\x33.object_detection.protos.ClassIdSigmoidCalibrationsH\x00\x12\x61\n\x1ftemperature_scaling_calibration\x18\x05 \x01(\x0b\x32\x36.object_detection.protos.TemperatureScalingCalibrationH\x00\x42\x0c\n\ncalibrator\"L\n\x15\x46unctionApproximation\x12\x33\n\tx_y_pairs\x18\x01 \x01(\x0b\x32 .object_detection.protos.XYPairs\"\xe9\x01\n\x1d\x43lassIdFunctionApproximations\x12l\n\x15\x63lass_id_xy_pairs_map\x18\x01 \x03(\x0b\x32M.object_detection.protos.ClassIdFunctionApproximations.ClassIdXyPairsMapEntry\x1aZ\n\x16\x43lassIdXyPairsMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12/\n\x05value\x18\x02 \x01(\x0b\x32 .object_detection.protos.XYPairs:\x02\x38\x01\"\\\n\x12SigmoidCalibration\x12\x46\n\x12sigmoid_parameters\x18\x01 \x01(\x0b\x32*.object_detection.protos.SigmoidParameters\"\x8b\x02\n\x1a\x43lassIdSigmoidCalibrations\x12}\n\x1f\x63lass_id_sigmoid_parameters_map\x18\x01 \x03(\x0b\x32T.object_detection.protos.ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry\x1an\n ClassIdSigmoidParametersMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x39\n\x05value\x18\x02 \x01(\x0b\x32*.object_detection.protos.SigmoidParameters:\x02\x38\x01\"/\n\x1dTemperatureScalingCalibration\x12\x0e\n\x06scaler\x18\x01 \x01(\x02\"\xab\x01\n\x07XYPairs\x12\x39\n\x08x_y_pair\x18\x01 \x03(\x0b\x32\'.object_detection.protos.XYPairs.XYPair\x12\x45\n\x12training_data_type\x18\x02 \x01(\x0e\x32).object_detection.protos.TrainingDataType\x1a\x1e\n\x06XYPair\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\"0\n\x11SigmoidParameters\x12\r\n\x01\x61\x18\x01 \x01(\x02:\x02-1\x12\x0c\n\x01\x62\x18\x02 \x01(\x02:\x01\x30*N\n\x10TrainingDataType\x12\x15\n\x11\x44\x41TA_TYPE_UNKNOWN\x10\x00\x12\x0f\n\x0b\x41LL_CLASSES\x10\x01\x12\x12\n\x0e\x43LASS_SPECIFIC\x10\x02') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +_TRAININGDATATYPE = _descriptor.EnumDescriptor( + name='TrainingDataType', + full_name='object_detection.protos.TrainingDataType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='DATA_TYPE_UNKNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ALL_CLASSES', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CLASS_SPECIFIC', index=2, number=2, + 
options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1508, + serialized_end=1586, +) +_sym_db.RegisterEnumDescriptor(_TRAININGDATATYPE) + +TrainingDataType = enum_type_wrapper.EnumTypeWrapper(_TRAININGDATATYPE) +DATA_TYPE_UNKNOWN = 0 +ALL_CLASSES = 1 +CLASS_SPECIFIC = 2 + + + +_CALIBRATIONCONFIG = _descriptor.Descriptor( + name='CalibrationConfig', + full_name='object_detection.protos.CalibrationConfig', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='function_approximation', full_name='object_detection.protos.CalibrationConfig.function_approximation', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='class_id_function_approximations', full_name='object_detection.protos.CalibrationConfig.class_id_function_approximations', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sigmoid_calibration', full_name='object_detection.protos.CalibrationConfig.sigmoid_calibration', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='class_id_sigmoid_calibrations', full_name='object_detection.protos.CalibrationConfig.class_id_sigmoid_calibrations', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='temperature_scaling_calibration', full_name='object_detection.protos.CalibrationConfig.temperature_scaling_calibration', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='calibrator', full_name='object_detection.protos.CalibrationConfig.calibrator', + index=0, containing_type=None, fields=[]), + ], + serialized_start=71, + serialized_end=555, +) + + +_FUNCTIONAPPROXIMATION = _descriptor.Descriptor( + name='FunctionApproximation', + full_name='object_detection.protos.FunctionApproximation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='x_y_pairs', full_name='object_detection.protos.FunctionApproximation.x_y_pairs', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=557, + serialized_end=633, +) + + 
+_CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY = _descriptor.Descriptor( + name='ClassIdXyPairsMapEntry', + full_name='object_detection.protos.ClassIdFunctionApproximations.ClassIdXyPairsMapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='object_detection.protos.ClassIdFunctionApproximations.ClassIdXyPairsMapEntry.key', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='object_detection.protos.ClassIdFunctionApproximations.ClassIdXyPairsMapEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=779, + serialized_end=869, +) + +_CLASSIDFUNCTIONAPPROXIMATIONS = _descriptor.Descriptor( + name='ClassIdFunctionApproximations', + full_name='object_detection.protos.ClassIdFunctionApproximations', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='class_id_xy_pairs_map', full_name='object_detection.protos.ClassIdFunctionApproximations.class_id_xy_pairs_map', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=636, + serialized_end=869, +) + + +_SIGMOIDCALIBRATION = _descriptor.Descriptor( + name='SigmoidCalibration', + full_name='object_detection.protos.SigmoidCalibration', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sigmoid_parameters', full_name='object_detection.protos.SigmoidCalibration.sigmoid_parameters', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=871, + serialized_end=963, +) + + +_CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY = _descriptor.Descriptor( + name='ClassIdSigmoidParametersMapEntry', + full_name='object_detection.protos.ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='object_detection.protos.ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry.key', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='object_detection.protos.ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1123, + serialized_end=1233, +) + +_CLASSIDSIGMOIDCALIBRATIONS = _descriptor.Descriptor( + name='ClassIdSigmoidCalibrations', + full_name='object_detection.protos.ClassIdSigmoidCalibrations', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='class_id_sigmoid_parameters_map', full_name='object_detection.protos.ClassIdSigmoidCalibrations.class_id_sigmoid_parameters_map', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=966, + serialized_end=1233, +) + + +_TEMPERATURESCALINGCALIBRATION = _descriptor.Descriptor( + name='TemperatureScalingCalibration', + full_name='object_detection.protos.TemperatureScalingCalibration', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='scaler', full_name='object_detection.protos.TemperatureScalingCalibration.scaler', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1235, + serialized_end=1282, +) + + +_XYPAIRS_XYPAIR = _descriptor.Descriptor( + name='XYPair', + full_name='object_detection.protos.XYPairs.XYPair', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='x', full_name='object_detection.protos.XYPairs.XYPair.x', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='y', full_name='object_detection.protos.XYPairs.XYPair.y', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1426, + serialized_end=1456, +) + +_XYPAIRS = _descriptor.Descriptor( + name='XYPairs', + full_name='object_detection.protos.XYPairs', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='x_y_pair', full_name='object_detection.protos.XYPairs.x_y_pair', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='training_data_type', full_name='object_detection.protos.XYPairs.training_data_type', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_XYPAIRS_XYPAIR, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1285, + serialized_end=1456, +) + + +_SIGMOIDPARAMETERS = _descriptor.Descriptor( + name='SigmoidParameters', + full_name='object_detection.protos.SigmoidParameters', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='a', full_name='object_detection.protos.SigmoidParameters.a', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(-1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='b', full_name='object_detection.protos.SigmoidParameters.b', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1458, + serialized_end=1506, +) + +_CALIBRATIONCONFIG.fields_by_name['function_approximation'].message_type = _FUNCTIONAPPROXIMATION +_CALIBRATIONCONFIG.fields_by_name['class_id_function_approximations'].message_type = _CLASSIDFUNCTIONAPPROXIMATIONS +_CALIBRATIONCONFIG.fields_by_name['sigmoid_calibration'].message_type = _SIGMOIDCALIBRATION +_CALIBRATIONCONFIG.fields_by_name['class_id_sigmoid_calibrations'].message_type = _CLASSIDSIGMOIDCALIBRATIONS +_CALIBRATIONCONFIG.fields_by_name['temperature_scaling_calibration'].message_type = _TEMPERATURESCALINGCALIBRATION +_CALIBRATIONCONFIG.oneofs_by_name['calibrator'].fields.append( + _CALIBRATIONCONFIG.fields_by_name['function_approximation']) +_CALIBRATIONCONFIG.fields_by_name['function_approximation'].containing_oneof = _CALIBRATIONCONFIG.oneofs_by_name['calibrator'] +_CALIBRATIONCONFIG.oneofs_by_name['calibrator'].fields.append( + _CALIBRATIONCONFIG.fields_by_name['class_id_function_approximations']) +_CALIBRATIONCONFIG.fields_by_name['class_id_function_approximations'].containing_oneof = _CALIBRATIONCONFIG.oneofs_by_name['calibrator'] +_CALIBRATIONCONFIG.oneofs_by_name['calibrator'].fields.append( + _CALIBRATIONCONFIG.fields_by_name['sigmoid_calibration']) +_CALIBRATIONCONFIG.fields_by_name['sigmoid_calibration'].containing_oneof = _CALIBRATIONCONFIG.oneofs_by_name['calibrator'] +_CALIBRATIONCONFIG.oneofs_by_name['calibrator'].fields.append( + _CALIBRATIONCONFIG.fields_by_name['class_id_sigmoid_calibrations']) +_CALIBRATIONCONFIG.fields_by_name['class_id_sigmoid_calibrations'].containing_oneof = 
_CALIBRATIONCONFIG.oneofs_by_name['calibrator'] +_CALIBRATIONCONFIG.oneofs_by_name['calibrator'].fields.append( + _CALIBRATIONCONFIG.fields_by_name['temperature_scaling_calibration']) +_CALIBRATIONCONFIG.fields_by_name['temperature_scaling_calibration'].containing_oneof = _CALIBRATIONCONFIG.oneofs_by_name['calibrator'] +_FUNCTIONAPPROXIMATION.fields_by_name['x_y_pairs'].message_type = _XYPAIRS +_CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY.fields_by_name['value'].message_type = _XYPAIRS +_CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY.containing_type = _CLASSIDFUNCTIONAPPROXIMATIONS +_CLASSIDFUNCTIONAPPROXIMATIONS.fields_by_name['class_id_xy_pairs_map'].message_type = _CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY +_SIGMOIDCALIBRATION.fields_by_name['sigmoid_parameters'].message_type = _SIGMOIDPARAMETERS +_CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY.fields_by_name['value'].message_type = _SIGMOIDPARAMETERS +_CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY.containing_type = _CLASSIDSIGMOIDCALIBRATIONS +_CLASSIDSIGMOIDCALIBRATIONS.fields_by_name['class_id_sigmoid_parameters_map'].message_type = _CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY +_XYPAIRS_XYPAIR.containing_type = _XYPAIRS +_XYPAIRS.fields_by_name['x_y_pair'].message_type = _XYPAIRS_XYPAIR +_XYPAIRS.fields_by_name['training_data_type'].enum_type = _TRAININGDATATYPE +DESCRIPTOR.message_types_by_name['CalibrationConfig'] = _CALIBRATIONCONFIG +DESCRIPTOR.message_types_by_name['FunctionApproximation'] = _FUNCTIONAPPROXIMATION +DESCRIPTOR.message_types_by_name['ClassIdFunctionApproximations'] = _CLASSIDFUNCTIONAPPROXIMATIONS +DESCRIPTOR.message_types_by_name['SigmoidCalibration'] = _SIGMOIDCALIBRATION +DESCRIPTOR.message_types_by_name['ClassIdSigmoidCalibrations'] = _CLASSIDSIGMOIDCALIBRATIONS +DESCRIPTOR.message_types_by_name['TemperatureScalingCalibration'] = _TEMPERATURESCALINGCALIBRATION +DESCRIPTOR.message_types_by_name['XYPairs'] = _XYPAIRS +DESCRIPTOR.message_types_by_name['SigmoidParameters'] = _SIGMOIDPARAMETERS +DESCRIPTOR.enum_types_by_name['TrainingDataType'] = _TRAININGDATATYPE + +CalibrationConfig = _reflection.GeneratedProtocolMessageType('CalibrationConfig', (_message.Message,), dict( + DESCRIPTOR = _CALIBRATIONCONFIG, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.CalibrationConfig) + )) +_sym_db.RegisterMessage(CalibrationConfig) + +FunctionApproximation = _reflection.GeneratedProtocolMessageType('FunctionApproximation', (_message.Message,), dict( + DESCRIPTOR = _FUNCTIONAPPROXIMATION, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.FunctionApproximation) + )) +_sym_db.RegisterMessage(FunctionApproximation) + +ClassIdFunctionApproximations = _reflection.GeneratedProtocolMessageType('ClassIdFunctionApproximations', (_message.Message,), dict( + + ClassIdXyPairsMapEntry = _reflection.GeneratedProtocolMessageType('ClassIdXyPairsMapEntry', (_message.Message,), dict( + DESCRIPTOR = _CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.ClassIdFunctionApproximations.ClassIdXyPairsMapEntry) + )) + , + DESCRIPTOR = _CLASSIDFUNCTIONAPPROXIMATIONS, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.ClassIdFunctionApproximations) + 
)) +_sym_db.RegisterMessage(ClassIdFunctionApproximations) +_sym_db.RegisterMessage(ClassIdFunctionApproximations.ClassIdXyPairsMapEntry) + +SigmoidCalibration = _reflection.GeneratedProtocolMessageType('SigmoidCalibration', (_message.Message,), dict( + DESCRIPTOR = _SIGMOIDCALIBRATION, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.SigmoidCalibration) + )) +_sym_db.RegisterMessage(SigmoidCalibration) + +ClassIdSigmoidCalibrations = _reflection.GeneratedProtocolMessageType('ClassIdSigmoidCalibrations', (_message.Message,), dict( + + ClassIdSigmoidParametersMapEntry = _reflection.GeneratedProtocolMessageType('ClassIdSigmoidParametersMapEntry', (_message.Message,), dict( + DESCRIPTOR = _CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry) + )) + , + DESCRIPTOR = _CLASSIDSIGMOIDCALIBRATIONS, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.ClassIdSigmoidCalibrations) + )) +_sym_db.RegisterMessage(ClassIdSigmoidCalibrations) +_sym_db.RegisterMessage(ClassIdSigmoidCalibrations.ClassIdSigmoidParametersMapEntry) + +TemperatureScalingCalibration = _reflection.GeneratedProtocolMessageType('TemperatureScalingCalibration', (_message.Message,), dict( + DESCRIPTOR = _TEMPERATURESCALINGCALIBRATION, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.TemperatureScalingCalibration) + )) +_sym_db.RegisterMessage(TemperatureScalingCalibration) + +XYPairs = _reflection.GeneratedProtocolMessageType('XYPairs', (_message.Message,), dict( + + XYPair = _reflection.GeneratedProtocolMessageType('XYPair', (_message.Message,), dict( + DESCRIPTOR = _XYPAIRS_XYPAIR, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.XYPairs.XYPair) + )) + , + DESCRIPTOR = _XYPAIRS, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.XYPairs) + )) +_sym_db.RegisterMessage(XYPairs) +_sym_db.RegisterMessage(XYPairs.XYPair) + +SigmoidParameters = _reflection.GeneratedProtocolMessageType('SigmoidParameters', (_message.Message,), dict( + DESCRIPTOR = _SIGMOIDPARAMETERS, + __module__ = 'object_detection.protos.calibration_pb2' + # @@protoc_insertion_point(class_scope:object_detection.protos.SigmoidParameters) + )) +_sym_db.RegisterMessage(SigmoidParameters) + + +_CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY.has_options = True +_CLASSIDFUNCTIONAPPROXIMATIONS_CLASSIDXYPAIRSMAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY.has_options = True +_CLASSIDSIGMOIDCALIBRATIONS_CLASSIDSIGMOIDPARAMETERSMAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/calibration_pb2.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/calibration_pb2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..495a968a9cf45e10fa172a72ae62bfaf9ab58611 Binary files /dev/null and 
b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/calibration_pb2.pyc differ diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/center_net.proto b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/center_net.proto new file mode 100644 index 0000000000000000000000000000000000000000..d72001fafa1b71f70d2c45a8457ba8e14793c16a --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/center_net.proto @@ -0,0 +1,296 @@ +syntax = "proto2"; + +package object_detection.protos; + +import "object_detection/protos/image_resizer.proto"; +import "object_detection/protos/losses.proto"; + +// Configuration for the CenterNet meta architecture from the "Objects as +// Points" paper [1] +// [1]: https://arxiv.org/abs/1904.07850 + +// Next Id = 16 +message CenterNet { + // Number of classes to predict. + optional int32 num_classes = 1; + + // Feature extractor config. + optional CenterNetFeatureExtractor feature_extractor = 2; + + // Image resizer for preprocessing the input image. + optional ImageResizer image_resizer = 3; + + // If set, all task heads will be constructed with separable convolutions. + optional bool use_depthwise = 13 [default = false]; + + // Indicates whether or not to use the sparse version of the Op that computes + // the center heatmaps. The sparse version scales better with number of + // channels in the heatmap, but in some cases is known to cause an OOM error. + // TODO(b/170989061) When bug is fixed, make this the default behavior. + optional bool compute_heatmap_sparse = 15 [default = false]; + + // Parameters which are related to object detection task. + message ObjectDetection { + // The original fields are moved to ObjectCenterParams or deleted. + reserved 2, 5, 6, 7; + + // Weight of the task loss. The total loss of the model will be the + // summation of task losses weighted by the weights. + optional float task_loss_weight = 1 [default = 1.0]; + + // Weight for the offset localization loss. + optional float offset_loss_weight = 3 [default = 1.0]; + + // Weight for the height/width localization loss. + optional float scale_loss_weight = 4 [default = 0.1]; + + // Localization loss configuration for object scale and offset losses. + optional LocalizationLoss localization_loss = 8; + } + optional ObjectDetection object_detection_task = 4; + + // Parameters related to object center prediction. This is required for both + // object detection and keypoint estimation tasks. + message ObjectCenterParams { + // Weight for the object center loss. + optional float object_center_loss_weight = 1 [default = 1.0]; + + // Classification loss configuration for object center loss. + optional ClassificationLoss classification_loss = 2; + + // The initial bias value of the convlution kernel of the class heatmap + // prediction head. -2.19 corresponds to predicting foreground with + // a probability of 0.1. See "Focal Loss for Dense Object Detection" + // at https://arxiv.org/abs/1708.02002. + optional float heatmap_bias_init = 3 [default = -2.19]; + + // The minimum IOU overlap boxes need to have to not be penalized. + optional float min_box_overlap_iou = 4 [default = 0.7]; + + // Maximum number of boxes to predict. + optional int32 max_box_predictions = 5 [default = 100]; + + // If set, loss is only computed for the labeled classes. 
+ optional bool use_labeled_classes = 6 [default = false]; + } + optional ObjectCenterParams object_center_params = 5; + + // Path of the file that conatins the label map along with the keypoint + // information, including the keypoint indices, corresponding labels, and the + // corresponding class. The file should be the same one as used in the input + // pipeline. Note that a plain text of StringIntLabelMap proto is expected in + // this file. + // It is required only if the keypoint estimation task is specified. + optional string keypoint_label_map_path = 6; + + // Parameters which are related to keypoint estimation task. + message KeypointEstimation { + // Name of the task, e.g. "human pose". Note that the task name should be + // unique to each keypoint task. + optional string task_name = 1; + + // Weight of the task loss. The total loss of the model will be their + // summation of task losses weighted by the weights. + optional float task_loss_weight = 2 [default = 1.0]; + + // Loss configuration for keypoint heatmap, offset, regression losses. Note + // that the localization loss is used for offset/regression losses and + // classification loss is used for heatmap loss. + optional Loss loss = 3; + + // The name of the class that contains the keypoints for this task. This is + // used to retrieve the corresponding keypoint indices from the label map. + // Note that this corresponds to the "name" field, not "display_name". + optional string keypoint_class_name = 4; + + // The standard deviation of the Gaussian kernel used to generate the + // keypoint heatmap. The unit is the pixel in the output image. It is to + // provide the flexibility of using different sizes of Gaussian kernel for + // each keypoint class. Note that if provided, the keypoint standard + // deviations will be overridden by the specified values here, otherwise, + // the default value 5.0 will be used. + // TODO(yuhuic): Update the default value once we found the best value. + map keypoint_label_to_std = 5; + + // Loss weights corresponding to different heads. + optional float keypoint_regression_loss_weight = 6 [default = 1.0]; + optional float keypoint_heatmap_loss_weight = 7 [default = 1.0]; + optional float keypoint_offset_loss_weight = 8 [default = 1.0]; + + // The initial bias value of the convolution kernel of the keypoint heatmap + // prediction head. -2.19 corresponds to predicting foreground with + // a probability of 0.1. See "Focal Loss for Dense Object Detection" + // at https://arxiv.org/abs/1708.02002. + optional float heatmap_bias_init = 9 [default = -2.19]; + + // The heatmap score threshold for a keypoint to become a valid candidate. + optional float keypoint_candidate_score_threshold = 10 [default = 0.1]; + + // The maximum number of candidates to retrieve for each keypoint. + optional int32 num_candidates_per_keypoint = 11 [default = 100]; + + // Max pool kernel size to use to pull off peak score locations in a + // neighborhood (independently for each keypoint types). + optional int32 peak_max_pool_kernel_size = 12 [default = 3]; + + // The default score to use for regressed keypoints that are not + // successfully snapped to a nearby candidate. + optional float unmatched_keypoint_score = 13 [default = 0.1]; + + // The multiplier to expand the bounding boxes (either the provided boxes or + // those which tightly cover the regressed keypoints). Note that new + // expanded box for an instance becomes the feasible search window for all + // associated keypoints. 
+ optional float box_scale = 14 [default = 1.2]; + + // The scale parameter that multiplies the largest dimension of a bounding + // box. The resulting distance becomes a search radius for candidates in the + // vicinity of each regressed keypoint. + optional float candidate_search_scale = 15 [default = 0.3]; + + // One of ['min_distance', 'score_distance_ratio'] indicating how to select + // the keypoint candidate. + optional string candidate_ranking_mode = 16 [default = "min_distance"]; + + // The radius (in the unit of output pixel) around heatmap peak to assign + // the offset targets. If set 0, then the offset target will only be + // assigned to the heatmap peak (same behavior as the original paper). + optional int32 offset_peak_radius = 17 [default = 0]; + + // Indicates whether to assign offsets for each keypoint channel + // separately. If set False, the output offset target has the shape + // [batch_size, out_height, out_width, 2] (same behavior as the original + // paper). If set True, the output offset target has the shape [batch_size, + // out_height, out_width, 2 * num_keypoints] (recommended when the + // offset_peak_radius is not zero). + optional bool per_keypoint_offset = 18 [default = false]; + } + repeated KeypointEstimation keypoint_estimation_task = 7; + + // Parameters which are related to mask estimation task. + // Note: Currently, CenterNet supports a weak instance segmentation, where + // semantic segmentation masks are estimated, and then cropped based on + // bounding box detections. Therefore, it is possible for the same image + // pixel to be assigned to multiple instances. + message MaskEstimation { + // Weight of the task loss. The total loss of the model will be their + // summation of task losses weighted by the weights. + optional float task_loss_weight = 1 [default = 1.0]; + + // Classification loss configuration for segmentation loss. + optional ClassificationLoss classification_loss = 2; + + // Each instance mask (one per detection) is cropped and resized (bilinear + // resampling) from the predicted segmentation feature map. After + // resampling, the masks are binarized with the provided score threshold. + optional int32 mask_height = 4 [default = 256]; + optional int32 mask_width = 5 [default = 256]; + optional float score_threshold = 6 [default = 0.5]; + + // The initial bias value of the convlution kernel of the class heatmap + // prediction head. -2.19 corresponds to predicting foreground with + // a probability of 0.1. + optional float heatmap_bias_init = 3 [default = -2.19]; + } + optional MaskEstimation mask_estimation_task = 8; + + // Parameters which are related to DensePose estimation task. + // http://densepose.org/ + message DensePoseEstimation { + // Weight of the task loss. The total loss of the model will be their + // summation of task losses weighted by the weights. + optional float task_loss_weight = 1 [default = 1.0]; + + // Class ID (0-indexed) that corresponds to the object in the label map that + // contains DensePose data. + optional int32 class_id = 2; + + // Loss configuration for DensePose heatmap and regression losses. Note + // that the localization loss is used for surface coordinate losses and + // classification loss is used for part classification losses. + optional Loss loss = 3; + + // The number of body parts. + optional int32 num_parts = 4 [default = 24]; + + // Loss weights for the two DensePose heads. 
+ optional float part_loss_weight = 5 [default = 1.0]; + optional float coordinate_loss_weight = 6 [default = 1.0]; + + // Whether to upsample the prediction feature maps back to the original + // input dimension prior to applying loss. This has the benefit of + // maintaining finer groundtruth location information. + optional bool upsample_to_input_res = 7 [default = true]; + + // The initial bias value of the convlution kernel of the class heatmap + // prediction head. -2.19 corresponds to predicting foreground with + // a probability of 0.1. + optional float heatmap_bias_init = 8 [default = -2.19]; + } + optional DensePoseEstimation densepose_estimation_task = 9; + + // Parameters which are related to tracking embedding estimation task. + // A Simple Baseline for Multi-Object Tracking [2] + // [2]: https://arxiv.org/abs/2004.01888 + message TrackEstimation { + // Weight of the task loss. The total loss of the model will be the + // summation of task losses weighted by the weights. + optional float task_loss_weight = 1 [default = 1.0]; + + // The maximun track ID of the datset. + optional int32 num_track_ids = 2; + + // The embedding size for re-identification (ReID) task in tracking. + optional int32 reid_embed_size = 3 [default = 128]; + + // The number of (fully-connected, batch-norm, relu) layers for track ID + // classification head. The output dimension of each intermediate FC layer + // will all be 'reid_embed_size'. The last FC layer will directly project to + // the track ID classification space of size 'num_track_ids' without + // batch-norm and relu layers. + optional int32 num_fc_layers = 4 [default = 1]; + + // Classification loss configuration for ReID loss. + optional ClassificationLoss classification_loss = 5; + } + optional TrackEstimation track_estimation_task = 10; + + // Temporal offset prediction head similar to CenterTrack. + // Currently our implementation adopts LSTM, different from original paper. + // See go/lstd-centernet for more details. + // Tracking Objects as Points [3] + // [3]: https://arxiv.org/abs/2004.01177 + message TemporalOffsetEstimation { + // Weight of the task loss. The total loss of the model will be the + // summation of task losses weighted by the weights. + optional float task_loss_weight = 1 [default = 1.0]; + + // Localization loss configuration for offset loss. + optional LocalizationLoss localization_loss = 2; + } + optional TemporalOffsetEstimation temporal_offset_task = 12; + + +} + +message CenterNetFeatureExtractor { + optional string type = 1; + + // Channel means to be subtracted from each image channel. If not specified, + // we use a default value of 0. + repeated float channel_means = 2; + + // Channel standard deviations. Each channel will be normalized by dividing + // it by its standard deviation. If not specified, we use a default value + // of 1. + repeated float channel_stds = 3; + + // If set, will change channel order to be [blue, green, red]. This can be + // useful to be compatible with some pre-trained feature extractors. + optional bool bgr_ordering = 4 [default = false]; + + // If set, the feature upsampling layers will be constructed with + // separable convolutions. This is typically applied to feature pyramid + // network if any. 
+ optional bool use_depthwise = 5 [default = false]; +} diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/center_net_pb2.py b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/center_net_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..425283b755e4b84df79aa8a21e74fea0a8efcbde --- /dev/null +++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/center_net_pb2.py @@ -0,0 +1,855 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: object_detection/protos/center_net.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from object_detection.protos import image_resizer_pb2 as object__detection_dot_protos_dot_image__resizer__pb2 +from object_detection.protos import losses_pb2 as object__detection_dot_protos_dot_losses__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='object_detection/protos/center_net.proto', + package='object_detection.protos', + syntax='proto2', + serialized_pb=_b('\n(object_detection/protos/center_net.proto\x12\x17object_detection.protos\x1a+object_detection/protos/image_resizer.proto\x1a$object_detection/protos/losses.proto\"\xc4\x17\n\tCenterNet\x12\x13\n\x0bnum_classes\x18\x01 \x01(\x05\x12M\n\x11\x66\x65\x61ture_extractor\x18\x02 \x01(\x0b\x32\x32.object_detection.protos.CenterNetFeatureExtractor\x12<\n\rimage_resizer\x18\x03 \x01(\x0b\x32%.object_detection.protos.ImageResizer\x12\x1c\n\ruse_depthwise\x18\r \x01(\x08:\x05\x66\x61lse\x12%\n\x16\x63ompute_heatmap_sparse\x18\x0f \x01(\x08:\x05\x66\x61lse\x12Q\n\x15object_detection_task\x18\x04 \x01(\x0b\x32\x32.object_detection.protos.CenterNet.ObjectDetection\x12S\n\x14object_center_params\x18\x05 \x01(\x0b\x32\x35.object_detection.protos.CenterNet.ObjectCenterParams\x12\x1f\n\x17keypoint_label_map_path\x18\x06 \x01(\t\x12W\n\x18keypoint_estimation_task\x18\x07 \x03(\x0b\x32\x35.object_detection.protos.CenterNet.KeypointEstimation\x12O\n\x14mask_estimation_task\x18\x08 \x01(\x0b\x32\x31.object_detection.protos.CenterNet.MaskEstimation\x12Y\n\x19\x64\x65nsepose_estimation_task\x18\t \x01(\x0b\x32\x36.object_detection.protos.CenterNet.DensePoseEstimation\x12Q\n\x15track_estimation_task\x18\n \x01(\x0b\x32\x32.object_detection.protos.CenterNet.TrackEstimation\x12Y\n\x14temporal_offset_task\x18\x0c \x01(\x0b\x32;.object_detection.protos.CenterNet.TemporalOffsetEstimation\x1a\xcb\x01\n\x0fObjectDetection\x12\x1b\n\x10task_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x12offset_loss_weight\x18\x03 \x01(\x02:\x01\x31\x12\x1e\n\x11scale_loss_weight\x18\x04 \x01(\x02:\x03\x30.1\x12\x44\n\x11localization_loss\x18\x08 \x01(\x0b\x32).object_detection.protos.LocalizationLossJ\x04\x08\x02\x10\x03J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x07\x10\x08\x1a\x8e\x02\n\x12ObjectCenterParams\x12$\n\x19object_center_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12H\n\x13\x63lassification_loss\x18\x02 \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x12 \n\x11heatmap_bias_init\x18\x03 \x01(\x02:\x05-2.19\x12 \n\x13min_box_overlap_iou\x18\x04 \x01(\x02:\x03\x30.7\x12 \n\x13max_box_predictions\x18\x05 
\x01(\x05:\x03\x31\x30\x30\x12\"\n\x13use_labeled_classes\x18\x06 \x01(\x08:\x05\x66\x61lse\x1a\xac\x06\n\x12KeypointEstimation\x12\x11\n\ttask_name\x18\x01 \x01(\t\x12\x1b\n\x10task_loss_weight\x18\x02 \x01(\x02:\x01\x31\x12+\n\x04loss\x18\x03 \x01(\x0b\x32\x1d.object_detection.protos.Loss\x12\x1b\n\x13keypoint_class_name\x18\x04 \x01(\t\x12l\n\x15keypoint_label_to_std\x18\x05 \x03(\x0b\x32M.object_detection.protos.CenterNet.KeypointEstimation.KeypointLabelToStdEntry\x12*\n\x1fkeypoint_regression_loss_weight\x18\x06 \x01(\x02:\x01\x31\x12\'\n\x1ckeypoint_heatmap_loss_weight\x18\x07 \x01(\x02:\x01\x31\x12&\n\x1bkeypoint_offset_loss_weight\x18\x08 \x01(\x02:\x01\x31\x12 \n\x11heatmap_bias_init\x18\t \x01(\x02:\x05-2.19\x12/\n\"keypoint_candidate_score_threshold\x18\n \x01(\x02:\x03\x30.1\x12(\n\x1bnum_candidates_per_keypoint\x18\x0b \x01(\x05:\x03\x31\x30\x30\x12$\n\x19peak_max_pool_kernel_size\x18\x0c \x01(\x05:\x01\x33\x12%\n\x18unmatched_keypoint_score\x18\r \x01(\x02:\x03\x30.1\x12\x16\n\tbox_scale\x18\x0e \x01(\x02:\x03\x31.2\x12#\n\x16\x63\x61ndidate_search_scale\x18\x0f \x01(\x02:\x03\x30.3\x12,\n\x16\x63\x61ndidate_ranking_mode\x18\x10 \x01(\t:\x0cmin_distance\x12\x1d\n\x12offset_peak_radius\x18\x11 \x01(\x05:\x01\x30\x12\"\n\x13per_keypoint_offset\x18\x12 \x01(\x08:\x05\x66\x61lse\x1a\x39\n\x17KeypointLabelToStdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1a\xea\x01\n\x0eMaskEstimation\x12\x1b\n\x10task_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12H\n\x13\x63lassification_loss\x18\x02 \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x12\x18\n\x0bmask_height\x18\x04 \x01(\x05:\x03\x32\x35\x36\x12\x17\n\nmask_width\x18\x05 \x01(\x05:\x03\x32\x35\x36\x12\x1c\n\x0fscore_threshold\x18\x06 \x01(\x02:\x03\x30.5\x12 \n\x11heatmap_bias_init\x18\x03 \x01(\x02:\x05-2.19\x1a\x8f\x02\n\x13\x44\x65nsePoseEstimation\x12\x1b\n\x10task_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x08\x63lass_id\x18\x02 \x01(\x05\x12+\n\x04loss\x18\x03 \x01(\x0b\x32\x1d.object_detection.protos.Loss\x12\x15\n\tnum_parts\x18\x04 \x01(\x05:\x02\x32\x34\x12\x1b\n\x10part_loss_weight\x18\x05 \x01(\x02:\x01\x31\x12!\n\x16\x63oordinate_loss_weight\x18\x06 \x01(\x02:\x01\x31\x12#\n\x15upsample_to_input_res\x18\x07 \x01(\x08:\x04true\x12 \n\x11heatmap_bias_init\x18\x08 \x01(\x02:\x05-2.19\x1a\xc7\x01\n\x0fTrackEstimation\x12\x1b\n\x10task_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\rnum_track_ids\x18\x02 \x01(\x05\x12\x1c\n\x0freid_embed_size\x18\x03 \x01(\x05:\x03\x31\x32\x38\x12\x18\n\rnum_fc_layers\x18\x04 \x01(\x05:\x01\x31\x12H\n\x13\x63lassification_loss\x18\x05 \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x1a}\n\x18TemporalOffsetEstimation\x12\x1b\n\x10task_loss_weight\x18\x01 \x01(\x02:\x01\x31\x12\x44\n\x11localization_loss\x18\x02 \x01(\x0b\x32).object_detection.protos.LocalizationLoss\"\x91\x01\n\x19\x43\x65nterNetFeatureExtractor\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x15\n\rchannel_means\x18\x02 \x03(\x02\x12\x14\n\x0c\x63hannel_stds\x18\x03 \x03(\x02\x12\x1b\n\x0c\x62gr_ordering\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\ruse_depthwise\x18\x05 \x01(\x08:\x05\x66\x61lse') + , + dependencies=[object__detection_dot_protos_dot_image__resizer__pb2.DESCRIPTOR,object__detection_dot_protos_dot_losses__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_CENTERNET_OBJECTDETECTION = _descriptor.Descriptor( + name='ObjectDetection', + full_name='object_detection.protos.CenterNet.ObjectDetection', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='task_loss_weight', full_name='object_detection.protos.CenterNet.ObjectDetection.task_loss_weight', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset_loss_weight', full_name='object_detection.protos.CenterNet.ObjectDetection.offset_loss_weight', index=1, + number=3, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scale_loss_weight', full_name='object_detection.protos.CenterNet.ObjectDetection.scale_loss_weight', index=2, + number=4, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0.1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='localization_loss', full_name='object_detection.protos.CenterNet.ObjectDetection.localization_loss', index=3, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1034, + serialized_end=1237, +) + +_CENTERNET_OBJECTCENTERPARAMS = _descriptor.Descriptor( + name='ObjectCenterParams', + full_name='object_detection.protos.CenterNet.ObjectCenterParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='object_center_loss_weight', full_name='object_detection.protos.CenterNet.ObjectCenterParams.object_center_loss_weight', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='classification_loss', full_name='object_detection.protos.CenterNet.ObjectCenterParams.classification_loss', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='heatmap_bias_init', full_name='object_detection.protos.CenterNet.ObjectCenterParams.heatmap_bias_init', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(-2.19), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='min_box_overlap_iou', full_name='object_detection.protos.CenterNet.ObjectCenterParams.min_box_overlap_iou', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0.7), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max_box_predictions', full_name='object_detection.protos.CenterNet.ObjectCenterParams.max_box_predictions', index=4, + 
number=5, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=100, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='use_labeled_classes', full_name='object_detection.protos.CenterNet.ObjectCenterParams.use_labeled_classes', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1240, + serialized_end=1510, +) + +_CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY = _descriptor.Descriptor( + name='KeypointLabelToStdEntry', + full_name='object_detection.protos.CenterNet.KeypointEstimation.KeypointLabelToStdEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='object_detection.protos.CenterNet.KeypointEstimation.KeypointLabelToStdEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='object_detection.protos.CenterNet.KeypointEstimation.KeypointLabelToStdEntry.value', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2268, + serialized_end=2325, +) + +_CENTERNET_KEYPOINTESTIMATION = _descriptor.Descriptor( + name='KeypointEstimation', + full_name='object_detection.protos.CenterNet.KeypointEstimation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='task_name', full_name='object_detection.protos.CenterNet.KeypointEstimation.task_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='task_loss_weight', full_name='object_detection.protos.CenterNet.KeypointEstimation.task_loss_weight', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='loss', full_name='object_detection.protos.CenterNet.KeypointEstimation.loss', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keypoint_class_name', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_class_name', index=3, + number=4, type=9, cpp_type=9, 
label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keypoint_label_to_std', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_label_to_std', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keypoint_regression_loss_weight', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_regression_loss_weight', index=5, + number=6, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keypoint_heatmap_loss_weight', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_heatmap_loss_weight', index=6, + number=7, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keypoint_offset_loss_weight', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_offset_loss_weight', index=7, + number=8, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='heatmap_bias_init', full_name='object_detection.protos.CenterNet.KeypointEstimation.heatmap_bias_init', index=8, + number=9, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(-2.19), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keypoint_candidate_score_threshold', full_name='object_detection.protos.CenterNet.KeypointEstimation.keypoint_candidate_score_threshold', index=9, + number=10, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0.1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_candidates_per_keypoint', full_name='object_detection.protos.CenterNet.KeypointEstimation.num_candidates_per_keypoint', index=10, + number=11, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=100, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='peak_max_pool_kernel_size', full_name='object_detection.protos.CenterNet.KeypointEstimation.peak_max_pool_kernel_size', index=11, + number=12, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=3, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='unmatched_keypoint_score', full_name='object_detection.protos.CenterNet.KeypointEstimation.unmatched_keypoint_score', index=12, + number=13, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0.1), + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='box_scale', full_name='object_detection.protos.CenterNet.KeypointEstimation.box_scale', index=13, + number=14, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1.2), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='candidate_search_scale', full_name='object_detection.protos.CenterNet.KeypointEstimation.candidate_search_scale', index=14, + number=15, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0.3), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='candidate_ranking_mode', full_name='object_detection.protos.CenterNet.KeypointEstimation.candidate_ranking_mode', index=15, + number=16, type=9, cpp_type=9, label=1, + has_default_value=True, default_value=_b("min_distance").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset_peak_radius', full_name='object_detection.protos.CenterNet.KeypointEstimation.offset_peak_radius', index=16, + number=17, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='per_keypoint_offset', full_name='object_detection.protos.CenterNet.KeypointEstimation.per_keypoint_offset', index=17, + number=18, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1513, + serialized_end=2325, +) + +_CENTERNET_MASKESTIMATION = _descriptor.Descriptor( + name='MaskEstimation', + full_name='object_detection.protos.CenterNet.MaskEstimation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='task_loss_weight', full_name='object_detection.protos.CenterNet.MaskEstimation.task_loss_weight', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='classification_loss', full_name='object_detection.protos.CenterNet.MaskEstimation.classification_loss', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mask_height', full_name='object_detection.protos.CenterNet.MaskEstimation.mask_height', index=2, + number=4, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=256, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mask_width', 
full_name='object_detection.protos.CenterNet.MaskEstimation.mask_width', index=3, + number=5, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=256, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='score_threshold', full_name='object_detection.protos.CenterNet.MaskEstimation.score_threshold', index=4, + number=6, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(0.5), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='heatmap_bias_init', full_name='object_detection.protos.CenterNet.MaskEstimation.heatmap_bias_init', index=5, + number=3, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(-2.19), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2328, + serialized_end=2562, +) + +_CENTERNET_DENSEPOSEESTIMATION = _descriptor.Descriptor( + name='DensePoseEstimation', + full_name='object_detection.protos.CenterNet.DensePoseEstimation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='task_loss_weight', full_name='object_detection.protos.CenterNet.DensePoseEstimation.task_loss_weight', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='class_id', full_name='object_detection.protos.CenterNet.DensePoseEstimation.class_id', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='loss', full_name='object_detection.protos.CenterNet.DensePoseEstimation.loss', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_parts', full_name='object_detection.protos.CenterNet.DensePoseEstimation.num_parts', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=24, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='part_loss_weight', full_name='object_detection.protos.CenterNet.DensePoseEstimation.part_loss_weight', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='coordinate_loss_weight', full_name='object_detection.protos.CenterNet.DensePoseEstimation.coordinate_loss_weight', index=5, + number=6, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + _descriptor.FieldDescriptor( + name='upsample_to_input_res', full_name='object_detection.protos.CenterNet.DensePoseEstimation.upsample_to_input_res', index=6, + number=7, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='heatmap_bias_init', full_name='object_detection.protos.CenterNet.DensePoseEstimation.heatmap_bias_init', index=7, + number=8, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(-2.19), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2565, + serialized_end=2836, +) + +_CENTERNET_TRACKESTIMATION = _descriptor.Descriptor( + name='TrackEstimation', + full_name='object_detection.protos.CenterNet.TrackEstimation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='task_loss_weight', full_name='object_detection.protos.CenterNet.TrackEstimation.task_loss_weight', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_track_ids', full_name='object_detection.protos.CenterNet.TrackEstimation.num_track_ids', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reid_embed_size', full_name='object_detection.protos.CenterNet.TrackEstimation.reid_embed_size', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=128, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_fc_layers', full_name='object_detection.protos.CenterNet.TrackEstimation.num_fc_layers', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='classification_loss', full_name='object_detection.protos.CenterNet.TrackEstimation.classification_loss', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2839, + serialized_end=3038, +) + +_CENTERNET_TEMPORALOFFSETESTIMATION = _descriptor.Descriptor( + name='TemporalOffsetEstimation', + full_name='object_detection.protos.CenterNet.TemporalOffsetEstimation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='task_loss_weight', full_name='object_detection.protos.CenterNet.TemporalOffsetEstimation.task_loss_weight', 
index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=True, default_value=float(1), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='localization_loss', full_name='object_detection.protos.CenterNet.TemporalOffsetEstimation.localization_loss', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3040, + serialized_end=3165, +) + +_CENTERNET = _descriptor.Descriptor( + name='CenterNet', + full_name='object_detection.protos.CenterNet', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='num_classes', full_name='object_detection.protos.CenterNet.num_classes', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='feature_extractor', full_name='object_detection.protos.CenterNet.feature_extractor', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='image_resizer', full_name='object_detection.protos.CenterNet.image_resizer', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='use_depthwise', full_name='object_detection.protos.CenterNet.use_depthwise', index=3, + number=13, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='compute_heatmap_sparse', full_name='object_detection.protos.CenterNet.compute_heatmap_sparse', index=4, + number=15, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='object_detection_task', full_name='object_detection.protos.CenterNet.object_detection_task', index=5, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='object_center_params', full_name='object_detection.protos.CenterNet.object_center_params', index=6, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keypoint_label_map_path', full_name='object_detection.protos.CenterNet.keypoint_label_map_path', index=7, + number=6, type=9, cpp_type=9, 
label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='keypoint_estimation_task', full_name='object_detection.protos.CenterNet.keypoint_estimation_task', index=8,
+      number=7, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='mask_estimation_task', full_name='object_detection.protos.CenterNet.mask_estimation_task', index=9,
+      number=8, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='densepose_estimation_task', full_name='object_detection.protos.CenterNet.densepose_estimation_task', index=10,
+      number=9, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='track_estimation_task', full_name='object_detection.protos.CenterNet.track_estimation_task', index=11,
+      number=10, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='temporal_offset_task', full_name='object_detection.protos.CenterNet.temporal_offset_task', index=12,
+      number=12, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[_CENTERNET_OBJECTDETECTION, _CENTERNET_OBJECTCENTERPARAMS, _CENTERNET_KEYPOINTESTIMATION, _CENTERNET_MASKESTIMATION, _CENTERNET_DENSEPOSEESTIMATION, _CENTERNET_TRACKESTIMATION, _CENTERNET_TEMPORALOFFSETESTIMATION, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto2',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=153,
+  serialized_end=3165,
+)
+
+
+_CENTERNETFEATUREEXTRACTOR = _descriptor.Descriptor(
+  name='CenterNetFeatureExtractor',
+  full_name='object_detection.protos.CenterNetFeatureExtractor',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='type', full_name='object_detection.protos.CenterNetFeatureExtractor.type', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='channel_means', full_name='object_detection.protos.CenterNetFeatureExtractor.channel_means', index=1,
+      number=2, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='channel_stds', full_name='object_detection.protos.CenterNetFeatureExtractor.channel_stds', index=2,
+      number=3, type=2, cpp_type=6, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bgr_ordering', full_name='object_detection.protos.CenterNetFeatureExtractor.bgr_ordering', index=3,
+      number=4, type=8, cpp_type=7, label=1,
+      has_default_value=True, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='use_depthwise', full_name='object_detection.protos.CenterNetFeatureExtractor.use_depthwise', index=4,
+      number=5, type=8, cpp_type=7, label=1,
+      has_default_value=True, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto2',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3168,
+  serialized_end=3313,
+)
+
+_CENTERNET_OBJECTDETECTION.fields_by_name['localization_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._LOCALIZATIONLOSS
+_CENTERNET_OBJECTDETECTION.containing_type = _CENTERNET
+_CENTERNET_OBJECTCENTERPARAMS.fields_by_name['classification_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._CLASSIFICATIONLOSS
+_CENTERNET_OBJECTCENTERPARAMS.containing_type = _CENTERNET
+_CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY.containing_type = _CENTERNET_KEYPOINTESTIMATION
+_CENTERNET_KEYPOINTESTIMATION.fields_by_name['loss'].message_type = object__detection_dot_protos_dot_losses__pb2._LOSS
+_CENTERNET_KEYPOINTESTIMATION.fields_by_name['keypoint_label_to_std'].message_type = _CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY
+_CENTERNET_KEYPOINTESTIMATION.containing_type = _CENTERNET
+_CENTERNET_MASKESTIMATION.fields_by_name['classification_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._CLASSIFICATIONLOSS
+_CENTERNET_MASKESTIMATION.containing_type = _CENTERNET
+_CENTERNET_DENSEPOSEESTIMATION.fields_by_name['loss'].message_type = object__detection_dot_protos_dot_losses__pb2._LOSS
+_CENTERNET_DENSEPOSEESTIMATION.containing_type = _CENTERNET
+_CENTERNET_TRACKESTIMATION.fields_by_name['classification_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._CLASSIFICATIONLOSS
+_CENTERNET_TRACKESTIMATION.containing_type = _CENTERNET
+_CENTERNET_TEMPORALOFFSETESTIMATION.fields_by_name['localization_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._LOCALIZATIONLOSS
+_CENTERNET_TEMPORALOFFSETESTIMATION.containing_type = _CENTERNET
+_CENTERNET.fields_by_name['feature_extractor'].message_type = _CENTERNETFEATUREEXTRACTOR
+_CENTERNET.fields_by_name['image_resizer'].message_type = object__detection_dot_protos_dot_image__resizer__pb2._IMAGERESIZER
+_CENTERNET.fields_by_name['object_detection_task'].message_type = _CENTERNET_OBJECTDETECTION
+_CENTERNET.fields_by_name['object_center_params'].message_type = _CENTERNET_OBJECTCENTERPARAMS
+_CENTERNET.fields_by_name['keypoint_estimation_task'].message_type = _CENTERNET_KEYPOINTESTIMATION
+_CENTERNET.fields_by_name['mask_estimation_task'].message_type = _CENTERNET_MASKESTIMATION
+_CENTERNET.fields_by_name['densepose_estimation_task'].message_type = _CENTERNET_DENSEPOSEESTIMATION
+_CENTERNET.fields_by_name['track_estimation_task'].message_type = _CENTERNET_TRACKESTIMATION
+_CENTERNET.fields_by_name['temporal_offset_task'].message_type = _CENTERNET_TEMPORALOFFSETESTIMATION
+DESCRIPTOR.message_types_by_name['CenterNet'] = _CENTERNET
+DESCRIPTOR.message_types_by_name['CenterNetFeatureExtractor'] = _CENTERNETFEATUREEXTRACTOR
+
+CenterNet = _reflection.GeneratedProtocolMessageType('CenterNet', (_message.Message,), dict(
+
+  ObjectDetection = _reflection.GeneratedProtocolMessageType('ObjectDetection', (_message.Message,), dict(
+    DESCRIPTOR = _CENTERNET_OBJECTDETECTION,
+    __module__ = 'object_detection.protos.center_net_pb2'
+    # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.ObjectDetection)
+    ))
+  ,
+
+  ObjectCenterParams = _reflection.GeneratedProtocolMessageType('ObjectCenterParams', (_message.Message,), dict(
+    DESCRIPTOR = _CENTERNET_OBJECTCENTERPARAMS,
+    __module__ = 'object_detection.protos.center_net_pb2'
+    # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.ObjectCenterParams)
+    ))
+  ,
+
+  KeypointEstimation = _reflection.GeneratedProtocolMessageType('KeypointEstimation', (_message.Message,), dict(
+
+    KeypointLabelToStdEntry = _reflection.GeneratedProtocolMessageType('KeypointLabelToStdEntry', (_message.Message,), dict(
+      DESCRIPTOR = _CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY,
+      __module__ = 'object_detection.protos.center_net_pb2'
+      # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.KeypointEstimation.KeypointLabelToStdEntry)
+      ))
+    ,
+    DESCRIPTOR = _CENTERNET_KEYPOINTESTIMATION,
+    __module__ = 'object_detection.protos.center_net_pb2'
+    # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.KeypointEstimation)
+    ))
+  ,
+
+  MaskEstimation = _reflection.GeneratedProtocolMessageType('MaskEstimation', (_message.Message,), dict(
+    DESCRIPTOR = _CENTERNET_MASKESTIMATION,
+    __module__ = 'object_detection.protos.center_net_pb2'
+    # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.MaskEstimation)
+    ))
+  ,
+
+  DensePoseEstimation = _reflection.GeneratedProtocolMessageType('DensePoseEstimation', (_message.Message,), dict(
+    DESCRIPTOR = _CENTERNET_DENSEPOSEESTIMATION,
+    __module__ = 'object_detection.protos.center_net_pb2'
+    # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.DensePoseEstimation)
+    ))
+  ,
+
+  TrackEstimation = _reflection.GeneratedProtocolMessageType('TrackEstimation', (_message.Message,), dict(
+    DESCRIPTOR = _CENTERNET_TRACKESTIMATION,
+    __module__ = 'object_detection.protos.center_net_pb2'
+    # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.TrackEstimation)
+    ))
+  ,
+
+  TemporalOffsetEstimation = _reflection.GeneratedProtocolMessageType('TemporalOffsetEstimation', (_message.Message,), dict(
+    DESCRIPTOR = _CENTERNET_TEMPORALOFFSETESTIMATION,
+    __module__ = 'object_detection.protos.center_net_pb2'
+    # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet.TemporalOffsetEstimation)
+    ))
+  ,
+  DESCRIPTOR = _CENTERNET,
+  __module__ = 'object_detection.protos.center_net_pb2'
+  # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNet)
+  ))
+_sym_db.RegisterMessage(CenterNet)
+_sym_db.RegisterMessage(CenterNet.ObjectDetection)
+_sym_db.RegisterMessage(CenterNet.ObjectCenterParams)
+_sym_db.RegisterMessage(CenterNet.KeypointEstimation)
+_sym_db.RegisterMessage(CenterNet.KeypointEstimation.KeypointLabelToStdEntry)
+_sym_db.RegisterMessage(CenterNet.MaskEstimation)
+_sym_db.RegisterMessage(CenterNet.DensePoseEstimation)
+_sym_db.RegisterMessage(CenterNet.TrackEstimation)
+_sym_db.RegisterMessage(CenterNet.TemporalOffsetEstimation)
+
+CenterNetFeatureExtractor = _reflection.GeneratedProtocolMessageType('CenterNetFeatureExtractor', (_message.Message,), dict(
+  DESCRIPTOR = _CENTERNETFEATUREEXTRACTOR,
+  __module__ = 'object_detection.protos.center_net_pb2'
+  # @@protoc_insertion_point(class_scope:object_detection.protos.CenterNetFeatureExtractor)
+  ))
+_sym_db.RegisterMessage(CenterNetFeatureExtractor)
+
+
+_CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY.has_options = True
+_CENTERNET_KEYPOINTESTIMATION_KEYPOINTLABELTOSTDENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
+# @@protoc_insertion_point(module_scope)
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/center_net_pb2.pyc b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/center_net_pb2.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c7f25a32f029d4bc0ff3aae2d46ab591837cf47
Binary files /dev/null and b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/center_net_pb2.pyc differ
diff --git a/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/eval.proto b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/eval.proto
new file mode 100644
index 0000000000000000000000000000000000000000..e7b96881c79a5a1d002c2817e09ccb81db56c67f
--- /dev/null
+++ b/workspace/virtuallab/src/robot_classifier/scripts/object_detection/protos/eval.proto
@@ -0,0 +1,164 @@
+syntax = "proto2";
+
+package object_detection.protos;
+
+// Message for configuring DetectionModel evaluation jobs (eval.py).
+// Next id - 35
+message EvalConfig {
+  optional uint32 batch_size = 25 [default = 1];
+  // Number of visualization images to generate.
+  optional uint32 num_visualizations = 1 [default = 10];
+
+  // Number of examples to process of evaluation.
+  optional uint32 num_examples = 2 [default = 5000, deprecated = true];
+
+  // How often to run evaluation.
+  optional uint32 eval_interval_secs = 3 [default = 300];
+
+  // Maximum number of times to run evaluation. If set to 0, will run forever.
+  optional uint32 max_evals = 4 [default = 0, deprecated = true];
+
+  // Whether the TensorFlow graph used for evaluation should be saved to disk.
+  optional bool save_graph = 5 [default = false];
+
+  // Path to directory to store visualizations in. If empty, visualization
+  // images are not exported (only shown on Tensorboard).
+  optional string visualization_export_dir = 6 [default = ""];
+
+  // BNS name of the TensorFlow master.
+  optional string eval_master = 7 [default = ""];
+
+  // Type of metrics to use for evaluation.
+  repeated string metrics_set = 8;
+
+  // Type of metrics to use for evaluation. Unlike `metrics_set` above, this
+  // field allows configuring evaluation metric through config files.
+  repeated ParameterizedMetric parameterized_metric = 31;
+
+  // Path to export detections to COCO compatible JSON format.
+  optional string export_path = 9 [default = ''];
+
+  // Option to not read groundtruth labels and only export detections to
+  // COCO-compatible JSON file.
+  optional bool ignore_groundtruth = 10 [default = false];
+
+  // Use exponential moving averages of variables for evaluation.
+  // TODO(rathodv): When this is false make sure the model is constructed
+  // without moving averages in restore_fn.
+  optional bool use_moving_averages = 11 [default = false];
+
+  // Whether to evaluate instance masks.
+  // Note that since there is no evaluation code currently for instance
+  // segmentation this option is unused.
+  optional bool eval_instance_masks = 12 [default = false];
+
+  // Minimum score threshold for a detected object box to be visualized
+  optional float min_score_threshold = 13 [default = 0.5];
+
+  // Maximum number of detections to visualize
+  optional int32 max_num_boxes_to_visualize = 14 [default = 20];
+
+  // When drawing a single detection, each label is by default visualized as
+  //