diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..4b47ba4eed6721506482f3ecf09cda71c330ea64 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +assets/videos/apt_exp_1_all.gif filter=lfs diff=lfs merge=lfs -text +assets/videos/apt_exp_2_all.gif filter=lfs diff=lfs merge=lfs -text +assets/videos/baodao_exp_1_all.gif filter=lfs diff=lfs merge=lfs -text +assets/videos/exp_1.gif filter=lfs diff=lfs merge=lfs -text +assets/videos/exp_2.gif filter=lfs diff=lfs merge=lfs -text +assets/videos/gf_exp1.gif filter=lfs diff=lfs merge=lfs -text +assets/videos/gf_exp1.mp4 filter=lfs diff=lfs merge=lfs -text diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f49a4e16e68b128803cc2dcea614603632b04eac --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/ORIGINAL_README.md b/ORIGINAL_README.md
new file mode 100644
index 0000000000000000000000000000000000000000..14a076351f9a9cb2107c52565f081578aa69c2cf
--- /dev/null
+++ b/ORIGINAL_README.md
@@ -0,0 +1,166 @@
+# Sa2VA: Marrying SAM2 with LLaVA for Dense Grounded Understanding of Images and Videos
+
+[\[🏠 Sa2VA\]](https://lxtgh.github.io/project/sa2va) [\[📜 arXiv\]](https://arxiv.org/abs/2501.04001) [\[🤗 HuggingFace\]](https://huggingface.co/collections/ByteDance/sa2va-model-zoo-677e3084d71b5f108d00e093) [\[🎥 Introduction\]]() [\[🧑‍💻 GitHub\]](https://github.com/magic-research/Sa2VA) [\[Online Demo (Sa2VA-4B)\]](https://5512470799b6b35fbc.gradio.live/)
+
+
+[**Haobo Yuan**](https://yuanhaobo.me/)1* · [**Xiangtai Li**](https://scholar.google.com/citations?user=NmHgX-wAAAAJ)2*† · [**Tao Zhang**](https://zhang-tao-whu.github.io/)2,3* · [**Zilong Huang**](http://speedinghzl.github.io/)2 · [**Shilin Xu**](https://xushilin1.github.io/)4 · [**Shunping Ji**](https://scholar.google.com/citations?user=FjoRmF4AAAAJ&hl=en)3 · [**Yunhai Tong**](https://scholar.google.com/citations?user=T4gqdPkAAAAJ&hl=zh-CN)4 ·
+
+[**Lu Qi**](https://luqi.info/)2 · [**Jiashi Feng**](https://sites.google.com/site/jshfeng/)2 · [**Ming-Hsuan Yang**](https://faculty.ucmerced.edu/mhyang/)1
+
+1UC Merced    2ByteDance Seed    3WHU    4PKU
+
+† project lead    * the first three authors contributed equally to this work.
+
+![Teaser](assets/images/teaser.jpg)
+
+## Overview
+This repository contains the code for the paper "Sa2VA: Marrying SAM2 with LLaVA for Dense Grounded Understanding of Images and Videos".
+
+Sa2VA is the first unified model for dense grounded understanding of both images and videos. Unlike existing multi-modal large language models, which are often limited to specific modalities and tasks, Sa2VA supports a wide range of image and video tasks, including referring segmentation and conversation, with minimal one-shot instruction tuning. Sa2VA combines SAM-2, a foundation video segmentation model, with LLaVA, an advanced vision-language model, and unifies text, image, and video into a shared LLM token space.
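+
+For a quick sense of the interface before the detailed Quick Start below, a minimal single-image inference sketch is given here. It mirrors the `predict_forward` call used in `demo.ipynb`; the exact keyword names (`image=...`) and the `trust_remote_code=True` / dtype settings are assumptions based on the Hugging Face model cards, not guarantees of this repository:
+
+```python
+import torch
+from PIL import Image
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+# Load a Sa2VA checkpoint from the Hugging Face Hub; the remote code supplies
+# the custom predict_forward helper used throughout the demos.
+path = "ByteDance/Sa2VA-4B"
+model = AutoModelForCausalLM.from_pretrained(
+    path, torch_dtype=torch.bfloat16, trust_remote_code=True
+).eval().cuda()
+tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
+
+# Ask a grounded question about a single image.
+image = Image.open("example.jpg").convert("RGB")
+result = model.predict_forward(
+    image=image,
+    text="Please describe the image.",
+    tokenizer=tokenizer,
+)
+print(result["prediction"])  # text answer; segmentation prompts also return masks
+```
+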
+
+## Model Zoo
+We provide the following models:
+| Model Name | Base MLLM | Language Part | HF Link |
+|:----------:|:----------------------------------------------------------------:|:---------------------------------------------------------------------------:|:----------------------------------------------------:|
+| Sa2VA-1B | [InternVL2.0-1B](https://huggingface.co/OpenGVLab/InternVL2-1B) | [Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) | [🤗 link](https://huggingface.co/ByteDance/Sa2VA-1B) |
+| Sa2VA-4B | [InternVL2.5-4B](https://huggingface.co/OpenGVLab/InternVL2_5-4B) | [Qwen2.5-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-3B-Instruct) | [🤗 link](https://huggingface.co/ByteDance/Sa2VA-4B) |
+| Sa2VA-8B | [InternVL2.5-8B](https://huggingface.co/OpenGVLab/InternVL2_5-8B) | [internlm2_5-7b-chat](https://huggingface.co/internlm/internlm2_5-7b-chat) | [🤗 link](https://huggingface.co/ByteDance/Sa2VA-8B) |
+
+## Gradio Demos
+
+We provide a script that implements interactive chat using Gradio (requires `gradio==4.42.0`), which you can use to quickly build a chat interface locally.
+```shell
+PYTHONPATH=. python projects/llava_sam2/gradio/app.py ByteDance/Sa2VA-4B
+```
+
+## Quick Start
+
+Our Sa2VA models are available on 🤗 HuggingFace. With very few steps, you can try them with your own data. You can install `demo/requirements.txt` to avoid pulling in training-only packages.
+
+
+**Option 1 - Scripts:**
+
+Suppose you have a folder (`PATH_TO_FOLDER`) that contains the frames of a video. You can use the following script to chat with the Sa2VA model or segment objects in the video.
+
+```bash
+> cd scripts
+> python demo.py PATH_TO_FOLDER --model_path ByteDance/Sa2VA-8B --work-dir OUTPUT_DIR --text "Please describe the video content."
+```
+
+If the output contains segmentation results, they will be saved to `OUTPUT_DIR`.
+
+**Option 2 - Jupyter Notebook:**
+
+Please refer to `demo.ipynb`.
+
+## Demo
+
+Demo 1 +Input Video (Source: La La Land 2016): + +![Error](assets/videos/exp_1.gif) + +Instruction: "Please segment the girl wearing the yellow dress." +
+ +
+Demo 2 +Input Video (Source: La La Land 2016): + +![Error](assets/videos/exp_2.gif) + +Instruction: "Please segment the main character." +
+ + +
+
+Demo 3
+Input Video (Source: Internet):
+
+![Error](assets/videos/apt_exp_1_all.gif)
+
+Instruction: "Please segment the person wearing sunglasses."
+
+ + +
+
+Demo 4
+Input Video (Source: Internet):
+
+![Error](assets/videos/apt_exp_2_all.gif)
+
+Instruction: "Please segment the singing girl."
+
+ +
+Demo 5 +Input Video: + +![Error](assets/videos/gf_exp1.gif) + +Instruction: "What is the atmosphere of the scene?" + +Answer: "The scene has a dark and mysterious atmosphere, with the men dressed in suits and ties, and the dimly lit room." +
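+
+The clips above use the same video interface that `demo.ipynb` walks through. A condensed sketch is shown below; it assumes `model` and `tokenizer` are loaded as in the image example earlier, and the small frame-sampling helper stands in for the notebook's `read_video` (OpenCV-based here, which is an implementation choice, not necessarily what the repo ships):
+
+```python
+import cv2
+from PIL import Image
+
+def read_video(path, video_interval=6):
+    """Sample every `video_interval`-th frame of a video as RGB PIL images."""
+    cap, frames, idx = cv2.VideoCapture(path), [], 0
+    while True:
+        ok, frame = cap.read()
+        if not ok:
+            break
+        if idx % video_interval == 0:
+            frames.append(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
+        idx += 1
+    cap.release()
+    return frames
+
+vid_frames = read_video("assets/videos/gf_exp1.mp4", video_interval=6)
+result = model.predict_forward(
+    video=vid_frames,
+    text="Please segment the main character.",
+    tokenizer=tokenizer,
+)
+print(result["prediction"])  # masks accompany the text when segmentation is requested
+```
+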
+ + +## Training +
+
+Installation
+
+1. Install Python and PyTorch first:
+```bash
+> conda create -n vlm python=3.10
+> conda activate vlm
+> conda install pytorch==2.3.1 torchvision==0.18.1 pytorch-cuda=12.1 cuda -c pytorch -c "nvidia/label/cuda-12.1.0" -c "nvidia/label/cuda-12.1.1"
+```
+
+2. Install mmcv:
+```bash
+> pip install mmcv==2.2.0 -f https://download.openmmlab.com/mmcv/dist/cu121/torch2.3/index.html
+```
+
+3. Install other dependencies:
+```bash
+> pip install -r requirements.txt
+```
+
+ +
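+
+Optionally, a quick sanity check after the installation above (a sketch; it only relies on the torch/torchvision/mmcv pins from the commands shown):
+
+```python
+# Confirm the pinned toolchain imports cleanly and sees a GPU.
+import torch
+import torchvision
+import mmcv
+
+print("torch:", torch.__version__, "| torchvision:", torchvision.__version__)
+print("mmcv:", mmcv.__version__)
+print("CUDA available:", torch.cuda.is_available())
+assert torch.cuda.is_available(), "training requires a CUDA-capable GPU"
+```
+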
+
+Pretrained Model Preparation
+
+Download the following pretrained models and place them in the `./pretrained` directory:
+- [sam2_hiera_large.pt](https://huggingface.co/facebook/sam2-hiera-large)
+- [InternVL2_5-4B](https://huggingface.co/OpenGVLab/InternVL2_5-4B)
+
+ +
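+
+If you prefer to fetch these programmatically, a `huggingface_hub` sketch is below. The target layout (the SAM-2 checkpoint file and the InternVL folder sitting directly under `./pretrained`) and the `sam2_hiera_large.pt` filename inside the `facebook/sam2-hiera-large` repo are assumptions; adjust them to whatever the training configs expect:
+
+```python
+# Populate ./pretrained with the two required components (sketch).
+from huggingface_hub import hf_hub_download, snapshot_download
+
+# SAM-2 Hiera-Large checkpoint (assumed filename in the repo).
+hf_hub_download(
+    repo_id="facebook/sam2-hiera-large",
+    filename="sam2_hiera_large.pt",
+    local_dir="pretrained",
+)
+
+# InternVL2.5-4B base MLLM as a full model folder.
+snapshot_download(
+    repo_id="OpenGVLab/InternVL2_5-4B",
+    local_dir="pretrained/InternVL2_5-4B",
+)
+```
+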
+Data Preparation + +(TODO) Please download the training datasets and place them in the `data` directory. The download link is [here](https://huggingface.co/datasets/Dense-World/Sa2VA-Training). + +
+ + +
+Training Script + +Please run the following script to train: +```bash +> bash tools/dist.sh train projects/llava_sam2/configs/sa2va_4b.py 8 +``` +
+ + +## References +If you find this repository useful, please consider referring the following paper: +``` +@article{sa2va, + title={Sa2VA: Marrying SAM2 with LLaVA for Dense Grounded Understanding of Images and Videos}, + author={Yuan, Haobo and Li, Xiangtai and Zhang, Tao and Huang, Zilong and Xu, Shilin and Ji, Shunping and Tong, Yunhai and Qi, Lu and Feng, Jiashi and Yang, Ming-Hsuan}, + journal={arXiv}, + year={2025} +} +``` diff --git a/assets/images/teaser.jpg b/assets/images/teaser.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ee04ff4d8b0b716cb688f45da1f82237bab6bb4 Binary files /dev/null and b/assets/images/teaser.jpg differ diff --git a/assets/videos/apt_exp_1_all.gif b/assets/videos/apt_exp_1_all.gif new file mode 100644 index 0000000000000000000000000000000000000000..d8f30f31ff9c25f09f41040e35843fffa95d67f6 --- /dev/null +++ b/assets/videos/apt_exp_1_all.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddf6e915c5f5f00e11136b4342c63b601fd446f714967333db4995c6ee4b797c +size 1106754 diff --git a/assets/videos/apt_exp_2_all.gif b/assets/videos/apt_exp_2_all.gif new file mode 100644 index 0000000000000000000000000000000000000000..8e79ebe5bd3cade1d441b8800b306035d0783bca --- /dev/null +++ b/assets/videos/apt_exp_2_all.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb9a946270dd9d3a1f1f0b30ff55d70abea9cf54bc52499cb07813e80a8f1e33 +size 1223629 diff --git a/assets/videos/baodao_exp_1_all.gif b/assets/videos/baodao_exp_1_all.gif new file mode 100644 index 0000000000000000000000000000000000000000..dd4df79436cf071d48a3af0da86a214d8b74a0b9 --- /dev/null +++ b/assets/videos/baodao_exp_1_all.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e762e253dafb71ecf90d48144422bcd6fdcdf9c6a3c67571ee1a9d0232e32f03 +size 2954305 diff --git a/assets/videos/exp_1.gif b/assets/videos/exp_1.gif new file mode 100644 index 0000000000000000000000000000000000000000..36e0f1125b7fb5d8931c419fdd538158045f519c --- /dev/null +++ b/assets/videos/exp_1.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b63b1465808dbe658761936b61a10f3e72bfc04f0b144a9e9103fcfaa810147 +size 4256930 diff --git a/assets/videos/exp_2.gif b/assets/videos/exp_2.gif new file mode 100644 index 0000000000000000000000000000000000000000..5ea717bf8f1b7c2b26538eca2cdd56b57991736c --- /dev/null +++ b/assets/videos/exp_2.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fad52f51a9f4238106923217e1d60c3ebc563c77117c49988496a67699ead397 +size 3836871 diff --git a/assets/videos/gf_exp1.gif b/assets/videos/gf_exp1.gif new file mode 100644 index 0000000000000000000000000000000000000000..ed052bb47b0b67f26fa27625d3836152b534a1ca --- /dev/null +++ b/assets/videos/gf_exp1.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cb7962fa6d20f4535b07e526c8a65edfcee55d5c2ec79308f98dde24c209842 +size 4821825 diff --git a/assets/videos/gf_exp1.mp4 b/assets/videos/gf_exp1.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..46d88094f81ccf95e4bb729312c224a853ce3f50 --- /dev/null +++ b/assets/videos/gf_exp1.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:272f4246fbb62aa690811e01d5f8aecaac3d157cc01a9859de79675ee5d4f7cf +size 15332128 diff --git a/demo.ipynb b/demo.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..4b082fd5a13adf128ffcc1c51c364187325c9c9f --- /dev/null +++ b/demo.ipynb @@ -0,0 +1,226 @@ +{ + "cells": [ + 
{ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# import the libraries\n", + "from PIL import Image\n", + "from transformers import AutoModelForCausalLM, AutoTokenizer" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "c053617238304cacab10af714e2062eb", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Loading checkpoint shards: 0%| | 0/7 [00:00\n" + ] + } + ], + "source": [ + "# Then read the video\n", + "VID_PATH = 'assets/videos/gf_exp1.mp4'\n", + "vid_frames = read_video(VID_PATH, video_interval=6)\n", + "\n", + "# create a question ( is a placeholder for the video frames)\n", + "question = \"What is the atmosphere of the scene?\"\n", + "result = model.predict_forward(\n", + " video=vid_frames,\n", + " text=question,\n", + " tokenizer=tokenizer,\n", + ")\n", + "print(result['prediction'])" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAgMAAAErCAYAAABDzICRAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs/Xm8LUl214d+V0Rm7r3PdMeqW1U9T2rNSEI8DUgy8PDw+MhgI8DgJ4MBG57gMQmLwTJg4Y9lYwwePp7N52H8sIx5lm2MJIRkMBLq1tBqDd2tnrurq7vmutOZ9pAZEev9sSIyc++zz7nnVrWq8Uc3um/tfXJnRsa41m+NIaqqPCqPyqPyqDwqj8qj8iu2uC90Ax6VR+VReVQelUflUfnClkdg4FF5VB6VR+VReVR+hZdHYOBReVQelUflUXlUfoWXR2DgUXlUHpVH5VF5VH6Fl0dg4FF5VB6VR+VReVR+hZdHYOBReVQelUflUXlUfoWXR2DgUXlUHpVH5VF5VH6Fl0dg4FF5VB6VR+VReVR+hZdHYOBReVQelUflUXlUfoWX6rI3NuI/Ly90YvhDUdaTHwqCoCRAqUS5MvW8/caEX/OVb+T2UceP/OxnOVlF1AEqTASuzKBxwuOzmq999y1uHDh2ZgoaCV1CUkI1oikRkxJDJCUQlJASnUJISqfKfAnPv7LiZJU4WSiCo6lhfypc2XU0E5hMBVHwIjiBJBHvhdrD7eOan/vknLtL6HLXGg87Hg4aYaeCqwc1ezPlyn5DheK8EkWJCqFNHJ4EjlfC3SPlaKmsAiwTdFGJKkQUUVDsu6oAYiMowmZCSRF54JycSUIpoIAXtT4mRTfq8k45aIRalN1GuLrnubIrzOqGSsBJwHnhZCF8+oUVpxFWQQHFIzQOdio4mDquX62YTZRK7X0O8CpoSuDsnUmtTUETq2Xi8ERZrCAkoUsOkcSkEaYTofJK5UBEcd4hgCZFnOBE8QpownuH8/a+k1jxqdstq06ISQhYnyuUxsPMefYmSlWDcw5UWKwCyxaWQVkmWAXoEkTAeU8XlJjEZkcSTiAoJH3wnJyZI0lUjWNnD5wTVnMIKwhBEYHxFKpNPE6UnV1HPU2IA5IQWogBUkxogkkFVw4E74TDhVWSEEiKd0p0QsLTNA21c8TY0cUATqnrGWhDNWmAjuPDu+zt7YIIx3dPia2iCt4Lk5nn+rU963+XaFeKdonoIGqimTgmrqXZTZyeKHfvJ+IKnrzZcP1KolPhxVdaVp3DOUe7jKQkRFWbWwRwoFhfSaSkgMOJ0Hi4uevZrQKqRnuSgEZBVAgkIqDRoQqV07wCJH/mrdFPnSBSdh4kICocd0pQoa4drsp7MiaCKkntmZT3kojgnEPV9nTZdw7BdopCSthH6u9HhKTJ6ICAiENjRIGEgioOu7e8Z7zHY4yos2tOHc4ZDVCx9zontogBFQWx9jqEGKPVyUA3bDxzO7TQdc3jJaP7Nle1Q2Pus4ckCafGH1JSZDQoksdEnNWTYplztWvYHs+X+raQL+Gtjc7ldUKyce8n1OWZ1vU2C/kdZ2msquZVJ6ja9ZSUFMl/D89slm2Jf8+j1ZdJEnwRnRdRljE+sI5Lg4HPX9G1j1IyyUdIOBFqESRFXr634kd/4pOcdLBY5QWmZZErlRe8KPeXgZ/+6PNM68jurqNpKlwSHOCcoiRcBhyQcA6SKlEyY1WIwdGhOC9Matu8XoRVTNxdBmZasSMOcYo4a4tTR+UE75RFZ3WBEeh+gpziKwFR2rZj7gTfdExqICTUe1SFEIxQIJKJjiIoE3HggWTjlPIGsg0yLIKLFs15v21bRGWTAfj8XfvtONxRvmmCGBIheDoXiRqMGUchamUUVLExTwORKEAjhkTw5bpSYXMbXUKTA3HElBAEe9zhJCLOxsOLbewEhB4cKZknoKp4cWhSkjNGJ87hcCQS4IhGQxEVVJPNQaYkTmzNJAaCZoTFIT7hFRqE5AV1iiRAbI3kaexVcMJZ4nxeKXNT7k1dQqJn2ngObgpKxYsvzAldIWCun7dClFeLSAhCUiV1xjjLEE1rwXs4mUMk0TQ1uztCM6m4fXtBG4Td3YYbV2fcun6Vnf0JL91+hZfuHPLUrQNuXD1gsYSr+1fxCO/5qUP2qoaqEmRnie5AJTCdCDduXONNT72BpnY0PnF0umJxumLRLmgj1M4TQ6CNc1xYcOxbkgin80hdC6tVZLUQQoKYojEDyWzaCU4cKWbmXZjRsIxJKZGSWwO1TtWYHeDV1sBpSITkmFbCxNu2E0YgwDlj0oa4jH2IEIGgCk4QcSSBkEATiKahEuUMYxG1BaV5rShWT2E2KaVti4MBU+rap+T3jNeBMVd6MCEI4gRJxiRToTfiemGprFVG73G5/ymmtbVp38+2UyTjGR3GcLz+rX7
bWyjELCyM7x2GOzEIPkCeO0UMCJx5tyCZ5ogKTjMScyn3dZ32KXavc6OxLJul8FJn62ro96g9ZLChMOa966Dj/LI2Jq+hjOsxPnS55153MKA6WlvjIpD5AV6g9oIX6Ii0Cqtkk+J6pGjMxYvQiO26lIQ2Kr5LqAtIZghOjaB7kbJ+jOgDQSNJba5TzEwXcME2ckGQKUByEOuEuISSkbpiDMbBYtXlRb8uqjkHVeUQDUT1tEFok4PksubCNlhMSqcQEVLeIMmBJMWpUjlHVEHFGKNRg4uZysMePVEkCcmUSQDvsnSd+6aAE8WJ2MZRJUYlJAgpUjmIuY6u60iabMOWTZEJqSBojLTRiHGRdqLAigQqODGylPKGF4SYTFNgm0Z7aSulIpVk0CAFPBjwQ2wsvQoSE5oUlzyVF0JUnGbibuLZMCa5vVmxgSYjfIWQO4RKHI3Lv5lsjct1KZqJrRjzGU3JeONvmyvVLK2oQzWxmCe6OVQ17O16JLmeWBpEs/VhzwohGMgsjLEww0kNkwnsNY6Dg4YX7q1IMVKpY3UaaVtPVSshdty503J075SrB7ss28DiUJnPKu68fJt7R3OcPMu0rjhdBO4f3S3CLJPGM5s17O7u4WSHe/fv8cStN3Dv3iEvvnLM0ekJhDkpKUEr2qBEPMtFxAXHtIFlF3n5jo1RCNqDNu9NQ5BSNMZoo9kTF5GiabQ5TUCX1CTsfnAdZK1XTIkuCcsotDHfW8Osoqc3IiBZE5HyFldMi5REiDjUGSNNmmwhiyfhcBkojMHaaJKHjx5trgPBwkBN2h00db106hwxxjW77wAisqiVBilendEvXN5XKaN1BNF+QfV7ttC0AQhtW69jBpmFiL795zM5o2UDYxgAWx6DQjN0pKWRdVDhRPLQyTqzFpsrweGK6FT2eSILdfZ+kTHTzpqTESkvNLC00zmHptQDnR7AZJptYyRZc7fO6DeB/toobrl3fN+2v88rYy3Ng8oXQDMwYpajTogYEfMK3tm6VAFNLqvxyTPhQRJOE04xdWYFOFORqQoxOWI0yVMRY6hAVEPC/aw6k1yVRAhZ+rKd0su+CTNtRBU6ARczcxBBcFQiSFRIkURFEkO3xksHtO+dAdIUIiqe2CmtRox0tKhaG2L0pGQqx9CzFTWAoBCjoWMdr8rXUM5bVIJRdHVGHAb1H1kNOxAvkxSE1AnRCSoRvIGBmIxQFuIgDJJRQq1PndJ5JYi9q/ZCEkE7Ax1GFzPhx4haxJEhGSFlc4mYil9ClizE5q68XURIxAEcYgw7VWpmGNSk60wB1GVmUYBjcKgkYkyIOGLM/Uu2blKiiFj2UTQ8o/0oI4CybQ62lV5Cw9EFxTXQRmURWyYHICtIwdS/KQEqvQoZDLBIHkdRxQtUIqgXWhXmC6GqG1IIxNOAVo5qUnN9f8qNm3s8//JdXnplyb2jI2oRlp3y2eeOCBpIzjQux6vO9l4EVUflIUZhoQ7XCv6kxdeO09Ty8x//LK+8fMq0clytEpV3HJM4DTCdWf2ptRVi6nXNjL8wB4hq0qltAxtgI5JlrHRtBBUhBCV4h9PUa95Snt8IdEFJyfXS8SLYequ99EzEqQGSiBDVETOYUIdZKfK+LOtRs0DSS7gb4E/VGLEbMc8kRiuEsaawqKEz0GPY/7rBDArtWWM4W8wFqthmypoVgKgxo5x8r7P1V1TugIH2C1es4pwfMbXt67to4IYLZrIhA+dM0dFMjwstLW03pYUM22vUxwE8GU0RsT3qCuDQYnIZtBnbWpl0MC+U3zcl/c3vPq+XGFMPvuj38HrZ1JJs0oLLahUuKpd9/nUFA2uNypsRiu0t4sYoNE+eYgS+00R0gmgAyEhc0ahILVmdbURCo9mUbD8qHlOfRjFULdlm5JKh/JjRdkpCjI6IB+fRGFAR2pSYVJL3iJklxAuVy1oH53BVRQsgHTAI7UYghTYkDmZTnnzyOteueqYTT1U7fOWom5qqmuCrmk8//Ry/+OGXOFp4lkGJCdokaDSVZipANm+CzcWUMnJ/rTAhDeTJFnSWHBIOlyLOmcp26oW92YS3vPEKe7ueujK1vfNGyG7fW/LCvWO0MyakqNnvnFD5ilu3rnPlWkXjEhGHk2T39TAcYwJq2pwgcOfeCccnC7rOmHFEqBvHradusL9X27yU9ZWFClEDISYMSb/OKjF7ud474vn79wgi2TRhAA+puPnYAU++8RpV47IkoawWgU998kWOFq0xrGRzFVSJmJYrqY1jT25K/0dlvPlTpoNj2rlOILLUhtLMBPXKzlQ4uGKMp+s8R/cCi1PbRw7AwbXHrrJ/ZZ/nn3nWnFlEaAN088QiKfd1xRe96w3ExX2Wd4/ZPZgxqyfUtef6jZucLgN3XjmlRogBYjQQtnPlGrFrWZ3M6TqzXTXOMZs4po1ybV+4eaviztEKXMPN608wrWp2dybcm6zYE0XbQKod+7sTVhqhbWmAlaTMRpOBv2jSHVI0MkUUy2ujX/EFtLqe4ZOFhJUqMTOWpFmtr6ZRKn4cE29z5DKYCgkzDZgqysB+AjGVGEkGoCxpaEMPfNF+Xn1uqzpwWlT0WYIUgWznLhqlwp56WXXE6IZlVOz0qfS01xClRE9LNa93MxdI73cEIElQjzHTAt4zYFY0g/ARiCntpcxBpjk6rOeiOSvPbdra7Vphdm6QYMfYgOKLMBZGykwPgEXLeGcZ04aucP1B8zDWrkjRICTp97SNvRsBTFtzRv9szgUMQAEqxkv63SnZDFO+e3s2RjV+pOPZHMommBiDmW33/HKV1xUM9JNAsayXH7CNkO23SbOkJlCRcN5RJSHGgZQmlKAQxOzETe158qnrzHYrJhNH0wi+EnzlqSpH5b3VU3mcr6ibGuc93nsqX+OrmrZL/Mz7PsRHP/oyIVR0HezOlG/+pq/m1/76b2Fnd0Yzragqwdeeuq6pqpraN/iq5t79u/zV/+C/4Wff/yz97sCYRGprftvv/Ta+7Q/+XurpHs6B+AoRj3cVIg2I8vQnfpo/+P/8N7j74mF26FHaaGpiM0WnEeFjjZgMLk/nl4vs1bqxYRBB1Zt5o0yUmqwTg+Cdwznhd/7uf4Hf9a//HurJ1OZWzCanKrz03Gf5t/6Nf4sP/OILhB4dmwbo67/2S/gT3/vd7F2/iWgYCGC2fWs2FRQtonemefnQL/xj/sx3/mVefH5FTMKOU37jN7ybP/UXv5uDW28wOVAKQTIRq2x+6aUgI2ZOPCrw4Q++nz/zx76H42fv48TmThCuNI5/9V/7F/nW3/178NUEISI45qf3+J7v/l6+//vfQ8hgJY3Gv/GOqOa0Wpa4Q4pa6fz5cdoT7UytKJqgojpN2X40P+xYHjuaiaOqMnEeCVvew+OP7/EN3/gVPPP0c7zilFYcKSSTPoPd6CRy7/5tZqaKYRU77hzOmc0annzT40xnE2a1x3e2L1EjHC5FFouOLvssVJVy64bjDY95dlxkf6J0Yc6n7ySO4oonn3yMvf1dbly/Srto4f4piDAPiixaujYyX0Uz/1H4zCCRAe
Yv5LKtX3v1Rw+yhrU9fBZHri4oq8p8ORJi2iyKNtHurTNgE2dafhWhIxmDLMxGirSpAyDp908Ze99fc2WPMjAiJ27NZFF8XnrmphRu2QOPNe1cv09T/96BYWi/ZmwdA/gs5Wam1zO8rHpPQAYURdROWTOgWZtUmpX9t0caiUGrUdo2zMX6te2l0J1RXbYyWXMilOJUTPbtsTEqprcyE0VjuU0bUiZRR+3tnWg09W82gJJG2gBMSyhDe208xvMx9Ln4aIhgWjKc8a9+gM939t787EdpC93+fIKE199nAB1sQD3G035ikyoSUh5Ex80bDf/8t/5GVlXN3/6+H+L4JGTQIKgTlkGpNPHN3/Sr+VPf+z00+wfZS9ybxO6M4bpiDxMxhCcekfJpzjiqgf/f//hf8mf+2H/E6UJI4rh5Y8rv+Pbfzjf/5m8H10CWyVOPHLOTgSqhfYV/9A9+ip//+eez+jCiHsQLNXBjVvHYravI5AA05EXZoWlF1AWalKv7cGOvYlaZd/AqDVKu65H4tgVQts9rnJ81NaL2xE81b5YefCgdjsY5vvhL386tt385yJUsgmuvLr/2+FO8/R1P8KFffKFvpQCVF97x1lu89d1fQpq+GU9ExBkQyGObesKQct/MDfTL5ISD/V1C7GiTsOsT73jrTd7xZV+E7L0FGXkG00tKMN5Lwx7yJIV3dQuuX5nx3POH/YYWHHUtvP3tT3LzyTeAzHC5VQfdjCefvGlOi9lhsEguPeFMRYrKUhPrQG6zeFGmB8JkV1gdw+o0oaGXg0rLCSGxXCRicASFdpn6MSr3Vg1Mp47KtXz8F36Wk5OW2sMyRQN2IybrKscqOdqg7E9qghMScHy8ZHF0ineJhLIIiag2LyenLU0MuFrwdaKqhJ2ZZ2dP2D8QDrxjfqQ8f1eJrqY9bXnpxdtc3a+J3YqT+YJJilSImQ+6wGRW471jsezwDoqpanBdzXQi2bWUJ7UXLGTYGZl2Uzzfjfg6gpqzcIlOSf1KkV7iLQxZnbN9nvfcwFylX0T9/kAzyNRe87TJvG2wM3DUlJmpNWZ8v+o6kTdgWHbOIEbZM8U/JPe6CMRZ4h47A4/BfkTNj04GJrvOawZGL+KGEdKe/SFuLGmXIRkDgSIlD+aNTeY1aG/GAk529C4q/BG+kxxOUDQGgmlnKbte0tr9Y03cdrAyjN2a9qL3t9AREM+tPIcBqw4goF+r+X3eY86MUYnxfBrwsE6EF5kRCki8rN/YF8RnAFjjZ7o2efajw+EdvPnWLr/p1/0qHvuiL+LHfvQ9nJzeNlSuQpX9BUQtdO+pJ6/hD/ZHdVkYkTGxQEoBSNkBLKAxkVIkabRFHVp2w12e2FHuxIB4ZRIDz/z0T/Cut+0Y2k8RYiLFFmK00JgYAOXe/WM++4EP4fJCciK9fbqTxA/8bz9AFV7myp7HOSWlQNKOFAPL1YpuGXnmuXu4+W3euC/cXyYkRyikmNWa/eZxa5t78GNY3wD9cD/AXnVe6ZmMmFRm77BNWAvUknjhI7/A8uUP4Hf2sypNIIeiLu+9yOruEbWDVs1Rq3LmsPmZpz/DnU+8h/033Sa6Kns4GYHTlHJYU9ncyf6fhPmnPsbb9xvuziLzCDMvPP/Rz/DJ//PvcvXNT1JPJjhX5w4UzlwYSSGsmdGoQ8Xx9C/+HN3dQ6aiOdzUwt4Wq8D7fvjHeeeTV3rNBwrHR/c5feYTXG+UIx3MAykTZBmNeSFavQlsy/yUMZ7uVEz3svu1Cu0JpOQREiIJ818wRmjEJVP0XkLOzLOzcKL7UZlcTVy/UtO2icVp9kMZNaP2njc8dpWX777IXDtWRxHUEUPi6U+9SBJYtebHorkf5qFtYLCaJHCJagLJKXePIi8eJu7cSXR4NGstnvnsiyxOT1gsVpycdGhdUXlFxFPXQlPXLDWwWAZcZe8vzCblfgkFWG2ykPGazZKrjoBgXsltSNS1+YIkJJsibbhD1grgzR+oCIwW1udGTMQmKyUDB5qBI2ck9PXvA6HPkN2Z+a/UKYNBu2f0gyQ5JvybKuRh/Wh2Yu3pxAaz7R3ukByJWfaHjOoaOeY5N4yRjF5oYnDfR02ao262MagCYtbD+daDJHRAzf14DX1H1iMGeto3Zvaw5o/Tm0g2aNx5oGBc1pwTndt6j+ow9mNTXgFza9EVYkKEr8x8ZdEt2UfpkuVBzsavtYhestbPR56B89QesraIQDXhRdit4c3XKv6pX/MlvOPL3sl//Nf/Pi/cXiA55rauYH8CtQhvOpjyL3/bt3DriT1ILaSApoimSIz5XwjEEEgxWN6BEAgx0oZIjInFMvCJZ+7wzPOnnKxAI9Qebu4KTz0xZdoIvlJqD2CONtncRBLhcFXx3o+c8sKJ0uWdU4kxqysTYddHHt9v2JvB3p6nrslx7p4QI12Co3nicJ64cz9xZxmZB2ij2ee7lKWhHqcX1dqrL+epqgq59c4IRyx7EnPqaxzsNI5buzVvueb45q9/M48/tsP1G4+xc3ADXzWgwqeefo6//n3/iOfutazUbLUeofaOx6fKP/MNb+PXftNXMtnZs7C3nAsipoAjGs1h0BKkKBwerfjYx5/hp9//Ue4fB5J6pqK85YmGW7dqnnzjLa5eu4n3vpeOysZ0VKbZ0Q4UnCTENXz0Uy/yQ+/5JPdWEKJt1CRC4xxP7grf+LVv4k1vfEOP7lftik9+5kV+/pde4HAO86Ask9Ll/V9VnlWXCNBzL8EhTkZRD8OYawZ60x1HPYN6AjEIJ/cD4hzTmUV1nNyPxChUdaJphNUqEUNRc56dXwfcuOq4dbPm+KTl9j04zSG6hpOEphK+5J37rFhweNxy/34idoJTU2WXsC9XVNkYY/SiVJUy23VUU2Vvt2JaCcvjlvuHSrcSfG2q+DbZfnKapSRRGic5F0OZX+hSImr2FcggR4s0KmbrPldmGuOsDcxVQNjEwf5EIFmES6Q4jlpUkGbdrtnLyetmJLn3onf5KEBvDPbWmUkvPYus3WO8Lr87+ySg4w6UMlLvu8EXouyNApKMYWe/ovJL0YxobodgwtQGIDxvQMVlSbyHX4zGgmzCyHQka2zWKx0k5MI8e03ZUCVFWTz4ROTxTyXMd13TMp5XLxkcSxFeBnBczCTbojP7dm/r+UWSuZR+Wv8K6EDpnattjGLft6H/mWar5Lw3ZY0UwLTehvM0BRdpENZ/M3PWMp4zAKPyuvsMAH2Si9EvoC67rJm9sBJovHA8j/zDn/gg/+A9H+LO8bCwyiITLMnMK0cL/vu/9SPsT2HawHRa9aFvaiDYQktkGCgnJYGH0nWRNsDJPOGd0PhECTw4apX2hQVNDbOdhsYr3qfetunUJMLjTozolT5lNZkTpa6sdydtoEOJE0ejmj1PAzEqnToWhfiK1VvlHAMOxQOdWqKVWBb9CIlujvNFyHf8+7a/e60mlmCoxNl6B3X+V+WxvHvS8v4PfpY3PXmDp94w5frju1RVwonjhdsrFl3uUFGlZl3vqnN85BMv8tQb3sDjT02zhoW8QWyukppmNcSQo
zRqutiws7vD7syxWECIkSRw+7ij2p2ys5xRtTNqb333vkKJ2ZO+Ma2QqoVHqkO8I2mdJUWrqwD2hDH6F2+37F+BZmIhkDBlOm3Ym0C7srXinCUdSmD5BlxOnlQIJyPP/tGUDLZLWJ5G2qVjuitU08TBTWFnBzRWHN0zRy4vytV9z/VrE5577pR5gG1TXJybUvKsooFHo7NFf2RtnO44knRoCkwnwv5ehZcJzlUkjUQR825PcLJoWSxaHEJTO6Y7ntluzXRaIyjL+YrZdB+RjtC2zHZnuLqijRbx0LUrHJabwyEGvLLE2IXEfBmZLwwIGLNLfT96Qfe8otu/j9d1SIllELyzEYrm6m9OxGL7WGy1GCDoVfiyVq1kxl2AQLm2aR5Ym9+Nv8dOcfnC2nXnhrwRQJ9EiKLByDQiaXEsFoiWo4MezGheCVmjo8UZbqi6Z9J9W0yKzwOSGfQAaAb6Qp/op6yo0v510lLAsM93FSI5aG2KNqLQdPtVKE6Vazb+jTIawaFdawvgfFr4IO3omolgg8ZKHuPBiGX/Geos7xzaVRokjpwYzXzhimf4VrpwQbnITDDu42XKFyTp0LCoBpSdFUdZ8rSsZSKWGfA0mlQck0GvHo1jUuYk2xeXCgQhVhZuJqJItLAQT7YyOiPI3pn0URbhKppzlGZGIOUFYhkKVyk7lwfzmjbtdQkbss04D9qHGvV9zd/KZuzMjZl5l+iASo3gpARBA6ugOYtdlsIyMvZ5g0dNxDxgmgYP1e1qrIsX0zYi1UsvWQdhzk7GNJw4vCRqsdA0h9KGlq4SVupxO9fxOzfo0gSNDrzQiid5Y0AFoRcfCBXldBl4+d6CnRtZzYqFkEl2HlKNObwPcBazHVJk2a5IJLwz4pac0JJo9q8gk12WOUqkckK3ak0Fqwnvurw7HarRAEFMdCkSdVhXPdACQlSOT+fcPT5iN+ZsbxG6VYt3Sl1Bg6LRNnMSIYranGmReOizuW2X/vIVcaQIi7lyMBF2ZjWLk8DpUaBdgCZLohWjsFrqmnR1Zn6LF7arODltiUFpOzM1iFiWvf0rjps3hDYuqWvY3anY35myv3fAdHcf1YQ4j5caxPHyy7f51Cefo+1AKmH/6h5vefMbaKqa+ckRJ4d3uXn9OovFnOOjuzz2+E0Orl4nqpBC5N7hIaeHh+zv7zKdHZh01C3oYsf8dM4rLx6T2kizU5NUmS8jy/ZsHx8EdMf3DH/bHgqSXVoySHMyeIEriqZkXvWjdVDmq8Sd93KhZu2ASJ8lcEygt4GC/jNXsB3I93es7cvyW5/droT+xsE50djKIKEXn6xeS5DDsMFRet07rPYCU25/YVK5rsExruS3SP3bBLYt6TJ0wLq5xSHFV5gieUl27jVgMPgZbEu8VOZsKLo2RsOYXo5prtV0geS93q0Mg9IQfXHxmsz1J9uD3puAFKP22ovNx1+PSAL4AoABWwyD5NevBhm2nnPmhJVUsyRsEpeO0TjkGFionamFLK490UVBgtUjChIV72yblI2sfYooW9Qxmkex5iQcYxqQ1EIWJQkhCi3k9LZZSsxqo1XQLNGMtqOaPdmIhuSsZMokGJCI0ezDIadLDp0liumS2djDCPn3AMFEgxxmJiYFjFbQRWj3Ig/VTTvf4CqUJSZJprVxDi858ZAXECW0K5wkkq5ol0oUoZrUENoMWgo8zkBGQJ2ji4HF4pDF/A7kGGfnLNOLxpzgST0pmV4g5LTM8+UpMUSzaUrKMeiCSCJpYLU67QGCpoiT2rQcsgAsnauqedKjjrZb4VwOOyy+C1nsU5T5ouXo5IhVFy23hDrm8wUpJVuvzuHS4LCT1Bi3rdeRxNVLRIWLb58PjRbqenSv5eS+oDEzATGp9f5h5PQkIs6hbgAFsllXztK3XCXq2iNVsvCplGgmnmvXa7x0iHiqKlG7xNHpnFUbeNOkwoeOajLBTSqUgEsBSZYYLMZI6CKh7VidnHJ8eJ+uW3J4cmL7oRNu3z7G5zDduEqsTlcc3jlkuVry1Bt3mTQ1i5CoKkcMHbEL1F7Ym3jTfoljFQISpc8WOHbCvGg9DxKq5syARhP6cECMHaY8Rz3DdQ6iDnSAkW9Oz27XfyttMSZ2vm+INS+HJVLq2yybz27s594ngIJGBlpV9qwM42Q+F2rgJ6ujxZV6DVioxjxWA/BZe+eaxGoXxrMxqPdHwtCIoY61l4U+xhF9KSarsn82gVOpZ93RciRy6+Af0ptRNhq/SRJ1rX2Xk8I321FGQDXlaC+GddqvzRG40uHvHgw6oXIWJRRDAQWDlmAbkHkYGn/Z8oVxIMzhYpJBwGC/ylntRCyljJakLo5OM3jMa78wxZJLIDu7k6JlXBNnC95hiSZMq5BwabzYoXiBa7K/u5DDGzFGbOjV8tk7xXwQHDnVrNnvxAuapHceI9u4EsZcklrylMYLV2/ssHfg2dv1VA7qusL7CvEekZq7h6d8/JO3OWk9q2ja+S5rDpSci51CA0aalUsulvMQ7xmiCpDVsyWvOsn141xnDQsJrhzs8iVf8Tbe8q43s7O7m0MOPVJ5rnZzpo3r6yzBO06F2dTzzq98K2/+yndy5eo+ZNLoRCxxUap7/plIkJQ2Bu688jIv3zvipK1YdEobhNlOxdu+4i285YvexO7ODpV3eCegFn3ipMaJB+0sJ0Dsem0B0rCHIO5zZibAEkiogqTEzSev8s4vfxv716c03hhh6CKfu33MYVtlvw5YRaXN4UcFwCXcqOdjie3iDVt5YWfXE1NitVDCcvCAJ++FELPpSzfrK4ZLpfLgYmTZmce/d1B75cpVTzOD1LUcLuH61Rm1rEhdJITEzs6UdhE5vneH2d4eV2/UzBcrnn/uDm2b2N+fEGLLajXnM5/9LCkJoQ2kLnB8epfpdMrdeysmjaWnjqsl3XGgaxOroFRe8S7RhQWvvHIbXzfU9RRpVkh2vLBzRAbGaOFw60BgcLI7S6TJ4MnAWtb0mOqmZ0NRTVCw+U7mAOtMytCYev+OMUF247rYZDLS47zxZc3aKPs+2MFl4/ltZoaBkWQNhPF1swow+A6Qh2erpqFITpnZmlo69vUXx0Sy8NEnCtJ8v8vAShkxqREwc+v90PzOoSlFizmKxujBSPl+FgCMx2WbP8b6+3JdxYGAwhcu3msXOQeeZ1YYr7VUhL9zAGA/thvjUMCZiAkUIlBVkrUE2o/P5dp73nsvX15/MJBHuVdzwYZEY3+kcmsyu7zmHSAZABSGGCCn6FXqqqKqhcnM0dTmL2C2GUG8DU7ls3rJOZx3+KrCO292Zed5/rm7vPTynFVX0XXKtFLe+Y4n+bKv/BJ2Zw2znQnTnQlVXVHXE+pmQtNMaSZT5os5/9P/+Hf5yZ/5DKuYc9mLQ3M2vm/8pq/h93znH+Hq9cfxlVB5S7zjXIPzFeIrXnn5Wf7EH/6TfPQnPkM1cdRTx8lhi2bGbDHOeZzG6ou1IV5H0w+jSh2et4UdKYDM0q46EVZRcSSmlWfqHL/lX/qtfPt3/CGaya7pE3IKUMQxP3yJj3zyc7z0
ox+mj+cWYeIdX/2r3s2//Z/8e+w//jZ8tg33lFQAtQQgWsK7VAgaeOZj7+WHf/TP8PGXI20UGpTf9C1fwV/4y9/Llcee6hPzFIxp0ltWP2YqbaYmA6JRPc995pf4wEe/i8MPP4tCzmkh7NcVv/cP/Cv85m//dqjM+dAB85O7/Dt//t/lJz7wY2YPpwA0Y8CQtTo9ZMvaHBkHAY7GfEQkzRMeJo0lLlmcRItEjfTBFSXOW9X8OMzRbyA4VW2+BgcTT+qU05WAVEybxFM3dnnXO3cJacULL8359MsL9nccR62yWim4hquzGbWvmOzuI77mxRfvcOfuEQB7+w7VjtAlnFOmlXBlb4+j0xUvH95nmhwnizkxCreu7eMqSCGgGjIRhZ3JlFs3rvPyvTu0yeGWgXrXE1WZrwyQiTjLDKo9be8lz4sYQ7+mBcyBLDOZ3v5d9o2ZImNmji4LD1K0bSJrDGH4FNMw9Kr1sm/o39tL45jQsMlchNSfE1Cczs9XTQ/XixNgoX/nlSKtr13rU5lnTUevms8ag8KUk2bcUtbkpvZiFEKnjAJ21rUk5V8apexlVJuWbINnwM9G7ze0PePva8Anf5Y9J0gWHtfNNmfNo71yYWjjOXz0PBNQDxJyxsYYbe69Lwy9AIghZ0Q/Fj0gyloCcYSQch1212U0F9vbe/l7f9nBwOZmVRjyrhSP0Z7VmTTdRWPy3jlmE/jitz2FXNnlF3/uU6wWsQffIdmpfgtRbl7d5V/+134nb/uSL2I2mzFtGirv8ZXHZ+nbe0dV2XdX1XhfUeVP7ypw8OP/5w/xp77zL/Py/SWq8LYbM37fH/h2fsPv+L1I3dhkCgzEJGs4EIgnLBcrPvALf42wtNhsTQl1wsxXfOk73sS7v+rL8ZObRtWLnKxlYzgenyTe/OQVO/mPhMcSfoQ+jTJkD4QB5VtLLoUYL4MW1+arz2sra5oJ1KR7qLj1xHUmV68jbh9x1i+XmfdeDU88cdMAXUkUkofr+v4Oe1f3qGYHQ0IeCaM+DhqFQnSdKjefeIKd6Q6nq/usFK5UwlvefIvHnnocPz3YGJuUv6dck8/EKFFCwmp13HziBtcPplTODqZSFdQ5aic8fn3G/rUJ6iZ5rh3NJHDz6r5ll0w6UmH3cKZ/A6P2S7/zpV/HMpawsJ9TErq5Qzy0i0gM2RM835eytkYcNI0HIk6g7cxUduVKxVO3Km7uTXjmmVO6Q+HwKCCqXDno6FZHXLu5z8lph+qKqt7hdB54+c4xlUYm4RWuP7ZHPZ3x8p373Lk/59reLgezivuHJyy7BOo4PYmExQJZReppg/PQhUCKymxnwqpdgquYTiYsZGknOorQdS3PPP00NJ6r1/bpTpccnixYhmRaltMOJyZd9nBqK7CVcwm3+boUX5XUr6uhiqwN6JmUHxJ7Fa/9c8H04IG3TYJUKbOtaxklzTEw0z0pDGKdyRjzPKsaXjtrINcp2Yw1Vor3K22EVYoQIVok2+G3cdv6OntpN9OMsqjX3jMYwMpzm3VtalXOjFOv7loHeNvu3VZK/W4c1WCVZM3F2Xs3y9n1s/bUxvvOznd5d0wpZ8ItI0M+UKu8RIexh56u9gcuqdElEJyn15psgs71tp/vo7K9b+eXX3YwsJVBDUqdAc2N46Tzpk3AFz11hT/6//rtvO3rv4bv+Nf+PB/6xWcos6zJ0pRq5Xjq2gH/4m//zTz2jncgrqJsGBUdreGU36lFjLP2lTamjne89XEeP2hYHLYoyn4F3H2RdPhZpK7MnpwiGiNCQnMIHAqr1YoXP/0ZNGRVkBTbHKxC4n3/+Kf4uh/5fg6uH4CLpNiRUkBjtIN+Arz03IucPPsMb7wqHLeJVbBjkE0lOCBIOJvj/kHlVdmSyuLvqYGaVkXtpMYkgZ/9hz/G13zp25jsTG0sSho8J9y/e8SnP/QpazspgydHRPjgBz/JL/zg3+Gt73wzgpJSB8ScKc1U+4Wo2Xjamnjm4x+hOj3m1o6wTNCI8vH3f4Cf/v7v4+qNK5bOFSXFDlKXn885JZI5QlpQQ0TU41CefuazPPv0c1nlZwQ7aeI0KD/wff8b8eg208acB5MIx0dHfOy9P8X1iXIUYBktI2YBijoSM6Qn3gasiutqnhU2CU4WlnjlJTtIuXfaylIONrQlwSV1rezOsuNhC6susbvrcD6wClLccmk7IzSHxx337sLe3pTVPJKiMJvt0HYtSU/oonDaJmbLFaujBXfuLQgImhKhbVkuIvOVrQMFIp77pytk0RpBS1k7IpFFq1y5eoDGDrfTIKcrrlzdZTKruXN4ZL+tAof3T7h3HFl0JQ/CyE14CJwfluUZutJzq36tSx5I2ydnJciBadqVGBOSLNxXMkgYbxljJoVIxzXmsqaNUPqIDZd9ksrzxrwNCKbse0FPyAeKuC0yoWfSY/C43hlrR2HeBWTY7hpc/XIq5/XIALu26ainffOG/hVwYyk7tNeUrJXSvHOkcKs7rT2w6YA59H874Fqvd32MtPjvSJG8H0wsh35u0kldu2dzzsvfJVKq15qY69EgALnNfgx7P/uJY2YDu+YrMz8XB9ANMrEVfF00Ng/sv17yzs9HnoH+pRtS7biomPTnBCaV8O6npnzbP/2r+cpv/ga+5y//D3zww8/3J9A5FXYnwrWJ44lpxe/5vf8cX/k1X0wz8ah2aAykEAid5RaIsSNqJISOFDr7LQRiF+m6juWq41NPv8B7f+ZTHJ8IsVP2a+Etj1W88923aGaC9xGLUjBP9JLytvEVhwvPD//kS3z8pcgimVe5d3aq4vWJ4/GJ8u43Nhzsw2wHmsZBMik/RKFtOw5PI8/dDdw5drx8FLnXas6YZmGLsQg4KpnB2ojC+ZP+6r1RzeLtxKRQIwxGpCsHswau1I7HGuWL3zJlfweaygicc4BT7s8dP/fRBbeXFrGR4y/wTjjwyrtv1rzllmc62dhw2CmCCDn5TD68SeHoJPHyvcCdU1gFJUSYeOHmlYbdKTSNedw3lfQmAxFjUkJ2fswJIirjadxdwQee6zjqlDbY3Kma38F1Bzd2hYMp1JUR1i4qhwvl3lw56WAeB0DgvCPENDpOuaeM5qCI9qatAfxuk3yLpKSIz/40FVQTI3Aph2Be23fszjz3DluWnR2UcvN6w95uQJPw3OcS945LHn1hWsG73rbDW968x4c+fJ/nXgq87a3XODmZc/vOAodw9QB2Gk+7SswXgb29KW9/2xvAJZ7+zEvcvrvIPgw5AVE+2hW1/uztVuzuNly7usvjj19nvmp56aXb+NRx69Zj4Byha1ktltx+6Yjj40SbvJ0fgOLV0pDHZJEThSltc7ezoTUH1zKO685bw1iqlrMCSvz9QGWLFFykZhn9K8/a7dKfczGW9NaYgy/nDAyyRv/FOpKBdm6rmJRcNIXjfhWGvl421kpmZH17lRwCm5/LYRCiZ8fP+mCgpfhIFH8MWPfaLxBt8Hw/37nNudxPiuTLKBQxM+3xFG08f1nHvvJML7nklvbzsWGXWzc1lOcLUNkEA1u48Jn3jtt
cJPm882XkiC2SE0uN6iymko02j9sWUzZbpuxnsoWen0/7jX78E5dnYCgmIW1OtgCiFv898cLBRFgcL/nBH/xJfuj/eB+feWm1pg5DSmhf4mi14vv/xx/kx37g77G7A7Opo7Ig3P6oSpedCs27vYQVCiFatMCqg8NFohZlry6haXD3OPGBD36OyRR2dxxN43EaTeHsTV3oRThsa+4tgp2wSF5dhVCirFBeOErcW0auXqmYTDQfVmIW8zY6TtpIiwwnH5J9HtScGVtKkhazb8YNIPAgxn/Z+/LNiJTDP7PTk+YTGFXNfi3KUYRPvNRydWZn11feQhAFZR5rlimHJDHSMKgS1fPCcSB62J+pOSU6C/nzTvr9UmyOqB34tApKlETtbaM5gYDwymngsFWmU2gqsWOwvWIhhc5OKlQLN/QCmsocCsvkCIOsuDYGS4E7q8RShKl6i1EXj/pIVSWaApSwCBAVO9WuHJ7Vn1kgAzTIQusa3dku7Zq04L2wt1dZXn4svWs1rQzUuMR8GWhzRIzzymLRUrmKGJVlWyi3sZQuwnMvLXn5zpK798zXYLnqCNE0Zw6lricsVonlPFAhVClxenqMn3rUKfXUEyO0q9j3Z9Z4mtqzXHaELpLaDuk62tWc/eku3LhG1y1wmeqIRk5PTlm1KW8VU487NdDlG2GxyoO0lpluO+ErUtVmVMxaGdHeYfjHiXyGMDwlq5+NMG28a8zI19tmDno25imvB5eZjJT25+tojmBhOGSs5BkxZjnY7c8vkpVxVqFJmZqPaR8BD2VQnYuOHOus/jMMuACLkmq7MEsZmOf6mKw/XyTk0gfNdLEHPaOymeXvsgLMWHtSohl6zQgg/Zk3254Z+rHeFz3n+7iv238vB0Gt/yr9M8OpjANAQhlFd4zDSHPEkzM+lcq60bPjc8YBvAdplwNT8IVMR5xLj6goSMqy3jWV0DhzXru/iiwXkWU3eM4Kdqa596ZGrx3Mo3J7EThNsKd2ep1SUKolz/Hie6RtR8+aV3aH0LaRNtqEOQH1oCRWmFQao8Ops9DFlJ1jQjKVrSgnXaTk1ymoVzHfh0ntaXwkRGW+UqoVtDFmlZAdapOAZaskvB0lWiSTzPDs6FllyG8uXFYF9rBzYt0wKdZ516urvGTHTMgABVCLX182FS6RY+xtDLvYBztZXzMHFLHMYTEpp21EKqFxCdGI9+bhnYoExjDnKoAXqtpRdckkcZGc1tWS/yRVYoTKe8Rn3wbNjlcaTSrP4CpIohJH7AFHHoOi0xOlBHRGteOxzQKS8N68/psoqCuhRZZLwgvWh5z+uEChMm7ajy89nT/Xv0PN9ti1dihQzCoh59UyWIrSNC57mRtYPFkox8cdDqELVpfL9uIE3D+K2XxgkTu3XzrCVbnLDpz3NFPHatmhSVgsO16+fY+dgx1WXSCkSF15QmvankqE3cbTVELXQjPxPHbjCo/dvIpWSrda0h4fUk1qJKY+bHNnd4cYWqo6oEslrkKO+8+H+ah1dnBuv1hK69Ww52gGKMwQHU7e6w03wzMDMZV+9kr9RfIzQl1OrVuPakABVxi8ZrBj+6Jva3lbjohYVx+vz/9m2b7n+13Sg2cbiiEKod9DhQkJWbuTNurUta/D3hgLE2Mpdvu8xDgAgovKRTTsPA3BuQywgA4tY7tpdih9YDRno85eomyCiPJ8ATV25PYAQgaAZCe69mN3BvOsC3flSjkW2U5xPx8oPZSwt1FeFzDwoIaVjVHOGW+8MK20uPPQJQhR+hAOJ6NNo0pdCbUkkEibFB8d82XAe3J8seKTZgJDLyHZsbPRVPSqtK0dl5qAKEKURCVYQokgdChtW1RbmTDkPAEJZd4JKeXDligucM4kFmyhhBDBJRatEWMnChqIqnQxsmwjbfTW54Shd2dtKj4CimYHsuxIyOUXwKszGRgz9LiRx/VYyi1Sgw6SV7a9+gJoHNnpQfr8EEWoMqCsqHNoilTiUVIO9zQmL86c6qJiXvTZFGsAOg9MssOcnFo9KSUzbyikGPE5m1RMiheHmQxkUF+OCU5mKLbmHKREChByKGzJU1D678SiXgI5lC2peW57sSO1bfR74EshcOfQnrV5MiGBrrVQ1KqC0CldOzi/aVLEFxov4DzzVbB4+SxdiJiXs+QxLEwRDLTVYu+1PA4r055ldbc0jhs3r9FMakKITCcV3SrRLWJ/+M+qi3g8TpXawer0mHtxSb0zNc1bGzk5XTCZTU0L4Suq2lNParoQcQRqb6zXzCyDhHoRjR5UtfRSVy97FlwwEiIK1B0Oayqcrodn/RLo58iVJ02wGJiNrjEVLQ8CkgYhcHDaKxtkeEhHKFR1xKzzb2ecg3tbwDB//SN9qOWghSiAwMwXIyldyimF6853ayBER28Y8a/1rXIRIz/3pzWzyqavwCY42QYIHvT7cJ915ozvxQPa96CyqR25jA1fVS15nSvRFv0vG2Dw7Px6b1lsS4qc83wXXm15XcDAWbSyvoB7MIDtOZ/T/JIsqkAR7Lh0l0NjyrOmuvZOKaaYEISVB4+ji4ONzJLkZISqFm6kKceCJ8s+mJIQgqLeE8UkwZ5OOCHFhGsTXXL4CjSFnqlFhUVIPcExpKAD48YTNdFMKiYzx2zPUdeOyjmcr4zxi8fNO45fPGW+gnmnRBwpGlOLWmLXhwjhdfuXbiz0yyHI7eq5EsJkKqpCXDVL2CJDiKdzwt5ezf6ViskkazCwZFCyBJEug/VChm2yxCn1RNg5qNjZyZng1FKWlmQwtj7sZMkQbEwlYVocdZbZThN4pZk4Jg3UNXhxOC+kDBq886hY+lmnBuxQU0cDxMqI8HCKnWkEyrqpZxVNY3HAOCXiWS0jqyR0CB2mESljhMsgJGnf45Iytj8r6YIy2KLLRhcL5fMRVym+MaASO7u/XSXqiQHfpBCTHQUcgz2bElRVJkb5yOV+dYgZnA72d5ivOto2IuLxjaclMtltgMCqW3F0eB/nHHvTmuASq9NAtzJfjuOlJX/SCO08crgMtE1LfbJgun/AdGef+y/f4cX7J4gIVw52mU4SJ8dz5vPY+wYgIM5ZBE0qFPeCtZz/a6r9oUjx3lJ68G5n0K/BWIZUPQXEj/ZAYdg5k2l/mFB+ccrztAYIJIe0re0r+yyq3gKEndvw+h+DgHP371jO1l64AdbCJwfdpKnKpfgyqq75ZBbQsKZyXxuFUncBZwMYuYgRjVXxm2Xct20S/vrf5wzDlmfGzxbpvAAye9mZlpQa1tpzeV+F899f/h5rJewaaNQeoPe5bvqBznPfAyRrn2StrO3hi9t18frZXl7fswlGIKCku7UfhlXpcs7ymOw8cbKtJGhhPsW2ZLVFNftsQpC6Zjr1TCZCM81hhc6brdg7fCX5eFGHcw7vXB9y6HLSn+dfvMPTn7nD6VIJrSUKevyxHb7+m7+K69euMp15vPfUjadqGiZ1Q1031JMpixD523/77/Pj7/kEXQ4Fc1mqX7aJL377G/lDf/o7uPHETZra4StrR3Keyleoq1mcHvMf/MX/gE/80IeZB2
Gy42hXShvK1r54gh+Enh90//o1RcURk5DUMv0VaUfFzumeBuFrf9W7+BN/6ju4euNq1hIMNvr7h/f5D//yf8WPv/fjBCR7wVvo1q3HrvJHvvP38+YveRuNE0M4SbP9LNh58tmOndTaoAqL+Qn/3X/zffzYj32YGI3x/7p/+lfz237Xb2Y2ayzRU0qZ2hqVlqxFUWfOUjFFgkYa5/E4XnrlFf6z//Rv8tGPPm82V2+b0AFf/TVfyu/6fb+T3Su7oJGgymK54Pv/1v/O//H3f44umM9JzBJIXXm6lAhxWPVl7ixlwkhUvXg2KdqE8leI5hgZoyUqUWfMXZxQ1y4fDGSUvq6F1JnToKq1x1J9G/Pu86EjOPHcvHmdu0eHHN49ofGeg/2G5bRFvKfyDSrC4TywO21wdLSLSBtSv/+COlZRabyB3JginXoqX3P/ZEE6POX4qOM0wsHelOmsodKEqyuSg1UbMedUGal7+5Voa3jr+jdkU/pdIjbIcSXjekTdwBd6YVd6p+RCWba9JyVzqBWfJTpnzLf4w/RgREetHWOHohVCsiOdAe3yRsMsembPnm2LbnDIIZuFbqwZMD+pwfRXAIyjD0dh+NhkXOXaGUGDs7Rluyp/aKrIUH8Zj01hZpvjYDnlUTWv+dFzY7CxCTxUddPVY23LrTd3bKffRhMfrEXY7Mu27+O6zgpv5QbJa2gAU4VmSOYpugUgPwyI2VZed58Bc3yDkWKrn6ComCogKW1ODbs7cXzVV30RV25d5x/92Pu5c3eFMQdwSekCHC2g3pnxB77zX+dLvuorqGux3AKV5RpwzuEqS+ojOMv25xzO1/bde5zzgPC5T/0Sf/hf/Td55rmX6BSuzjy/6Ru/ij/+7/57NPtP4l0mFOIzMSmhIA6Yo9WUn/6Zv8oypH4TOgFJ8Na3vJFf9et/PfXBG3BqKXcta6HPfVIIh3z5V7yLH/x7H0YRJrUzByuGVMRnMfvZch4qfNBi2ZRmTLPhes/xMfHQJJDgy7/s7fzqX///gMn1TEaTgTaEp9J9vurH38t7f/ITdKkcu+wQFd5y63G+5Tf9s+w89W4qrVEi5vBjI1MOqBG8aYSKOKOnfOhDn+EHfuRDzINjt3L86q/9Kn7Dv/BbkWqf4hA2ZJcvxCcz5BwFknLdiLI6vc0//Afv4WMfe2Eg6GK2uq/7mi/lN33bb4HJPpBIIhDnvPLyPX7gh36BZVeOLyb7SuTDpFRGhCw7gz3Uht3Iva6ZuVeCJtenKK4amO14fK12sFBraU27tlhQlBKql5KBgZJ8EQycJRW8r5jUNSnC0dGS2VTY26mJIdK2S5rZjBs3DuiWLYvTJculgRNxgmQpPiRjNtOZY6epmEwa9q/s8OwLd5ifBFDBKxaieALNpGbVRYIk6qm3PPtJabuYfSAKtTifEg9ssAAIWSfe5zw6lp56VXxhLD0gKBJb1iFoAXSjevqMd+s0TWTwus+kfE1l7XKyKMugui4pm/SnPY3pW9Uvne3AvvdzGO/jjW8Fi5pfzCBYjevZbMv473Up93xhY+xhv9nGdWZpfdvUFqwJJbpex3Afa9/Pa9u4PAjEvBa1+xj4XPR7qb8ILSU0U9fQzaC76udWsBTw2OmqVp+7cH9ctrxuYGAcYlHK+CxnI/rGRrqYUDugjaeeuMLv/t2/lV/zz3493/VH/yI/+Hff3zvC2ClzMG+Fpw4mfO3XfSNv+dW/FpXKmEmxhVkD8vbOaDE7s/VnTqsAgTe+6ZRbN3f56CdtU+3UDub3YX5EtfsYElucJsw2F0GT5QpQSGnF6Z3bOLWYU9vgsSfET3/sY3zqx3+Ap97xdkTsuRQiXUzE0BFiZHVyzNFnPsw7bzU8fy/SpQ4P1AhRrM82WCWrlwGRzXX7sI4ka/eNiB+Q/asysRVzRKtEqZzS1MIzv/QhPvmTP8jNN74xp4FO2ZGmYjmf89LHP0WTYJ5PFxRvTP7FF17iU+/5h7ztq1+y2H8K6rfxXc+1oSgeRWiXS+596sM8sVNz2MGEyMd+8j186h+/m52r18DlpB29k4WSYsgHiVjdaMpE3JEQbt9+hZc/8yxOB58UTRBJfPhnf4Gf/bv/M7vX9xAsV0G3WPKZn/oJbjTCvZhoVQn5nA3NDoQlVWwvDY0knwfPx+YfJuGmBN0qIs6TokkJ9RSoYl5vjtAmuqUyHIuqmCrZmFGICcQy/I0lrBeff5kQOxBPRLl9Z8nuiTCdeBZtS1wldndmqCrLAMug5swpSsyA0VeOSePY2Z9ROeX+8YKq8VRC9vuwddMF5WjRsgOczhO+cnboVFQCUDeesCzBs5cYr+JYMyrbTGjnPY3mEFroY9PtwREAxtaUOeGPkca6PVpGJ3OWOnTUhqISHnLYM9ImlFoLfDCaZVLiOrPc7GvRpqQNZpdyHveeYeT3Fs/3TW3C+Z7pw9/bxnMbMNjGWDdzGTxY4h6+bzuwaCjrEQlW+dm6xtqKIm1f9v0PKg+pmWfY1xY6bErv0khsaW7VuNh5KCFZaLvgLwBml2vU66sZ0Jw7r6xJOTvQhqgdonaMsQ8tL3/ygyx/1RuoU0clZqN1anblypnn5snRnPf9/f+dneYUqb05kMVgh5+EjhgjmhIptsR2ZZ9xRdcuCG0gdoGu63jlxfs0Ry/wrpvCqlWmVcsnfvYT/Pd//jt57I27eAKaTA7UmCzNajKnsvnK88Pvfd4cu7BNmLAwwa5Snv7sHf6zP/dXefOtmt1daGaCdnZiXhcTXVLmC3j2lZaZF2YuEJPDo+bcpXZ08ZBqu6g8z5c4t6H6bdfXCIHAcHyjETXBGJzNoTkTNuIQ5/mlX3qRv/Sdf4E3PtEwm5rjoDNlBydzx/vef5+gFs2REDTZiY2ffvk+f+nf+o/4srdM2NvNiX46WySSSjx4PpdBskOlCieLxDMvLLhWK9PMWN73kx/h5U/8Ba7uOnyT8FUO5yr7KiZqX8CTLTzJiT4CFony9NMrkNTvQ1TpFH7iZz/Jc9/5l7ixL1SNImoJol45CRxUijbKMsIimspc1CJMnJSEP3lsGdtxH7RBS/DoOE7Z6ogxS+IZnBVp2DnHahEJHSMgwNqn7TmX8cEgxiSUo5MVVWVK58Y5qspxPO84OU3Zl2fwz6imjqko06rGV57T05blPEIXaSaOaSVEcUSBw9MFJ8tgQNGbxmQ2q5nteE5PWvMNCZCyhKSaLEXwWtu3l4Hojcb5nDW+/bm1q5kgrQOA8fc1tb2u3zZmkpvq8SLsrDvKlXasS77Fz8mN1OObZayt6P/WAj4HpmiSJ33Cqt5sckZ6vlh42AQKFzH7zfu3aQP6vl6Ke57dL9ud9fTMO7bymLW/dTRP4zGR4d5CEs8AifX3bNa9rWtnrw1raQ36lfGjmJQYrRtbeM7Z0eKbq+MsEHhwjgHgC5N0CCkeuMXBr6CDYcA9if3Gc2MKTx7UPP7EFd7/9CHP3WsNCakR250Gdhthqsrbrje87fEps0aZTsD7aEk08gYwfJ6yU1c+hAQhRDsNr4vCyRLuzZVlEFbzhPMVtYP9G
exOE7NdMSe1qiaFlPMX2Il481DzM5884bN3IwHzsHYOGqdcnXoO6sT13Yr9mePgyoTp1CMpoWKHw0RVlq1yNA/cP4Lbh4HTJCy7HC6npoYtx1/HohHoF+LlIOyZDZgJi45+l3zdSTJHvP5sCHPudOKoPRxMhZ1Kub7nuXnVszutzAHUJbwoy87zwafn3F2oHbiU3+FE2KngaiO84XrDjT1lWiVSciTnsDPrPDFliV69tTApqxA5PE4cniqr1uzf4h0TL+zMPHWT8FXO/a7l+GoDM4Lic9RCOcAmaSTg+diLgbvzRDveYE7YEceedxzsiOWGcJb5qe3gZJ5YrGCVhn+aAdWyg6gelWjJj9SR8kYWcX2I2vnzZJuiAD56NjQwShHBVYqrNEcc6LkHs2wS0MFkV4BiYjL1dF1kWjkO9hyrVjk9jQjCwa7j6s0pMXZMqorKeXANqy5x5/YxhyfBEg7NHDevT6kbQZ0gvuLweMnxyZLppOZ0Edjba/BEFkvBeWHZBkJnRLDJzoOL1SZxPc/Z9Syg3WRyo1rOeabcmfq/x+r5ASBkJ66MBLKuMdPcbVwi23jLm/vkXes5EcZ28DGj3VR5G9OXoSclU6Myum8AM8X00V95CCn3TFdGwoRVZaPgWG/nuE8XmcUe5MP0MCa1s7eN5+7svcXhXDfaMAYvdm/qx2/8nnWwV8yf6yBtEyCU3zeBYnnvll71/7VglgxER4dIDUcfl8iys0Uk/ZOZdEjR0QIevAespHxioXmi194yur1wHHju5DZ3TxJJ3RCOlkvtlQq4vwp89IVTmhp2disqb0mDxJmV3WU1KoBkO33J5BdCICQ7djWQTIuADWQUYR6FtnUsvGMSxaRMLNtcUpP/VrEjFPVHRt/GQ83O74Cui5yI4qYdqxjsHHIXIamlJE6OmBxRo2XcS5ZDQcTi10VAkjlYFjDzquZhvOkYE5ey4UsymcxEs6FUTCFFJRbBUZZYFxLLzuGrkB3AoPaOaHEhuS47OlfEHEU9ICnRhsg8xMzEhOS6TO8UkZTjzRMkk0yTGANxLlnoqIJg5wmEpDkhkp0T7rImAjF7b+XNsa1IBDEp3lU581r2My+7VQWvdtKfSLK5SJKNFSa3V87Wgg2epVPtsDj6SQYaBQSp5DMM+75dTOwK4RjNTn89fwMgdkrsCjG+HBCATY+ELNWqUnkLQYwRvPc4l0hR6Dplseioq0TqOtrYsgpL7h9HupBovDGGaWMHOi3nS6rKUU9rDnYbNCkxJXZmjmltByjtTKCZeibBc3rY0ibNWQf77l2qbHfYMtC/Xs4TarKEnr/2qZ/78Sv3pJzUJwsxUmITTLuybi4YnNIkazzKveO8BGut2CrxbmeMpv0v/jFjr/Xh+Z7Z5HDeFNff81ocziRLb2WcttnqL8/MtwG58+49K2Hblh1fLPvr7H4YlGlngcv2F0r/QilSEtv6OICw86o6Tyt+3vu1PFOakPmJXTNn6yFyZTu4upTyhS8AGBAEKTH6jAdRclhZzCrJHFOZlKSJLjNG0eFstnJCVOMdLgZiTCxRknPQBars/VtCOIqGuByBo6o5D74BghQTIbg+axjevMSJSiv5SGPvcThC6CwTXWd5BJDEIpjHtmq/JGzDJHApUaHEAJ0obVsYukedZT+KSehSpA3QpaycLwhSLSTOzCw5xCkvgp6ZP2CDn6sC7CHFSGpCzDciL/4SAuUk9cCmJB8CCEHpgtAGy4JeOWcOnjGAmP3cMqsVSmsyheUOENRVRFeuJ8Cbl7yMQvRUScQcZyuougxINEu6imLzF5J51TtNFlGSKXzQSO83UrKBqaU+TqN5o4RTCj2xRYUYgCofaZPs2UHlC04ttFJF8iFPUMmQOrqwJh0ddPNayrDZ5cLqzqhxRyvUfgdwxKDUjeVLOJoH6qknVeYOOtltUKCZTpnWlflUrAKs5swmDRIjk0nDzkGDiFJFl3PvK5Ii3SrQJuXG1R0mTcVh20FIeDw15vzonGPZJTtzJNOF8/pwueIefMslyhnHWh0AgaJ2INkWJtjvuURW+ZLT9GbwUfbwBXt3HSAM4FDy2u1P/2PQMGwHBsV8cVbb8CCGsc3EOHYm3NQCPJxUvx08nHUypN9nZ8vmfjrb983vIGfed9actMUP4YJyRul6zqND24bnLhq7onDor2eBy7Sf2ZQ6AgRj0+Jly+sKBvopyE45/dGZRf2BEWXnTC2SVFnlEL2YRlq4gqpGOQS8s5P9FDtTQGul0pyHPqcENsasPdBQNflXkylMU8IcoUioM5WuRmdMxtLLUYdM8WWQIDVFEpZ/oEsll5mWRIGosyRGnQptAEmQTpV6krIDSKTEgodo2ol5q5wGWEUpQqclJsr2c1Vd00puhlA9cC4etFmzSttlCbmoIpNCPk4OryX5kadNyjwotKbmbbwlDorJ5g4GQCY5h0DURABWHSw6R0ARnywCJKeJzk+a/0UyxzdNSqt2pHJUy1+PU6JAFGcprRVccpavIOUxFPDigZTV9jYOlbO2WAy4A3X0EQcJOrEogWU0r2+PheLFBB3WBwusGAJmzbne2xoShZTXrEiu/9VDgbOMY50APKiYXLMJ/4yQxAQz5/DewKmizGZ19gUQ6qYGKlYBqsrTzISb1Q4SEqINvq4zYNM+jC8EM9f52nG1mTKbVjgv7OzO0ORwLqJdi6/N9BSD9JqOy4zF+ev4wZRwzMwedHvP/EcqYYU+yVi5x2iL9v9KoKNpELNpL6sgijOhuIF5FYl1XfU+EHsdKkDVEm2hZBPUtjbnd5Wc+duYzYgpnVc2GadmFGEk4fyQxIvqG9byxe8/qwkYP7cuDG22d9Oc9KByFhBstvuBVWzcU0JMS5svVtmfNzdr13Joqjjp/WzWwUARmrMG/BLlC5KOWPuB8cNAi2Kw2Zg1WRpTyTb9NJK4S5/FHokpu17kJEJOBB98TsxjkqxiKW7tmADTSaRkTC6qob8UpU8LrBG6zhiZKLjOQMvRKlJV9LqmlJk0uJxq2NQ4iRLPa20/7oR5Z85f4HDziDgMDACoxanHZNdCUkIyR72EqVe1zxqSpYr8ddBEXEzPznMmHH7LNlDN/sVqyUpSVr/3z2oGO1ZRf+b8YZuoK8E5xbtAI5YdchG1BzvlVECPsAoQAixi5O5CqWxosjmHLLIYcDDcbxkENdmYzleWTbAMhDihbk2tT2mjmAueiDFsl/X0RX1YOds0lUAXTHuRSGaWyOl7FwkWQamjw6/IKZI1p7IWUrA12GH+HOLMftfF1Nvly5ogr7WHguznlMs5Xw33Dk5J29aJ7bWolomz8gaSJQlaKzEkTo4Ck4kymQZSG6jqirpxOI2cnrSgjulutD0UlNU80EwaSwktlmpa4ooYHV3XEtvA7nQXrSqOThYIQrdKxGDzo1vbudn3VwuptknhoyPFGZmMWJe47e/8bi05BqwGJwODh5yboBz0U7RNW6Tf4iVvAkYJjVyP9ui1EXJWih33ZCyln+eT8DCS+/ax6n/IQPuslL3l1pFWZbi+zYny9SjbNAFnAcQA
vC5bz3ZTnZzzvTwD0oM5OftOHUv7ozmNg8bHOc2gIJtHH5LEvM5Jhx7ArLK0FJIxxhDt7pJLflPySSnRIhy3LtubXa/q9UvN3uSmdUiqOUpI+o1lgICcIRDItt2QF3hUYwySSg4vsJPRcqhQH9JniEEQamfStGWzs06rwEmnGdTkSWoz45UiVRigUKfmIZpclsLNJlfq2kTSw1IV5DUQxn4ORmhfMGas5GyMDCrK/v4gnOa0v9LFHgGbb0FiUkHtc7Y86LUsCWPoyVkCHVmm3nqgPbGKoJKla+uoU0FFeofKtXArZ4jYbPolFMsiIQrBKtK/ZO2GpIg6ZVYLdTXkICjSXcDZ2RUpoW0O7M9Or7E4v64hcnBqwDPktoW1ackhDOiFe+H1LprVT5ocXbBcCYiaU1+MeA9diKzagDsVask+G2oHRnVJWXWBw5OIWvZmNAlu0VJVSuVtw62k4/DEzuTYn3re9a6b7Mwq3td2vHiv5WQeSCnnmshDO1b0rxHvLDAgfQdefTnn8XWV+8X7yznXt8/W0VDHJpPpATcjYMF2afZBquxtrSrHAW8+f365PLjaZg4o7drMyT+0e/z8+PprpVljZu3Wxudy/gcPCvUdxmVT2u8p7yVNPNuurc/35ebgLPjYzC9hK0syb9JLmsq+sAcV9UQ0286QHIpXulNs1iYROxl1S43Md1G4v7QjZbVMWGYkGURn5qbGsHVz39tNLt8smRGmVI5ONb2CZCkfzelKHVRO8V6oJrXFSSOE1YpVGykpQhWz51TOHPEmE3j8sV2uXL1CXTVMpo7KQ13XiK9BhK6DF158ieefu83pvCNSUimPGN8YPJZ+XmJfnYeEz9x35rkyKYPdsRKhEcXXlh5o1jh2po5JnbUOzuO94/C4Zb6wse80m1EEfC00Tri627C3k+d3pHFJ6vs22lEyYump80l8JwuT5jWro51T6gomlaM3y0LOaGjtr5AMMBJaORI19cSRCDgVYhJCiHmDOUJ0tF0ixkjts+94snXWBWXRKZ0a49d+BZfXlWsbRUcx3/+ElCGVlRGU3b0aTYHFIh+YVdJxe0+KiZWS/VYkJ8MCnOSMn45WA8Epde1wYsnBUDtgatVGoihvfOKAp24d8OKzr3DrYMrb3/IU7/vA57h9Z2ktehAhHzMCYevxvJctfYKqwkgYqh4DgnLPppatr2ft+tlrvSYgL0rRAhz0gUxyW3TB2Xc+eH+ftcWXfg3XHkZxtWZqOae94/cNdY9MI/br1ndvv7bZr23S9sXr5zxGvdkX2WJ+Oe+dn5/yYEK+1RwjRZgZ2lYyXF6mfMFPLdyU9we8TGbEwMhpsN/7QlabD5vDOWFn6tjfbWgmdghKM6mZTmqq2lO5yg6mYZDGnQgkh+BxHkuII8Lx4THPPfsKi3mXTyi0zG2Vkx4ENDXszhxXru6wv7+L8zUf+fRLPPvSnJjVjWD+DBPvcF6ZeXhqJnzZO65x5eA6k0mN75lnRUywapVXblT8XDjlU892LNqsls4oT7LN2yRk6dfkZSSBByPh0RIXk6wHrYP1yaR+C/+sK8esstDO/Qlc2bPwO5ez3EWFdg4LGep25LMDxNNIYk8ijzfQeM2pNk2yiWr2Vs1moKLCDxHmApOgrDBHQRU7lnjSQJMPuSpAUMoBhAg1NpeLpMxTJDqY1OB9Bb6mbVd0rSOliPPK1UaYzg6IXYQUiV1LWgVL7NMmTkWYR5gH7VNjC+Q8GDqCBqPxvSSlfRgzwHnPbmMea/d5ZTKtqJuG1aKl61IGT3k/7UxYxUToOhSHSxZB08UEWE6Buql4/MYeTz15wKypuP3SXe4enQBC4z3zRce9k0iMicYnugy63/LUHvePDvnpj7zIqk38U18z42u++Bo//Uu3OT5NhBDJirgHgqdt9Hobc7q4kvJxMQHdPpbl6GAjwOsJgsax7PkNCmTtwNl2Du/ZVPMXsHyZto3NBWfvy+0QRgxZ8rWz4OSid2z77byxNtNHomTmHI/NWDC039Y/Ly5Gn0QuJwVfig5u/L4O1i71mv5d2+p+kMbnYUpZR5uCbhHcLlO+AKGF68qQ0Zax/63t6hwNsEX0VQUh4sRymeOhAQ6ccqNRHrtRMZ1V1LWjrqCuKpq6wUm24irE2LHqAqqBJBWWYti0EVdcDceO+5qIXVbtO2VaC7MGO2a3gp2JsutadlRIqc6nJ457ZBJv5SzUbCKJ4ztHPPepz7C6/iKVVyQfg5rEEYLSRuH+8YrFydwYZ3GUTaDEXgLqHUZew0K6SJUmOdSTrHoa0q1IjvgwjUmxTwWUZbDjiKtk9voIxszV7NGKSepJkh3nqrBaJU5XjtCDAe3DDHvHSNH+HPQQBU22Viw9tJmWEMletaZdGiClh5TMUdAJS/Xcb5UVIJJYqrI/a9ipYDLbZ84JIp5uFem6Fuci+3tXWS4TnTpme7uQEod3F/ijjjpAI0KboWxV+Zz1rc+Ob/BTNCcLKg6Knx9P9/FclrLNHi45syCqeA+P39jl7W++zrve+iQ3ru3zyp37vHJ4zKTZZeICs0q5dvU6XYw8/9KLrLrElatXEYHTxQrvPTu1cGVnyuM3rtJIx/PPPsv9rqPq4MqVhkkNL606DnYq67skmhr2dyomJF54/h53TjpCcnz0mbs89fgOv+Zdj8Nkxkv3jnj+xRPu3l0wtsOXvo4NY2PP+ddaFB2Bi5L4pwDjjZfI+EtxqCVrEo1upUy/xrRu3NhiM19rg9IT8s1ogk3kc9Ee3n7dMbZPl/eM1c3jNmyr5yJys609dn+f1WL8y8Y71n+7bMKcAUBs1nF+G7fXs228tt378FqHB72n99vKj5bjkLdP7QYgK2rwft1c+Pqt5fU5tbDYVvpFto15KSXbXblXUIsKHm2WtT5mMbPylpdgopaRaXGy4thHwtIOa7GMgZa+sageVc07PeLMCVFz8hpnGzkGWBwFM/F6O4fcAVMHezs1Va0gAecSXeg4PgkEdXSrdghby7MjmXlOvFJjbT6ar3B1yIrvYhoRVm0kJc/xIjBfmrkBGbL/JZGcGrigwZypboTIL+PF+6BivhFWPDmKIKvx7fQ2U907Lcl8IAWlWyouCZ23aQspWA7tQX1hCTRQvFgWgqDCfJHoquLg1w9dP88lE5dkv5AubMZpDylQQtA+pLQ4pqpAK57jeeRwEZgHy0MwqeHGtYb9vRn7uxXTasahazmaL6jrihCgWyXSLHL92gG+9qCJ/f1dVo+v+OxnX+LO3TmsFAmCV8uNEEfgtQ9VLZ+9qSWLvZ+HcuF853GvKtsnIExq4dY1z56sCHdf5M7Ri3SryD6K1xXdasVJDHR37yNe2JXE229dY3+3AQV3c4rGQOo6QjdH5vDxZ1/mgx97jtvHkZOlcrhwVF64exRYRbiyU+PEIm7aVeK9917hZBWJnSNK4pMvzHnulVOuThve+ZbrvPuxCU8dTHjfB5/j3klaG6qx0KAMiXwuUplfVNaWm9KD0CLJb3tAM9Xt53bcorV3S4G1/bvObccWaXhNghy96LIS33aN4fmt0IG
b1ffW8bn5GLZKujYXmFPO+m+SSD6bVcLddx/n6WZPP78SLW0Ys4x3MZAzeezo2TbH2pMNY/Uy3Vb5vCA5vMaxV2d26Tn7wk82nJBZDiNasyqBp3EJAK3wCqiFjae6Gqx1Hof9VK1BRLDKxoY85SkApnirvwFXRx0xUxYp5BHFmHLY/aFQMmi6AhsHu4h2vLhljCBM4LTjxerJXy48cnPPjwIX0s1MbYTy0tndfJsstr+sSojMWud8FLO79hbxxTR/IquQg7u43fBS1UvWzkGWweu5l/sKUo1LgThMpwuGEwaAYH81nDN7/+NW7fvk0cluS0Bk1oth4DqJBjKuEi85qd82b8Fa6HlCE4Twn+QEFoJAQkOZTB7r2EK9TB46MTHjw+ITvYnQX2ZM3HP/pDbrULvG/ILuN85qB1NGR2m5Y7CxgIHK2UD47PWEY4XSViUlwTjMBqGOhUGZaRsy4TAsxa4XDPmDTbxuP3FdE5s3bOfNfR9z37y4jIwP0TWCUh1WCNMDZ73EJ+rtggnrYRG3PA+fNICcXYeLpxjsvFy42cP/eTeQOefB9XKZCtyhbY3ldKqOey417L9eRFhgielAj6sz4vr5oxeYOuhS9Q5PI/tX65i8WftSjpcgeqefKyqylQasuNDU2I2ZIIZ7sz9g/22DvcoZkFQhC8c7RNQMQTU2aIkaZp2JnPaBqHiEdcILhAmDV88NMP+L3f/ENSLHFlgbfvHfJrf/0vs397z0IXLuC9KZ7gPBI8fRf5u3/3H/CD739AMhwbRYlqpYE//3Pv8L/6X/8v2Lu9QHNEvMO5YEaJKxnxLvA7v/sH/K3/07/H+tGKPildnuiLDWQoHuOGPVK/X9dbaIUA6AVsnqNZtnHoeYj4fIKb3WdBaYqneNn937m1y9feeZvdxYze59KXItP3PYgzmmitbYqNKreSFqWsKBnVCM6TUBLWv6DxQswRR1sDGWQZSj6K4/79hxyfrEATWeGjB48YQuaBnDIPQtMYOuGco521HLQNi6AwS7xxsOCNQ8/ZEDleRh4cDZxFWPeW/JidQVDdkGmyMssN6pRln9lvPPv7LYe7u9w63CGzJuYFB3uJU46ZPVgjvY4G7Aaos7EOrvY4nuR5m06vSTDbxuT0bZSJ54vL5/WmsqXgr7j37ZyDily57dwGebU22J9ledWU3askz8q98WnKM+UMbN7+jb7+UjaSjTDBdL4CFY+ws509K5BKqABhpxXmi5aT5ZohYigA2XoCRKHvlTduz/nVX/0lfumXvs23f+HnmO815DggOeOBFDPrruf45AQU5m2Dc0VxZPMy1cEPdgLvffdPOTntrX5eQTXi+jP0LJLIDNEUfY7JGh1JZiCzYM08WB5Ar9Zcycof4f0f/Zj/6P/277AzAy8ZVxru5BwtFj8kUoSHZz3z1HGrdXSDsMywSkY+hJbcA0fJMK9a9JpTAYzNEy6RG8N645xatcTTIDBVnfoW1FDO+NnpGKfw5Tfvcu/uAY1XXNOiGfq4pmmC+bDiDc1RayntfWUgzGZAlniyKU+Dl51rSCQ0J0sGxduyC1bN4RA+/uBjhs4qJlZD5MNjOHGw30IDzBpntNmS2d+N3NppODgINAFC67g1a+iTstrx3A+ehx18fNJz0kX6mMjqCCrj2np0mvFB6RqhGwZ0OCZ4WMwCs3YXdQOLsGQxC8hpz5i5OqJpN4OwLyjw8qsTo/7K585hn5+QggnHez7ZUubXuO8rofqx1fXPtsf5KsrTchAu5CQ9Jafl8y6vkhFQ5ZmMAdtmb2oIlDSz0cOYNpdRIdVQQIWVtTR8KYH8EKAVJQ89NVFIUfMUnTEQkpWTk47f/ee/xw9/57vszBtCEJzLVhGQS2WA2lam2RoAoRWC17FssUuJZkjsBoPlM3Dy8Jj/8j/5x4iDEKzEz5W2wgFoA+QgPD4tZYK6sdCxvIauH/jjP3qXuRfmjdAGpQnGVY8qmmDIsI4gqgQxQ2JWmin4DBG770GV7ilDf/nC2zAeZOPva8pl3mX9sldP/bLr1vfqxc3BK3WGbNiB2DpoguPrX/8KO4sZOQ42X97jciDloTAPKt55xJWy0RBsHlPEeyXGSiXsMYTJPH1N4AriVNeSS4qIJ/WR999/QE4W/lilzHGEZi6cZSUNGRft2sEJp3lgHZUB2NvxzGNESstp0chbu46d1tOo8kkQTjvoOkVjBnUMmuljqSZJiW7IKBF84mB3h1mrZAsUsTtzeIGu5KDUgtDzM7hpjG1vpDXWvxnKweJlJf+jIkZP2rM2jn5pG/WT8wnkBZknT7/+51F0e8Ze6rUvLTW/BE16HT747OSZqwmuChs8yQKsS3GLbGbr9emDY7MhsTp+X2DJLlrNftIaq7TzCFYtEDyIKMt+oOt72rOO4K3RkfNFIaNW/OUL+bGr5g1j8l8uhoKK4EVRX55ZhYgx/yUgFX6DUEodU1aGXlgO0CUhWz9DECt9DFLavnpHFDFFLpAdSM44cRQ1QDJXFucdkhWfzSN1Ys+vWNWCjIWPNxMt4zyOdUkMu8mGdz7+Lxg18NNqyLXE+qeg7+Y1J0W0u7Pga+98jbYJeBI+ZBIRp44gjdE01Q1ElRCChYywagyD+CvzoSCF4EAk45IgEhARMyY0UoIKnK6WfPDxoxLmSSSFt964xzfuzTl5eJ/jsyXrKHSDErwSVRiysk6R3WVkMetZtDBvHK1zeJfZbxXZScw9PPKehzmRsB7bQ1JjOcy2Fs5ChuAIpwO4NTIMpJw5Xa3xBIwwUctoyYZRd405A1RKh6iRWnqqC9GsRBJjDsXGvG2Y4GUen2/jVtWtXeDahCqXKY0aNnjBBsET8x5ueN+XHf+zgGRclUtynbyWZwl3fV7kedfPi5QXXlp45UOVzeoijDe9cmHKtSYJlZx5heiEmExhj6Vl2L4WHMxECK4iDJCcIl7HVrgOIwKitCZ2ajF66jFOSyKhJQ/W3jQTY30xPqSwD45ubOkYIJRmM/Yzywjgl6oIoXWWjOW9dbgrqL8hCbm0aS4GybjQRWlK+ZxXW0SDFgY8nhwjvm78+HkyWyvKk0sd5cXcgPPx5/o61IRPgTF0gNqUvHHnkC+9+QbBGzJkVRse511pR53Hf04cTdOQUh4VPJhyRSAE45BweFIazCjNbppDBBGP93NOjh/w+MEJudSvfPXNfX79F79Os77P48ExdzOO1spjIjErZ1HpNNElWHaZ3bljf6F0wbPbzPAu07aJ3RARB03w7IZA12eOz3qWq2TzXdpVtM4SHrvseHA8kHQg5UhG6AermHDl2TRPKvpKQ3wz3l7Mcec2PquumIV1biZCqZex2d7Alnml5FXYxK8rn9W9Xlo2flm+yucYdblKLjNYthA5Nkn6Px257vf35fIM6OUL0trhOiqpiUkes4UbERpvP62LvVUfmMc3HSeqtN5hkWTz2B1aetQXxS2m2dUujFJYANXg61T+WZa/bHguJR5e7jBrLnTEHlVjnzNlbt56Llz8m+vbNrxiRBTjRrFtV5KUv0urZEqrhI0USlCCs5uPah+y
829S/l5C8HPD+PG1OCCuWGBPe/18fHgsGqjthgtKIOLw4njny1/i8HAXccZqGJyzioCkqAvjFynnjEOMVChASjZmY3MfUrHsvBkK3kEqjRQkkDWTU8KJR13mk48fcXa6JpM43Gn4pW+8zWFeQRpws4An0zSZ+WLO6WpgFTN9Uk40s+6gI5NEmIfEsuvxORPIzFqBHce8ibQirIjMnGfYFRBPaAJKpAkCORNj4qyHoXBOe1/bMDt87Q9wHWBANrwQJ1sev6qWZk4yff30ssDDtmzyMlz23tM2IR3vi9Kt9GbyJJj5RSqWTzPZ6/NkTFxXLhr8z1PmevPXP2/ysgieniY3NgaeEH1+isiFh5YCTdbsZRlPphTw06B1Z3FZIY+UuepKAt2oBc37dxs9AKpnnxVi1uIJCS6Z11kVuLlVRanIdMqa3FbzF0xJ1eI1StlaKWXbOE2vBRWgIAN5enp1UjrPQYyUZksbyEiBbVMuBEsFaUhIqSCwG8wIUctrm3NzhUe+NeY1J+OqTfMcTPO0L/N1F9u0qebpOjXEsIWCmIZoW8eXvvImoRWyJhoVggQyGRVr6pMVQrBcANQqE1LOOAloingHUVIJBVlugfeQki88/4ywjDjLIRhU+fCTj+miHdc0wuMH7/P+0cB+s8B7x6xx3G6FZm0lhWd95LTPdAmGIbFKGV06XM40JAKONkAzKH5QdmYeGQZ2W88bt3cIPtA0M2bzGY6e027Jg9MlJ0s4GzIJM4JiFkLT0DYR6RNI5YqsxuQlcyUWFpjG1565ojJlFkuy5XYA8PJ5rwZqDe9c31g8Lzr+70XKREb2eXA2XyWo+PMmr3qo4LO8r5siDjcyBials30Zd41lfHFQKv+ebJ1b1I0egwCuuL85W1OcVPYgEYclOtXYU/GqC/qgCpqsFK/uVxVpMMWto9IpVy4evlSum9IbAFQFXwMFExVhua7beAJ7L2ZLAJTSzMhKJu1p+wSS7IZyqpQxOp6xPlFGS6jBNu2xIVHZxBWhz5mhEPzn+kw8RdFzDqY6bzgIKBk5pxSuIyVAcGEtjPC0bjzj5q1J3vAQHarWjWlnd84b9+4YlD8Yh0MsCZ8ixk7pnbNyTKGEbawsMA0KKbDOA8Fr6VUkUGLhpcMRUYeyDKxMMOKJKnzw8QMGNQbJvlc+erhm8HC3PeXObosEpW0c+w5c03B3Hjjrek76xNngWPXKaZdJyRgxQ7beFSHBrM8cnykzp9za87TJsZh5FmT2ZMA3jqANfnePJq/olyt6CbZWGiHERFN6YwxlGV9VUVcJpur4uk2Dq36Tt9bwxXm74swXPg833Jx1WivPumVetpa1xtzGsN7znbee+0UjDuO5XwJU/DLlspDgVcjjq6rEvwhy03X1jMhAgdzVdqJnLe2tbW3rOatnaw6L/V0T+qJMno/R3toxm8ZAzrDMBV7fXGQl/l83zUJMOCIRdSMcidVgrLqr/8zbqN5jMVQcONLY+MiJIRlGdmPtfCmhCuPMV5a90qsya4Q2OGbeQiDBSfG3zHjIWRiA9aAs+0TslS5BxAiQUhkxKejGBVPrKZvWlQaBWEvnm2x8E6nR1dcalwtVYU/Gky2jKZHRnitz+/Y++3u7qEZidmhSBE9wJcXDSaEzNgPGOW9kVGLnGZKD6PDBTcZfWWfqxHpQaMnfEGcc+yqcrJZ8cv8RNblyyPCwywxe6DOcxh5H5nAvMPPCznxgb+GYt8JOH+ijYzUkzobMWZdYRmG9MiZJ32dasSqUhRf6HFl3Jzh2keBofGbPtzQYjfFO69mdOfI6klxgNQwkrJxWi6K/QCZ1CZIj5+YZe/TRNTfVuZED8wSx878Y5fXpqMBnNwLg8u/OsxgBN/H4v4iowIs0nJ5mZL7KBsZnnQtx3bX1zKWFlI39ut/mq+PQpZyuKobpACp18AgBU/jFqsIo6jOX2HOX4CxZa+Pg7J8T84aCF7w3pe2l9jqQsdOhg0JmBFqY1ox3ALI6+j6hWFOjINbgqG2EprEQRuOgLUp9yPDxaaI7tQ3bOS3KyIrbWufY8Y5FsMY3swCNL/EAb6hGF2GdlTnCLGaWAYKH5aB0BQ0I5g6jWsfpcrj25ouxKuXLLfsr5/iSOzjvDWgxIJ3I2LjJKgHcuLBUrfnUvXt3mM0CQxxY90IrVlmiDhrvEQIOyxlxLjBR9pqWC6Eh+GQogOaR58ED4ip5UUtMataFt6TO09NjHj18RFbFZ7Mi10AmEZNj6a3L5DIl9hrYGyIH6mi8Mms8TZPZ2wnc1oaTVc9JJzzykaNVJkVYZ5Cs9A7W0Tgy0oMz7i48MTXkvrBNSiCmyKpTy0PIA9FBJJfqGguVXW96t3sFWPjLrF95xooAEbnmta+8pYIGPcc5rjrxEzamJ5Iuvchcgy+Qgn8RVQ/XQSo35ar5eVoOx+vyxEluMmc3NgamCOP23zcV8xa3E/S23qcqF7cx+ZPCcQpKKvB/VSgGjARvrWMX3mr0g7cGRY0zL9xL7RzocNjfIiXRLJvnGEstfxZIOTMEMxB8eWgv4FXw2V7zRZ9IUaIOCN4RMYrjGpoIztE4BRIxK+tBSdkxE0EJ5GgMeimrtUvOFh5wGLd9FPOGI/ZMSdNTZ+Eyg+ByI0FHT/Gyrm9Ptb6rgfiEjTgXw6sSH1bo2ua7dJ5wjiZ47t07ABfpY4eXBnUOl3ozuipngC8OPx7EzMUgjiRKCtA2wcIo4ojJulJqSiXB1FpQ5+zIKdG6huQcDx895Gy1JKuxASng8HQRmgCz4GjnM7qspNjTnWVWUWkD7CysK2bbCDMP7dzT5B63SLReOD6Ds8GRoqI4hj7TqfEYrLrEaVK6AOocazJn654uO9aaSGpshzPxLEKidYkYKQjBVZwC09wgU5gJzivhZ4XTt/++2Sa8DWS+sO17tEovM0+fcNjnIcHgZ0iuazxcFx17LU+WGxkDm4bA8wFxVYr2vDDnE4RcEF/zgqonUXFx2T5GnNJgtfits5p+p6CphhGmzoIJzGOvClymK+ekpCxjLf/G7VAdcCOtsfpyj7VL7sXuMQrGjqi13M48PeuYWPoGZGt3qwpDUtaSx4RIFePrj0lKkpyQxUoKZ95i5ako3lUqteFPsQGvVxEwzcWzbYzVU9xM4LoEci0xAymwvCmovMGH71nMWg4O9snZM/Q9LmeEFtcoOVuTH491K0SkICoKRGKuPnNEgiLSItmqPmKOqBN8SiXBM4MzA8LjiApHnzyiW8fyRM4mOxiF8aIVvnzoaHPPulPWmjnDcXJmhufOMrPXeg7nGZUBRdltPdpYHwV/2LArLY9O1pwsE6pKm5TcgwZgmRiaNc45upTphsRycAxRjYArQ+Nhbwa7C1idbhjmV2Vd1y/LGAarRp8bp/1Z5Fk5BsY1UXJ4Nokwn2dPqQ7GtHdcjlxcVfP+Wq6Wm3iY10UjXyvrV0ueqVHR+Sl8VgipxikvWgPKmNRUYslSqUYrPKC1VHAD9ivhgVakwKc69lq3Sjwhjd0HbSuspdZSs9vUegioTjkEIhaHFsWqGEqwVRVyqVi
zLoL2+6BqzYWs+GGsPvAU+tpqXThnNMUly7EmOKJKqvdQGPLKUNB6y0HQghhcFqm5KnHnwvhf5km+oH3x0nNvXka3Y9R1Wq26ILFYtITQ0nURp0pqYsn38EgIpC4xx+LnQsBL4YbAOhOKgPhMEA/qyCI4tRI6yfUxnXU7LGmcQxpIKfDg4WP6wQiMrB0QiFrC3pv7e+y5CDFBVsQFVqr0JNbRQlXrCOsus1dYJeUskTzs7S144/Y+D1anKMosCEkdQaERcDGx6iDGRDNz9FEZkhFbqRdcMKNQPOzNhbvOc9ZllsM1pm3jA5v++LNsyBP50LkT3/gcNu+bxbPPLefW2Gt5+XJd4+pp3vtViObFE11+7MuSp+UzXIdA6VWQGxsDlxkCN5PpTHKJKWDj5sbPOMCTizOTnzDx5pW33tGI4lw1GhgVpmIZ+uLMY3BFQTiZPlsrFmocVdU8Mss9AAohkNkTG7kO2QwiYw+UosynuC5YZQGYN+rd1OClxs41TxtY3vDQrYrA4OAsWOtjjH45ltrCOi9j5v4NcgWeKTnqKV+A6xw7eYMlr6KeK2d2FzNEM2fHj+ibzN7+DjkNDCkwmy0IbYNNmJlWUpMAnUOd5SQ45xBRnHhitNALLpS15NGc0BSJGhk0492C1fqURw+PjZxKc8kfUVxOLGaBIUaOugEXC+eBs1yGXBpmZRFOk3EOnHa2PoJAO3cshoH++Iisyt2ZQxpFfCAPyrrvWSZh2QOD4IdMzlCTVMLMIwJD1+Maz+5CUK8cnWbWQ7qGMr04xzeZu2fNpK/HPe3Yi+2yXo68ypvz88inVfnwPNd9aknzxudexOuvonxW83IdubYxcNVw3xQRkA1Png3FdfFz2WLwwAj7bf12MSLoMGTAb7D+jfZA+bCA0dRinnUqme6iZiQY2Y9uHM1I7pOoCYmWEJiUES2QgiqomiGQdLpXKc8oQilxM2WftSp5DOmox0mBYNXOmZ0UTgRDG4yhEAaleMbTWFY5r6w/7UW46enVHI7Ny11mPIiUrPQtTxNEHLu7+/RD5mx9zDyaUTjM5qyHRLuOzGaB+XxBbKGNFv6Zq8c5h2swamc1ZAYRUoqWK5BlrMggD8RsxkBO0OsZp8sTjk7OJvZHKUZjub2Hy44zzbQ4gkATLEel9qiIqCX5DULfZyKlnfJpou07fEkWvbPj2J8prlE0BJxAHx3LtbJKQr+KeA+zmad1DolSElWDGTERdtuGu7c8j9eZ2E9j7C5Q8irbvvfE7UAJhT1pP92es5t935+05i54fTc689XnfF7l8Cpv2DeRC+Q/z4jgvijZSiI+99pln7vu+TblszAMbnLNl7GunnX9PrcxcBOZFFNV0QmVy2qcTTGK5nOblCnUvPG3aRzbtFUMQvdFCVjyno65RBYWmJRWTTgcL1KSB1UnzxwsYU+zKakGAcmQa3Gc4EoeQlbLQYil1bC6eq8FfRAY1JGSs/ACRelj7r4UgyOXEEiqtkiCpK5A6ZZDkDSTsc53jM90fgHUEjB3rcXxPJvgqIgKsVPNHbiQ7LOxWY+GXS5IiwrOQeuFxWJO3w8svdIv12hSQheRRpg1c5o2s+jW9PMDZrOBdezYiQ17ecF8vkB9mUsR+vWS9aqzMc0Qo5JjRnMsSYADKo4hKw/vL3l0fELK0IZsvBPOEbynU8gpkcST1OOz4mNi3sCsbXBkAkr2QhTHKoNEZdBElxUfA7pOFupJjiZDmxXnMm3wzOee0/WAdsUKdhZGyFkZciQVxEsyxBxYaObeAtZ3Az95MHDaW+ml1r4dV8/W5qSbQSBpMq4FJLutTa4uU0uzLeXEN1gql1aVfEpyXa/zKgj6i2AIXCfJblOu+u5vvn7Z2L0qXvkmAvWq3NNnJc+6fl8uHTFg2421WYGKFMBW66MSI8/VeNg4Wjc/WXYvy0K3uvF1dqyzeeeV3KY45ObRF0+PXBQ09iFXarScTNwGWt6z8kJTwBZ/ti+Wk9JVUbMhBhhyIM7Y80LOI8zfZ/NGh5jpLdxsIRAnNFiZoBt1gI6Jk0kFUSMoigrJaJjKOBRoOl/+Jd6O6z4l2/yFyPb5n7ootW5SYJCJourwPjCbNcQUWS6VWYiwOmEuEGiIMeJWmdXyiONwxqxt2dvbpZu3xJ0VBwd7zGYt4h1Hjx/zz3/jN7h3702+8pVv0UdjLlSFlAdiyqQYydowaM/jo8ecna4tT0E9OWe8KDElnHNE8QxDpCfTiMNHx5Ac6yGzaI0noPGCOKUjkcUQpG6AYR0JAnnecNw5Yj+wmHsOd8EltdADRqSkwdapKHRDNFTA25jmrMQ0oN5xZ3fO7KDFLyI/+Okp6y4yrfhJLiP9Go1QKZTcG58VZ0jUmNMBxfaWjWMvmdLPQfz0Z7Ek7WmIwJNi89dLPr78PE+8pxue4/M+P5cZaHBzdP2J13gO9OelGgPbG4VtKmOpU81MLq7JNGzbpoCWriyOi95PUsdxZ1A6yMiMKKI0TmmdGEcAWMfCkivgZYJKxV6Y7qEo80Gt9n/QbDzxWIK5g0I6ZElgXqBphGbmWa4zmUKWk5k4E6jEPkrrrDWx3VfdfJVKepBLOMKp0uWaTChkdSPKkTdU6qbY27kYW9dbIM8HkVoXu/EZnhCn1qpoysDb33av7SwgonT9Kd6ZpzsMmcy6QPKeEMwkSimyXvWs12tOG89qf48knluhwTsr+fv+uw9498ePuXP366hThhStu2TsyXhy6skoy67j0aNHDEMGJ5ZYKNv3nhJI8HRJiQJCYq2JNju6tTIP0ETjvrAEUseqz6wzGDuBkIdISo4zycyiGocC1q2yz0LMuSBAhrQ4Z+gADlzwkBMpYnWOMfHWYcv+wW3WXebdD1bkdH6u60BvPksJbxXL0kIL20dsL5lakni1IXDpiniCYfiyPfEvCvz/acnLnI9nVeqbOuT8fH6eDYVXQT4DZGBzsU3Kr6qzi+vw/AuVm6BaQAXmLJtVzlKgeRlRhLaQAtX2wUGyeWAixkpbDIR68cLvZsrYObxiYYHS2RCxHHMn4J21Q24E4zRAkARdF0cEIGY7pyv2y9xZ2SJqpZChwL+bYQwRgzYypWWxx1CIpCSZQglRr9qcdYTez7//Ii3sy0IT9lqFgy+PR48hI2d8DzlnpDQR2tlpURJZI6ItGluyU4Ze0bBCQw/a4rKzCgGg7zNpiIgT9g/2SMOMkBvu//QDHj36iG9/+1eJ6slxIKm3HAJVNEd8M2PoV8S45uT0iD5GBGd9DjQSYyIEK00kW14IzhElFxIsK2sVPC5DjomoyoAZkbGEeVIWGldCRDlBAE0KS4Gc0KD0YjwTNZCiOdnabRyucagm2sbhnDJIwidHXg+89aUdvv2tt/nw8bssl7rRmfiiIVAiMyYlc0/K98+4PzIVWZjye6ohOq23666gV0UBvyr38bzyacX+677wPEnBL1IuU/ZX5Ru8KvkDT5ILz3MVuvYMiEFdE8+zLj4TY6BK3agqlD+ZCE9a7AbNq7uo5KYNrXjWUpEBigtvvztXKZSnNsTClO0vpeLd6p
Z13P68WIKiooRcqHCdFI78mn9gLZL7VNoY16kV+5xVOTicEQVYt75a1kgxCipC4WoHQ2HuDYmI3trwxoKopFQL46ZExc9y0xs9/iJXfR8tOW8KX9SySsmwM5vhxZP7HuczqbXEwJgG6/eQPcQB5xwhBJxXsos0pdmPaECyI2ri3r03+PO/+he4+8ZX8CGjUQnqUS+ID6QhE3NGxNH3a9ZDx5BtTJ2m8d68t2z+7JQYEznasxqbZUGWnKFWgxiPRFTrYOm8MMdyB7xXgvO0zpCQISkRY0PMfYLgCC5ATna9mOg7a4zkkyMERX0G8aRBUBeIw8Cug+989S1+57s/Zr0aON9wyLqCQsH6Ec3ksVwXI4CsSZ8yGQIq49IsnQVlwxy4Yl7PrYenyWe9Zjc94i+KwfAs8jTGv6vkefOMbhIueNUU/Kskz2skvlRj4OokFYp3Mr5y4ZgtY0GNwOViKx2hFpE7FFG16gJnHryIKe/qNokzqF7UvO4xZRwQnUpgapVCvTNX4HtxlvDmsZwDp+bpZoohoKW5UDlOShjBF7ZDVQjOKHDBGPGcU3xNAFOt/XTMYFJrdpMKotBbluSWTF/e6Yum+ul5RRehRbdhEDz5mvU267EZQ3CaJhFCJKtxDSQyPikhgEhACtugIuA8zgXatmHWBBvTJpCyGltjO+OXf+VfQdyMjz/8gN/+7d/hzTff5Fu/+HNI9jRNg+Ro6yZ7U/Q5FzbKbL0oPCjJ5tA5fPCk3gY+qViCZ7awgfWYECO6KkPg1dgjFzOjxPYuGZV0dnSaWUelx+bYRyVJom0DghJ7pY/ZjL+cOFg4vAvEfiAmpRVhCMLR0Qlv/fzX2NtZ8PHD4XxQgG2Y316TjTWhuXwHNuZ1Ms7ZMDdvEiR4teXzqlheBCpwmdF2lXL+tHONXlQFyIs612ctzzK/L2JNvHRkYHuidPJG7N3ymQ3lMhoJNRZdoEo935GpJAeW48eeBE5pvXlvltFvJ5RCYyvewgBSlO9kcBSoNk9dDkUK7bBA8na7pXluyWEw6yxnq//PNe6xcZvOOzMeyg4rFRkRrLmRThXXWjy2WtqoSmlFW7Z3qRu11EHbGOhqKVbv/LzSftK8XF8uP59s/JxCBhePnazAOsfOwawNNG1LP8yIMeFCIjQLvI845/ElQc/7hrZpmc0W7CwW9OsTHj98yDff+QY+zHHB0cdEaJT7H33Af/jv/8f8+P1POLi9y//yy3d5+823iHFAc0s3ZJrQ0HdxDFlYcl2pYMm5IEuCD45GzXAQxAigkjKoVW84BLKNe5CSCCvgUqLx3qiwsUZUQ1a6bKyVSSGoo/VCxBH7nhSViBFZkaCJymxIBAfzRSCpcraOhNOerzQ7HOwfgJw8cXOYNsxiJGy8tpntv01G9SQ84HryqsV3typbfoYRgavkumNyHW6B869ddq3nXRPnK1VehTX2eZLPNExQxbYkmf7gMqNh2oyq8qwJfzXMMB5XFLkp8VLflzMqjuxs4wUhkQkj9/+GUaIl/qK2kUdVi/EzKeVqPBjDIMZGJ+ZhDeZUbjzHxpeh3luwLnmKjLwC2Zmx4DaSY4b1gOZ6fSM1ympedJcY7+t8skVNGjz/fbjqi/syLWrTQ7oZfi7odaaZtcxnAe9aZo1H6ErHyUxoHG07ow1zfHAsFnN2FjP2dndZzBv+5MP30JhpZzOD7Mk0bcsH77/L3/27f58fvfcRbuY5Wy45+uQ+3/zq11ir4kKwOVNl3fWQfDHEHE4zOZtqTap4bxPrvRsN2RxT6ZIpRm6USiIlyiBGmhW9oM4hg+I1oCh9hmWdwwLrZ3H0OdOve8iZ4IUgjqiZZVJ8n/AIt/dnSAisVh2qEZl3KB7nmieO/fbcb64dGSktDRUoaalVWbJtClwX2n+VlO2T1v7PqlxHUT+rvFbGF+VVqKa5Sl6yMVC9RIHNxLZSnods+5AXFNlGANM89dpMWEfov/Zud754cU3ABSlxeDtenCCN4L2UskFT6s57vHc4FwjePMPHD4/ozgZrd4shAbN54K2vvk2zM6eZNTjvrENeE2gaj3jPuz94j/d+8FNLBtuwB2JW2rblX/urf5l7X3qDLM4SGZ11RGx8+VscfT/wD//z/5Kf/PgT+lRKytSSFxOAmoeZKz6wNWBmmRQbeTSmRsDlCQbB0zb66ymCyzeZzbh1lZr+IQp7+3usBuj6noODHXJOhZ3QlmpwjvlixqxdsL8z52B/j1kbcD7z8P5DREtOibcku9MHj/gH/59/yPd/8CHqBbISB6VfD3gXaNoA6pDeoWLePtmBJFTM6CJbbodgCIBzZgzWdtaqFIMTNGeL14ujNl5SJiMyZiVEKznts+WWVDRLUFJOxFLF4J0lsLbiaJ2Qk2WHrBKwFB6e9WhOzFthlmzMhn59AYi5eq6m9bAVAlAzolWtaZS9tBl2ul5sXSVvpCVur5vzxudnlTdwmTJ81Tbpy+R5y9JedPnes1SNXMVh8CorzOeVT5Vf4znXxEsuLdxEs2vXwuLDV08Rth2WjZdUGbsB1rLAzXPW/gKN9/yFX/023/nON/jFX/p57t7bxVu5gCl652kaU5BDnzk9PmPoe/Z291gs5gRn8drVasV//B/+p/zjf/jbgBEJiRPeunvA/+jf/O/xzrfeIaWBHKN1uMvJcg3I/Oau55P3PmDZJys9dJZUlhJ0Q+LxJw9ocyTlRE6RnAZLWOsjQxzISeniQD49Zi8Ig4eYHeuodFryDYphcxq3od5pCGty3iY8a+Ow2WTmpoknT1rQW/cg9Qtfr8OGYVQ3Ai1VpUJMyoefPOajTx7xJz/8mF/6+a/yla8eIo0SYyR2a5anSnjgWYSGdOcebf4S7a1bHD0+5oMff8Duzi1yUoI4To9P+E/+7j/gu3/4HjGDC5YA13ihDY7ZLBA7xWVwXuiHzpT5eLcb41rGU6jhnIwPbgxbJJTUG4owjb8bjdOopbbUi+Uk5FplIiCOlCs91TS2WgZMcibFhPeePmaGKCy7HieKd5lBhbt+hjrP2XL9DIh+na8NZV3XhI6m5qWbd64QGNubOFJRuk83Z+VFy4u+z0+jlvym53tWA+d5jILrvve0a9zkfK+RiOeTlx4mOK+EpiC9TK9esW4NXRaa0kEuRcu41hLPFwEpveK70yWrBw959MN3yZ/MSKmn6zqGwWhpc+yJQ8+6G1ieLRm6HlHBi5KSnbvvI/cfnHJ7Br0KXTJN9uCTh/yd/8u/Sxs8KSejN9ZMcMY1EDzEEJBs/PVSOxKKcRKQEr//O9/Fq+C8ZZs3DhrTFQX3yOaZZqU1s8me1QtOjS2vkdLS+Fz2xPaIVQV9EWk57xW+KKlGgHMXL6h5UibObdx5CfVozPjG4wQePXrI7nzJwcKTY6bre7oY6dYJYkfjPLt7e3zr575FjJn7D485PYm8+73vszw95Td+83f5rd//YTEEwCM475i3ni+/+QYShLm0SBLCMKBqZX1b3pFW37YW2xm1MWBlhGII1TAMpHSZFpZRHSQtfBNMdNWGLBRegXKEK1dUsuVMbDTXq
pPmC+WwawJdzswPD/nw8SOOTpfle3XR47rM6xsVhZRwV1lPY2+OMWnHbX3+iXD7Of4PGcfgyfJpe+WfF4PktbyWZ5Hn3cdfiZwBYGMP3YaQq9RXsijr9YB3RbFW+H9ju+lj5rt//B4/+tMfMxdh5mQs/RNRgi/tb8WwaRFrCyyl50CtqdYopYe84DXTYNzzKcPZsueMen2DuoMXGrFys14sK13EkbOMyXyVw56aYFj+aXm2ERUpUEhVrL6UKTalLDI4wYu1P65K5HLY7fzP+n6Fa7cX0IvaMG1uxpjEOE/Vy3SlUZNR4Qp784avvfUWX/7Sbfb2ZwRvDJGxO2Z1eh+iEPvE0fKEdr7g7r03OT05ZiDx/scf8OGHj/jw/hlts+Jv/53/gI8+XnE2WOQ7BMfMYBRE4PbtQ771jW/QzAJB5+iQcas13tnza86o36RxsvDC2K8Aa4CkCjllhiEx9JMhMCEj5QylhK9WmTiRUg0jI7K1caVxyNCamIglTQJN8KU3RsL7smZ9YH5wyA9+8gFHZz0T8dO2XOU5jZUDYoaJVAPTsP1yQ09eG5NRUf9NVUA3hCleyzXluojeayPotVxHXgGegad8ZvM3mTyVsleO4QOLdVs8N2Hd43qsrl/FOsfV6sGMKX9fvkq1hepmMyNUrLeAytjshkIMtOHMjjFwdabA1RkV8pAt0a/eZzZSOwpfzVgW6bCWyBUJsO0zUysE8pgfYfcLWBm4FubBDR6YpydH2d1seoM1AfNFbNh2Gh2NANl6xwwiJxOvglIIm4Ljq28u+Orbc+btCp9X7M736NaJWzu7tAyICmerNYNPHBze5t6dO2QisR+4/9ERn3x4Qt8rQ86sYsejmBmb/nmbF8VohQ9v3WJxex/1Do8nqdL6GY1bEIdClV288LpEK8pR1xrYz5iUvs9FAU9KeCyJLVB5tfpqcyqb27JuyTgxA9WmRcd7sHVgitl7oyRuJJsR6xwxKm+8eZvd3T3++Ic/sZ4YMo39xTjtJvJma60uepHahMvWmE4LnMuqd+pzUnGN8TE3TIAKFlzi9f8sKKlPszHQp3XuL0J53udBXrX8lM8cGbiKVWrTCKh+v7DBLTAes+EFlY3aY8o/FC9d3PYGaYcLmYST0mmQyZGtG2LCmOXqkcYPJMV4qBu3TAYEk/ebUh7jzZaIVWKnUPIK6vPUroNuZE4UMpJlSmDDFKwXGbfvVBolJS4+2+WLbHuMx7JE3WQqfAFSXd1RKYL4qjxrkabRSreNY3/e8qu/+HPcu+1YLs9YrZecnPS896Ofsmh3+eZX77AILaHxiM/0eY5XYb1csmgblhEePDjm5MRaRwMc7M44FOFsNVj/CDV6YHzA4/nqV75Cu7fDkHtIQvAwC5HH94/46OOTDYW8OW7TBumcJ6VEzkqKimbhAhpwYR6qMVFJrKYrCBYu8s6RYmZc87oxx1pZN407QxBiFtpZ4Gtfe4PHjz7h/oPH1tK6hhSmSRnnAnTLYLFXipFIMWg2j9Mpt+OpcpH448bq6lXbIL8I8ipVdPwsybOQKn2W8hnzDFTRjffLzxExt/fclrdR3tSND1M2VQEfjAiocRPfQGUHNLh2OrMWD1swRS9i3lt1+01xKVmnrPBqnmzWalccOKEjBXHKxWgQQwYq4mBGhd1zLs+T8kgAOxoLpcXCmCA5+tdF2SY1PgO5liKvm+yEsMi5+XjetVlnyDvL5K9GjHOCEyPLCb6w+TmhaRy/9Ge+xd/4a3+FYf2QH7//Y/70+++yWveEZoYEOFue0Thl1+3gVPEosVtC43A58Mn9x9w/7lmVTpFereLjrbv7HJ8t6btETsmUrQhz5/nm176GCx6fA4rDJ/jB9/+E/+w//6c8fHSGYTB5hGC2CU18SZoTyy2prSVHk3Uaia2xObchb9b1i4BvrEVxGmxuxhUtjlI7Yq/oRjxflbfevovz8NH9h5wcr4zf4FIVXIyMjTVbUYLK3FnX8Zbq16vWxZb5OR0vMuaFjFzHV8hr7/PlyGtP/7VcR16aMTBuUJt7g1TY+jxXwAQxms6vvMvnTzFtwCX8jBdoRbaVbQZXMrsFo/KtaKavt0HxBosBUEljNnEKsw/Kecc3tZQPGmKQkhWURZ0SsqpSHFEM58iaxzDCeHy9kVwUQjF4HJaIXtHmFI2Ouc/WxOh8gtaTSsnqYNUjNvzQCfJ9xrDBiCqPcHmlYoacS6MGheAD8/mC27f3+M7Pf4NFA93xwPf+6Hu8+9OH9L3x72uC/o05dw/mnK3WKNZquAkLjo463vvJI95/sOasK9co9746W7Ezg72Z46QfkMaBZBqsvXUjHmJmHjwDid/8zd/m//q3/x4//MkRUWUipEKm0My5RDznPMOQyElRrUbD9sb7pLKpETESQ4Aab4ZstdemKhmtPatKjgUMg5Ue3rp3yN17tzg+OeP+/VP6jkJmNaFZm3NfHfxpHVfCq01DZmM+SyVALTM8/wxbn90y4ut3oxoe03HXIYR57cl+9vIqEve8ivf0JPm83GeVzyRMUD1d2VBjm1D/poM1bWvb6TJjfFbKB6tXo6BVoatMHnnx8GuCezU4XPlZkYgRQdAtYH1SkDKBEmMMVUoR2ehFFW+v3GDOBcovOQB9zEZolMz5FAqlcTWCXFXwhkhkNWQhYnz3fTLoODIZSZdtmxfqdcexMhTk3FCPf9X5uYmM4LdM/SDMwLEMfvHKvJ2zt7vL7s6M/cM93njjFq3P/PT9H/Cn3/sTfvLBY05XSh+VXRHIcLrKLOaZPERciMzmu7z/0ZIf/eSUsy4Z/0K9dbESvyErlKqBHIWu73BeCKLs7y5Y7O+gWRnWa/6b//o3+D//7f+Udz84ZcDWjgCqrujUEpopwf6aOxCHZEaZugtf+ss8scvq6qexs5yE4BStORXoiFjVqYsxsxZh5h23bu9y541DPnnwMWdd4pP7J6QEJS1xe96hhGiq/VqxgKp0JyPX/i7vjQmAT5d6n5Tzn5dr8RJcyG94HTJ4UfI84/hpEhPd9NrnX/u8KdxXWV64MXCVVX+ezWzSYIL1UZ3Ayqpsq4If7YTqjUuNck6b1Vj3DHSlbstoX2uogPFERiEsG9TFWvsYbaARVv6VxRSx6uRFZ7Vt2roWWqWCw1okeywGrW3gaNWTVYgRU1qlwVLWkjjnlCYYoYwXK02seQH1ZrM41tGU3pBLi1uFQa2qoA0eHZJ5cefm4nzBoY5e4zjAF9CWTY6Am24gdYi9E3wQHJkmGP//YtGwaFtm7YzdnT1uHx7iNPHdP/x9hvUxjx4fc9ZlsnjL0xDHbGaNiLo+MvQ9MwLaw3sfnfJgaciKA+s3oabAYzYyn6jCTtOyf9ASVhBTjwADiegSq/Waf/KP/hH/7t/5h/zo/hmRzQQ5sW5TKuCn8BB17hVi0jFp8DK5zIt54niKMm9KG2GNJfFwtFDRQlmcVdk92OHwjdvcP3lM1/ecnimn61y6J+TpsHPX3qpuKM8zfm827jeTEa2EXtfbcCc04Cmfu2H89LVB8OnJZQbr+fc+a7bS1/Ly
5IUbA08jpNn6m7rXFa8lVwfP4o7bULNZD1K8W2sNsLFhFomFJ771niBqCrd4XU4gFDjfS+kpIEZQ48T47otTRlJK1zkzCFLOha3Vje2NK4zsgcaJXUeU4BVt4LTDvCunI87vxD7n1CBdTx4NFuuAZ3FvKK2YNdOUOIGKdc1bYMdrrlDvZbDwNMZMOuWpHv+zemdOJig5ayZQSjJdwImjX3c4MrOZRzXx3nvv8/DhR7gcub3f4kMg+J6U1Ur+gjBrAz40dMOanD25V/rVinVMuCAErATUFUsvJSGrsOozZ6vIwZ7iXGZnd856lUkp89bbb3Gwe8B/8O//Pf7u/+ufcv9xJHo7XnR61grj10RL56Xkjtg6zQmuKuF72phe/iGhbT3iIimBaE0ulUJ1DKAc3lqwe2vBhw8+oR8GRFoeHa0ZcrXvZAyxXZa9fykLYDlurNTRgpBxtUK4KE8eiOdRHq8Ngk9fXjjZ0mvv/YK86mv4M68mGOH3qrVgY7MqTWKwpDpkI0ww/n8TzK/vFYWPmnKWWkpor3lKh8HxPCVmr1o6EtYwgin3XPdLVcz/mhAJN8Zbi6Lw5rn33cBQvPlcaumdc6b0sbCCL1B6heuzTE+hBYfImQJt2HGhtJoXgz5YJQrp7UWpxpJuvXYzufECLjECzYBz9H0kDcrB4S6znR0eHD/mB+/eZ4gZEWUnOIIk2tZyJpwmvLQ0wZNS5Ow0IxJBHKkX1l0k65RnsoltpIrYZBhi5GR5xsy7Etf3xJg5Plnzd/7vf4//5rf+iNNlKt0J7fixP0C2Wvts/aMNJakVAyqkmMfeE8/rJY3Iehbmi4am7RnW0xuWh+loWseb9/bY23E8PDphuR5ICZJ2rC2+saXAr7zeuU3auACkhL9K8p9e50xPea7X3uMrL0/7bj+JWvhZr3MZUnbdc26W677qivVVkJuyX37mxgA17i3mPFe8YFT2WhZBgfJldHXPGQIlH0AcpawwW4lgRUaLB6RslAUWiF+dZejn4hUlGD1AKOEJb8bCaECUuH7tiKxQSgHtvrtYYvsqY2OhMSwBBLVSRRUtOQ7TaCSpWAklp0DHUEcohEVOzFBYpau/FBfLND89mUrfGAmcckosZjvMFjNW645PHh3Tx4GcAGlQTaQhE8QTgiOIQHAkzThpWS87AEIQIpmzZaaPim88PmfiAN4aBVjrYbXPNkHLcS1DP1isfd0TU+b4vQ94fPo+Z6tSX1+sv4JpbKy+Mq7lYyNcr1bWacRDT9+UnvZ+wbtICvOdGSGsjHyKUjXghIP9GW/c20N04KNPHrHubV2pCn2y55YN0/amUsmfKM9+1VkubuqT+b2JNmx+/kkb/WW8ma83+qfLp0VxfJU8iwFwZbj4hkbipcmqn9P18aonxl7bGHh+oHDjcxcGZay7m7KRt66ax01q0yuv+Pem0mvFqH2DWPzdSTESxLR2pX2t53JjRncp5yueLRlUcqX/wYuhEwpTEqI9hBkUJYShWDx5KFT0pX/NpCxVxnCAQdOTGhqSIQ3OQZINWFrV8hY2xtmocYp3LAVO3viibVrdtU58ZJp7ymQ+64Zs4ww4q8WftQ0xDpw9WjL01tkvlVh7lohi4ZfjdWQ2b9ltre1Sv46s1yvzwnNGcfRZ6QboorLwsLfwdN4YGGNKltuhSoqZqEoIgZxh3fXWD6LPxARdTKzXk+nFpAfHl+r4TYmUFhDKWUsiYYmn64uAQAWvjiFa+eCtgxknyyUZpWmEu3dvcbDTsF49ZrnszMB0Aqk8bzLk6Vnu4kmJYXUtTWvmsvVQMa0pv+Cq821eb3N9jSjchm1x2Zi+NhImeRWMgKclFV5nrm763fk8zf9VqMfLfIabrpMXZgzc1BCw3+seU/yxmhewkQfgKsQuMhYH1k6AE5Q5+UUWu1e8GOQ/VutpHpME6xYmpU5fKcpeCuOg1soAGRW5bDykiFjOo5rxEGvJQgJJJcEvl3MgJTnLDAGhMBGWjG5T9DomweFAhzyOhQ1PyalQ4zGoQ5ZwxGzsd9QN9dxYTpNjTIx14ETdpQtzkzznZpCcedVN2zKbtaQ4cHa6IqZcxtDK1HLRvLnWoissu8wnj3vYdyxmjuCUro+0TaCLinNqZDrOxi/2iVnjCF4Z4hiZwHujhG4bx6x1CIkhJqR48TEpKW6sVLeNnNTH3DIEiuWYJY9zVQmaJpDqetD8VRwbqkrfw/J04N7tHT76ZEnTzHjrrQM0dZydronDwDAYz4EXJZLLWuXpX85ryGXw7fhTZAybXXLktc5/HjGossnoed17vK6i+Twpj09TruNdP69Re13I/2chd+RpFRCvqrzUMMHlFv+0IUv1wmTz/YzDGgjV8EAd1/FjWjVhBeknr4OigKWUoOWiI7VeW0Gyxe/t44VHXst5S0zZ+cn60KylmUxVCPa5VChbVYSUp7K/arBQ+Pg3SY9SideODQZyre2eGAvtiUwJxWKoJIUuK10eb4taYzGO3TSy4/U2e8hc9eW6zIN7mtREu5gy63VHznE0hjSzNWfZBnF8tqjCyVpBE/f2A16EHMHNrBHU0CfTyU5oGiFgDJNZBU3W/Mh7x2IeEM3szIIxPtEQvEfVsbM7I2MJiqppS4edf87zdfc1mRDq2rvZhvZkDwsUh6rn0ePInduH3NpveOerXyHFNQ8fPkLU23qy5gk4mSoLRlDtOeUq+LYawoYQnF8Ll1AOXnLOF70RXsco+Dxsvp+FfNrjcp2w2etckldTrm0MPG36bj69o39e/is+u1alXz9VAP2KFozem4weryG2pXFMLnXnTnDeE+aedtYQgsM5V7oIUhL6PE4cIZjn6JzDeassiDGThsjR0RGnR6tS6WBGQ/DCG196m8M37pR+74XEJ9e68MyDR8e8+5MPycmS/uz2lQE4i5nWCW0w1rmmGEDVIBGMZSZrLSfMxCwM2ZgKhwy9Wl/7iKuQAMrGvWgduyq1bp4xtj/O3cbmepUH93SxBLacs3nfIqXVb/UoJyTADJIa6GBEXVaD8vFx5GAx5UQ0bcOjYysLDL6wN7pC96xuDM84Z+PuVMkx0neKkHHOE2OibRsWWYna4bop7FLDK9NYcOnv1RBQ3VSAL2Bjk8kOPF72hHaf7/xZx1tvvMlP3vspIQT6lMZrSTFWpYZcnhjlf7pcFn9+Ekpw4fafouxflPJ5FgP1tVxPnkSSdd1jnvUz15XP05xfFeaCT+85XkQeyUtFBi56X9VNt8YraQoabj+SGtNeLbWaYozVaJjKELskDAN85a0DfvHP/Txf+trbfPsXfp69/R0LA6REjhFiRrIlCwqZHCMpDkhWhq7n7HTJo4eP+MGf/oA/+eP36LpsTWAQGhUOm5Y3dnaBAc0DOUZUIzla3VlYwMdBGaKp6OD9lLiois8QcmbuhdZnGpHCWVA7KFoIo0tCP5iiXLlibOBYDhPvwcR2D0XbX1D401jWH3ruxZstovMd00Rl5IZQ1YIGlJhzycRUrf+251eQkoTp6IbMcVbu7AebD7UckH6A9aA4D20jFk7J1TAAjVbC6Rs
LvzjxODESoqbxzGYe8Q1JIzELR8vEkNTaA49Y0hQiMISqhlJyMTS3QygvxicHkYhXzzpGBl3z83/mm3h13L53l5OuY/XgGPXWMlm1MCJkQa3WlS0r+TnkMq/tuiGQlyWfJ6XwWcpVRtNlSulZxvQ6RsBVfz+PvOpJeFfJ5+F+P5sEQrY3HoPldYwnX3ZB2+5yUZQVtpyy6rR0F4wFrn5wdMb3fv9P+Mn3vs/3/tn/j9YLmrPRAKeEZvMiRTOi2ZiFSpGeZjVkIGW6DDtZaZwzD90C97z7g3f58Q/fxXnBeYw3XyaCI79ocSX8oOJIxXjxYoMu6MhCmEWIheOgJhQGKep8TP6z46Izj3DIhYdgIy+gjt6maXCVqOQtkHfMUdAnQ7/TfFxx/jFkM9HzTt+DjQ1qIw/DQB47X8rQKawj7M+M+2Fvp+VkObDqISVvSIBXpMzL/m6L5kjbCDs7gZwV7z3zXSOhHvqMc5mD/T1UlZSXdBHohLSRmPnkL+zEs/+ioc6RbTJlTk9WHMx3cQj59m26ZUd3NuBc5uTkjPnMvrK6Thsm3LNvNM/jSbz20l9teVlw/GcB+78ONWzLi0gqvT4ycNm1JjT/ircvhy62N1+tyfsAY03/peeT6bisqbAKyoY3Z3kAItAPiYePTjl2yidyZGRDhSnQF1jZO4NdHWrkQWJZ/ga/UhIJLYFRgyLRBiyXmL04a5WcAe8NsnaiSIZ1lxiU0qTI4HNH4aCHscwQsRwC0Snp0JeeykMuVQnOutq54j2vcmZTFYxjfd6AOv+FESXXvAqRiYxwIzww0dIK5/f560BRuqmitryP+tu5xMV6DzLdf1bHcq0svDCbCXt7LYudwIPHa5brSIoQg7C/2wIDO4tYxlQ53G1Zr9a0s8DB7i790BsjpA54Io1XZg3c2Q88HBIrajMpM7xUJ2NzfKJL9N1Na6QvHauah2BPjarw8MEZjz95wM7eLu1sxu2DQ+KXVyAH/Ivf+gP29huGLhGHOs4vBhXYvqfrPddrQ+DVkmcp2btuVcBl3v5LzYz/nCv/zwOicX1j4Ipn2Hz5/HRdpjSmCoLznzVxuvl38TDHswVUNriBvd3BZAgUYiEHrbP4cqUuzqIWV2Zq+wtTClTtDTeW8G3pqylOWyPziJQ8A4OuoRDUlFBCnzJDYsr43njI4BxB85iUpSUMosVLVozlMKox4FXCJeeUYTCvuRRCbp23Vlgw3v9GyVfRG1sNagWmm9tYsOXN6tnfTKRcN3N+dahevoE4QPM0xqrKalAenigH2bG359ifO+ROICWHSMOQMvv7c5zr6bszyIInEIcEzqMoTWgQ9ZyeHSOaCSSGbqAJLTkPtI2w7hR1U1hlsgHq/Uzr4dPZAKuxYXN5/8Exv/t73+XrP/8NDg/vkFVZHve8+/4PCN7RzoR+raRcMzFeQHjgEiPvSRCz/V6/x5OxNJYivsIb3hdVnlVZnldSV83d+RDvZzXHL8IIfy2XyzMlED67L2JIgIW1ZWwZPM2rjMlvUwRcp9a+5689Zn3XBW2kPDMx0qGaW6DVQ9fJI7dI8FS6OFBj1xV8nzq6aYnDV3YhMzosRi0CmicsI2LXGRIW0y33kCvvgLPjHBXhgCHL+CgO6LPdV2LyWnPJb+jz5vbvpmEt91xzJ6z97lQeOHHHFy+4qoBKmqCTgWRjm5G8HTJ4GhQlhXVJL3TNO2cNXUNUYRmFeJLwcsaiUdRlgg/sHDTMdw7KBz2r0yX9WmmCkhloZ56uX3F82rCzu4cTWHcZH4Q+KbO2ITdKM+vwazVqYWqoSYuNOcFeSmUffL4N8NLKDUDJ4witInz/vRPC4hjfzDleH/HJyQlHp2e8eWeXximrmBjIOHGgm8WyzyY3hRhtI66Wbxknnb4Dr+WLJefzg14be19MubExsJ2sdjMZPfhLAwGCSB6V7ZaSp9L3lnpxy5zaCg/Ue3TiCN6UtX2m7FUZkpj372RiE4wbG6mImHFQ4VvdpL3dsIxrr9kxgWtqZlMCH3TZOgxSvGvLdjeFKVqUPKaAdCSy0RK2AIpiyuM5zNCIaSogvAR3KT8vxv2nL7Cb8BbJ41F1yOt59Rks7+oxTDH1OpaXcxogpTxOpzveCi2gDDjOBhBvVRcqcHq8YnnWEYK3z6tnvvCgVoXgnacNzRie6fqOvk80bUPbBmLsEVFmM6FthX6VSrfAMggiVIIqm3YBTePapxoH15Tzz3T+27Np4GZVug7e/+nHzBee3b05ITgODzyHt1piygzDmfVjyNNZP205X2GgU9/v1/I5lU0v+0kw9ssiOfoiy+fBgHrJdMSTByw61dubbHQgrANXWw1W5WIZduPmOdWDF8VPMRCY0IZa2qaax7a9gil02UhiYzwDo1IcPemNj4iAVCRgspBGJMFwfUcqG+ZmVr/lRihRHDHX+7UbqNTHIrUFrRbEQccqhEGFLm/oAHiqUtraxKvRU0IAG+kB0+fHnwaf16jM9WVCVCwbP5fjLzEIRuf7EtNGJkro45U1CHj7zow337qLkjk+OcORCN5CB/3Q4wP0fUdUAfGIwmq1KsaYw3uPDg5RZYgRFGYzWK4d0VL2SprgRfC9Bj1kKxbzouRcSMZ7TlcrPvroQ97WOwTx3L69YO9wxoOHK1IurbkVZGs1fPqyCREbErUxby94XD4PG+irIM9SGrgp1zUKPm9ysXrttTxJbmwMPJ+VKNSGKiqK0w0kQCnQto7OWDUC6jWzKht5ZtvnBUt+K159zNZ1UIvhMZYkVscvl8W/cfzYsrCKshEiqPdoGqoqhpq4Vv/2gGo2rv2kI3hQUYOswnrQkeiotkD2ztADr4XWF1AnpGS8AkOp2a/PtymXkbtctfiVPGbvn7eBNo+t6lBd8fTHkMEmYnMRPpyS75RaUngd93FT/daQhit/RRVwnjfvvcXXv/IldnYb1t0Zjx494uT4lG7d06/WhKZlttOQshKcsl4vSUlpGmt8lJPSrSMOR4zZWkB7YRaEPFw6tOO9jQBBIZMax3xz3C57rvMJWleNhdb3zOBtZ4EY15weP2DW7hKamXV/HGIpcVS2GydcX15EzPfiWL3YzfaLoIw+C9lU5s9rFDyN2+F1FcEXS14uMjAqctmwAOwNy+aWEp8tH8a0eWXtc8oIT7pL1oSIMKjyuDPiHQ80xddzjvFfoHQAdEJwpXWwgEcQJ2NGeUpKLAmASdXq2cWN9MJg7Y+Dl5GMKABOMs1uy/2ztfEBqLCOliSIN+OkFeufMAumjGbCxDFAsQhQ+sFyClYOVhlmyXFWv5gTtPDEzXPKG5iQcArKfHUh4fR5xZo5jRZNncMrr7edhTmFWbZRCtk2Sc6fZfq8OhRh/2CHt+7ssWgd/vYu9w73efT4AR98+BHKjKZpEHGsVmuIA6nrwTmGbsWsndN1kdVqzaJtkGSwRHCCc8ZymWTTNNzALMb4z7by15LgWT+9mUh3E5Gt34XFLPDWGwegS3IM+HlHRhj6GXGlEEsTo2eUmyraCxnmctnx9TtxvdLU1/J8ch1imxdVAvu0ioNXVV
6l+xxDqJeECV8VebnGQN3bmX5Kwem1KLZt3/wyIp2rBzJnQIWclJkX5lXJitWbO2cx5cZDI0rjhIDgXcKX5kbG2W9of/RmAGSFLkFUYzfUsLH1ScZjTIIOHVsnp24wqlysV0GqCAhK4wVfUI6sGU1CcpbLkGunxXKtJKUSQoRWhJkzZCXxJHV8cdxV8kZYwKCHrT19I0FuexMpXv7m3On2tbfnZVu1XYDAx3eefPf2aUNgvCj7O3MOdgLHjz/meL0iNIHslCBC44UmCPM24F3D2bLn0dESzbCYW51IF1c439C2Db5pyA5iHEZkKueE4rZt1PFxptDAZfd92ZhcVy47W9M2fOtb36JbHeOS4/HqIfcfPEQ1cnY2bOQ2vvxNZZrrcUFP743I2+uKgk9TnhYWuK4SfFpI4GUr05+FSoFX1RCAz5CB8PxreoWnM+qv8ldt9lOPrYl59cOCjN3zKnWrUdUxGhTV0UuVH9/we8TXGLd9INdQgxT8vm6D5X0VRogii5hid6b4Y8ysEvTomB8gQgkrWOOl6memcmzOGE9BOa2K8S7k8oJXWATB95s0v9cde8brVS92jH7IU9SK1NBOxQlkA9bexhBGZr5qPKiWFs2yMVHXvOdSGjmfO77+1bvs7LR068ecnBxxdtqx7tc07aygNpGj7jFtOyeEQAgtKUbIVj2gkhEnhEbwjSfMGtLyDFUhBJ3Wx4aOG8MAT7htvcRIfV6PTLPyxptfYWfxNbr1ir3jW9x/2PHhBw9IQ8MmAdKnJVeHmbbN9emAT/2WXstreS2forzkBMKLIkweczUINveVzaj+GIu+4lzG/mcGQluTw8XUV21NLKKkWoWglCR+IakSKcl+RTtapZwUXkLLpnMVYhdDAshF2ZXQQcIURF96CEQtiXAwal/T7cUIwpoVORUGnSiXfYGgE4xtjAFmzto0xxttvHUUXTGQpjHMVITgaQpMRn4FsMHdqhzQiRZ5QgOMS8GoDOrcXRcmKwaGg9u3FrzzpdvstJ6zThk0s0yZIQpd3xFzsrJNdQz9wHy35datXVbLFYJj3Q0kVZo2gSgxDczbBc5B32e8tzWT0mUUzpc16NmUF60BhbOznvv3j/mz33kH9T13mrt87Z1v8NEHx/Tp02n+c/27K+NxxRSWgN6Fe7zOvG/zGDzZa/1ZlcvG6IsiX7Tn+bzJCzMGnqVRwkVAuZ6jMgsAYjUHblQmNY5brruBKCiWlDdzlsjnKjpQDkrF10yW1m+8A1LaHTvLScAJki32mUioQi6ljPUOxsTDcg8Za3xjVoL9vs658AOYJSB1/xQhj46doQZJ05jU6MrDebvFKYu9nMORmDeOVZefrKI2vNNtSLliLHnMjK/vZKZujPUcKoyvje1mJdPMWua7M3CO1VlHt1zXSkVEHN7ZvadsPSC0hhrUY82Xi5GGK8hBwqsAqVSZmNqZNY7vfOMt3rq9iwxrM5SaFnWJ3EAcBrI6QtPiccQUGfqBW7duEfu1JQ32kXWfCYPljOwtWkIw8qdBEt5bqCEmm/eKDlhlyhVlkeNoXr7in0eJxah89NHHfP2de7js8d4RZq0hWeeSFj8tuSoZdTSMaiXO+F0dF7iVyp6Tm8Kjr42AlycXDLcXlGvwsy7XSei+yui9kFD9EuSFIgPPZgjU+GMxCCrsKmyaBOPPCkhv/G8j1mSvNM56BRQ02z5TYetc+iBQSgvFFLCv/D1jRUKB4t0EIU/3qFt3JVlG3gDU0IBeTbkKRaE7GdEAKBQFSFH2RnxjisVi3M5N5lEtT3TZqjAaZ7THafS4ryPbiMBlMo29bL0GSnCO/YMdvv61L/Odb3+dP/Nnvsy9OwekYeAn7/6Uf/obv8Nvf/cnrHos5CJlQZeB9yjzJvDWXsPX39pnfyZ8/HjFDx903D/r6bOS8FRyHwW8ZG7tNNw73CH3S1bLY1brjqGLzGaBmLO19hVPaALBOXZ8y3K5Ah042Nvho/uPcd6RUqbvks1/6mlaT9sGxAvaJXYGYUiRmOqsllF7ilL6VL6mkvjxj9/j7Tf3eOvuPd5/8IDf+73v0veJfK53xItGCZ5k1Nd+IIqOHTBhQlMuSxT9NO7xtbxYeT03n508i+F1nvNj8/XnmcsXZgxc1xC4mPyiowLYDBkw/pAx9j8SE+t0rokUyMS5wrArjlzY+pwIKZonLAUBSGhN2C+KtoYTNprCqnmFrnq8TkplwxRXrvc5bonl/VyMFe/sfqOaSs+qRHVGe6w6JisacmCNk7JQ0IkN5V2ukUobY4ebujw+YZzHw63GcusI1WqsyIbXPy1QJ9B4z9e++hZ/4dd/mV//V3+Zb375DTg74cc//FN+/x/9Ez744AOWy443dht+4Z0Ff/TekhitgVDbNMxax8HC8dZ+w7ffvMXXby+4tz+nbYRVFN4/WfEHP/qY9z5ZcbxWTlaJVVRr2yvwzlcPuXd7xvrsMauzNTHZfcUc8U7Y39nFO8fQrRHnCb5hZ2dOt14zm++yu7OLyEAflVWfiRnW68jpaUfTComM98JioUhwPD6ycMFkaGY+JZV/peSsfHL/Mb/3+9/j/bsP+N6f/oizs/WobO3uXsw9nVf+T+w7oYrU8t9KykTN7NhAni5be9e5l9ce6Sshr+fgxcgWJ8cl5d9VrkP09DRk4bK/b1w1pNc8ohV/oxNfecHNm6+euZugfIoih+o3QzUOcnHNXYHMm0YQZ5snWFigcRDEFci3KFhsYCpTvy9eenAVeqfsYRYPdUU5qhalXpU2hVK47HeVs8CXBLtQFKgTheBZxsw6TZiG8SRYw6GgpbRRoGksF8A7xvJJrahAYTIckrXx7RRWCU6Ts0ZINxp8HRsC1cWmxfg5XxDWeOHLb93hv/vXfp3//l//K9y7N+Px+z/hD3/rt/nj3/kunzw6QpzQBsVppG0c4eAuH6w8yS144/Ytbu3vsL/bsjfz+PUJbX/KTDvoVmjqCd5D48EfIE3Lqlvx6HRNl/c4Wnf88KNP2D1ouXe7ZX3WEbPN4JB7OicEGmI/QFZSjITW431DSpmYBlJWMo6snqOTNcdnazKORoTQgEom5cwQMRplH7j/MDIMleGyhqE2vmCTzVdm9dMwFRyQ8F6BQE7VMKyzvR03fmIY41qx983Qg2z82Oj7YX8y/dDphRLSmQpStlfTTTal17kCT5ZPS1H/LGTyfxbyojz3Z5mXzWv1JdT9xGu8bGMANh5MDRloG8gpE0fFWdV2ZRvM0x4lRucbxBEaAU2jonZiYYCYrJxwHqBx2boUoqa0VWmCMx0kGS/GNeBKQqBzjlCulVUYEsSUSQpDNCWdaskb4JwQgic4pXFmjDSihEXLR2cD909TiUG78WkaMo0KrRNaD4tGmAfrq+ALvJ5RNJsRMGD3se4y6whrEU6jcNyfZ3F82sCXQd+IdKvUng+FORBhZ+75tX/1F/m3/sd/k5/7xtssH37AP/8n/4w//K3fZuEie4vAbLFgZ96iwxJNsLj9Je586zscD5m+N2bAo0f3QQdyzCzPTlk0nru3DghD5PjoY1if0
kpip5nTBGV5tiY0M95+85vMduZ0LnP/+DG//4Mf8t4nS6TdIcwc4jzSeGIX6dZ9MSilGHmO0ATAEJRl19MNiZidtUfue3JSnPMkycQhkZIhR0nh5EzpYllQOmWxbI/hhjGwUZJ52Zf9HL5zZY7BxYmaAkT1GN1SvvVeKsOjbLxrr9utCZbwWYJuY12ikUKN6JirvUJKN88CFRkStWl1XmX+bD73syEDcLkxMPoIV1z5Z0leK+vPvzw19HhuH3nWOb+pMfCZVBNM7G1F9RcPfUhbEcitkjelJrCVbaHC2aobW+e0ITYe5kFoxOrUXfkZgOCVJtjm550hBEGkdDy0z6oqSYWg0JffvTPyoQnAsDBFKDwFTkorZIAY0ZTxTkaSosrQ1wAtiqeQDGlpRFRRBTHjZH7rgDtvv02zs+D4dM0H73/Mg0+OkJSIOM4Ga3s7KoFrLaJzsJJOIywIB3tz/ua/8Vf4m3/zv83tvYY/+q3f5L/6+/+Ax5/c583DwO39mY1VXNOkDhcCze0v85Vf+nXc3i5H736fBx+/z3p9RsyJlCxzwgd76o9O1sRVz3otvP3mV4HETz/4CRrXrI4SBzsdu/OPITY0M8dXduY03/wqQ/8+P/j4DO0aDg732GsW9Ays1h27ix0WO3POlmsePTrGidIEb4mGCF0/IK7Becd8HlitBoY+gRNysnyCFK1LpN8as0vGT7ffMaX8lC/rJlyIrZEnSzUuXEHPqsK/TCXK9Hk2jA+xz9WQlohxYhgvA8xa2Jk5dlph3njmrWPmPbPgaWeOMPM8Pu344QdrPngwEFWe8phXv3mdzexJRDmvMYLnk9de/6svz8oV8SLlmY2B52VSqgvUSam7d0LHpjFwccuTzVfHWO60GUJhGVSllbzR7bDCClpaBJfMeSmwv1oOQdk/x/e0lA3WOnsXKE2Gyqar5pHacabEswhRLCO8H9RaGCOlKqEYC97hq1fmrLpgkJrUaKGGnf09/tK//lf5+re/jWsCQ8wcnZzy3d/7fX77N3+X44+XZK3Pfn3Y+DKpR+/ttvzNf/Ov8j/4G/86u67jt//Rf8U/+y9+g0YyX3+zYa9Q5PY4dmcNMj/g3jd+iTe/8Ut0zvPTH/+Ivo/s3r6DW7UMvSnrdbek8Y5uiPzg3R/z4MExjW9o9nZ4694eHN6idZ75G4F49AnvP3zIwUrY2wm07S57zZxf+847wAf84XuPOIqwPFsSmmBhlD6x1DXdkFDX0McOvKBZWK97grdkw6GPNL5SEyfiYGGEmCLDYPMXvOB6LWO7vbaeRUrBxgYadr3yyu3Y3/mZqsmq9R6377MySwaBeSMc7AqH+4E7ezMO9xwHi4b9RaD1FEPYkVPEi5JiJmuii4l4ltCYNu7n5WxQFwmqtn++luvL63DLqyfPE9f/NOUz5RkYKXJVCWJeWVZX/ZmtzzrdgBALpG2I5waQWzy7NjhaNxH4WKLhBJGKl612vRlByKQS13fOCIeSmnIfk6xKkoC1Epo6Co7nFwP4kwqdTu2MR+OhPm+FtQuCq05RL6i44p0qzc6CdrFgtpijCO1M2Nvf5Y033+DOm2/x7/0//wvy8nh69ufZKgVa7/nL/+qv8tf+6l9CuxN+5zf+GX/4L3+Tt+/MWMw9Pq/wZOKgDHHA3b7DN//CX+fgS1/n0dExjx6+z3J5SkyOdn4AfkY+PSMtB5zMIAeGlFkNmVUWhgw/ef8jcjphebbEq+ftt79M3r3F6arjwaMHHCyVw8XAIszYPbzDr//Zb7C3s8tv/fFP6WRGlkQIgXXXQy+s+4FuiDgvxJQQSQierPa51A9oTqQUQRTfeFJKNDMhRbWyUq2hKCGX8svn+75OinpsovWkT8v5zo+bUg2/TQNQGKtNijGzO4M3bwXeeeuAN/dnHO4KuzMHKYN2eJdxRMiMKFgW0DzgcPQ50Q8DR2eR01UmZ3cl+nFTr/Npm99l1Qw/60bAa4/+iyuXze3zIjnnj7+uwfHMxsCzogJ1s5MS79xwnSxckIrXJNMDiYrBt+WSuT7sptdW3g4CbaEbdpYpZ8mDMsGlZkjUqgBXfhYPv7QU1tKTQItBwMbt2i2PpKxjopkIxKL5By0EROMNWg6AOpmOsdsmZUdWiBha4FS5//iEH733Lru39tnb32e+aGFwEOHPfPub/KVf/xXe/3v/NUOsNZGbC2BaBJsG1Pa8WZlYfZ6f++Y7/JVf+/N0jx7x4w/+hD/9w+/SzBa4MMMvWlya03dLejKDm7H/1V9Cdu7w0/d/ytnZCY+PHhEz7O3fo+sjq3VkuR5wYU6KkLMj50gIjrZt0Axdihwv1wy9sjo7oouw7ta8ee8Nht07PHr8iFvLFXd3Bwhz9g92+fPf+Ranq8Tvvfch6yTs7c5QUU5Ol8xmM9omWCimCQxdR0pCM5uB8+zMPZp7KCGfnIVuiIQ20MyFlDNaeJ7rvD3pe3Txy3bZWE8Rb9l4ZQvrqnV6WgzMeuyWAby94mv1iRNlHoTDXeHNw4av3Nvjrds7HO4IgR5Hj9eIyxkRDy5YPoB4RB1N25DJpCGTskACyZ5hlThZw3rYRuueR667Kb3KlK3PK0/jY3nRiv+1IfH5kk8DKbjuGnipyEDd1uqWWJPWMqXVsIBIBjF0QNQ2SEfthqcgDtKUIyAj+1DZMF3pBli9MQqSgHl9qfAMkDYYC4sh4WoJATYpNefC0PitbAacqz0MMpoUn2VEK1BL+kt5UxWU8zNt7DGrGR5QSf1q3hqSe3Z++iHtouEX/twvIl6Jq0gaItp6fvXPfZN//pt/wHs/OUK2rjJdoT4HUs6JjmWZroQ22pnj7bfu8Vf/8q9wMBdOj+/zwYfvE3Zvs16uyTJj7+Btbh3s03hBc8987x57d97g0aMHnJw95ni5NBg+KunkhLOzM5bLM8vUz0YJHFMipg4k48i0i5bFbsswgOIJiwXH/Yqjo46z7iMOD/Y4jp7FrT1OXA8nR+wu7vLLv/KLvPXld3j/3/kPeBQz695Ig4as7IbAwf4uw9CxWq0RZ2GE1bpnd3+PxnuGVcesCay6njj0aBbSAItZYD0M9AkY0aDt8MuV67p686PxSbEYN73baa2qaKliEby3KhKPkVXVahUtjZTGecTCR95ZXsuiEe7sOe7szfjS3V3eOAzsNcoiKKJnOAmoKv2QwQdySoTQEGMGyYSgeL8gp0jUDnKLcw3iInFQ1p1ysiwGwrmxOP/s4zo7Nx5Xvf8k+SIaAhdyeS55xtdK+7VsyvOsh2c99qUZA+YHbWRfqf1S6WpjCQ1IieiPJX9M/ANSWxVuHG8fmcIBTkCcQ0tZYlKL/efqZVk8weL9lByD8S4nwwK1UEF1tDfHV+qJkQL9VytgOsXUvHe6N4oSGFLh8C8GkPeKq9mSWZEgpJR4970P6boVe7duce/uXQIeR8b5GYe7C775jS/z458+pGa9m82ibBoDZXhAwAc43Jmxs2hpfGJ3Z5ef+9bX+MY3vso7b96mXz/kwaOPWfWR7GYwb7j75S/xZ//cL3P79h7zZoZzjuOzJceP
HnG2PmXZ9US1ltFHj455/PBdTo8ek2NCncO1M/b2Dzk4vAVZme/ucXTWgZQqC98waz2r3vHg6AEPHkfO2kROSiaxCo5mvsf66Ijw8QP8H/w+ujcntB6yMERYrSOqgdU646QnBE/KHsWx7jvjDfArmsY83pQyUZUhlvUxJLwTZuILJbVi/BU6rlEtiaM2jWVWpaxYLaWgpQW2c+bVe2eJrG0jLBqYtZ75wrEzb2ichbNC8My8UUxn78i90OfEECMxGa+EqhKCsAiwt9Mwbzw7s8DBDGYuI7GnCRERYeh6kAHE4ySABkjWQktp8aKoDOSUSCSbhxL+yiWPpY+Jk1XitMtkrc8/fd2eJk9qfHNTz+dZmE0/b/LaEHgtVT7LtfDSjIExB7849/aroCoMKVl3vhr9NOq/ES3Q8hIZY/8bnaap9tpKA83zX0XzuC050f6N3QBhDBnAxFFQPfdqeFQUN5brUXIItJzPiRprYVa8N64Aa4nsLBfACyd94qw3o0JlyjGI0e6lDcIsOBYzx87cWSWBCilHBoH7y8wnD1bcf3DCfLHPvAksZtaER8Xx1pv3mM0d/ZrxGSa62GnkgxfeePMW77zzJj/31beJwxkf/PQ9dmZz3tz3hP4xjz46IcWek+MTZrM5uJa3vvQO73zty7RNYn36iOxnqCpD6hDJhNkcH5XWR4aTM5bHJ6wefsyhh5QjD48HHifP6rRHsrK7v8Ph/h2Seh49eMBqlWj3GmKMxCGzXCldhv2dBdoEJHseHq34+IGyOl7z/fwh7gcfcBSVj88SXWkFmYvCFCJOVjgHPnhCCMSk5Kys4pqdRYujZdktGZLivZV7xpRZrhPBBetSWUw4JY3mqdFiT1B/HW8vtp69hxCExis7M8fBzpxbuy2Huw0HO46dFtABkUQjNS9GSTlaHw3N4D2SAjiHaiAlwXk3htS8Rhqf0BxxLiKa8dlKYBVP169ovCepQM5kGXAuo2rfgmHIhMYRAji3AISsCUcwQ8AJKXqGmFn20A0ZlU065qdvVE9KjnwWCPSLYAQ8CRV4bQi8lk9Lbpp78BLDBNVHBnSjUU7xkC2+LtRGKFI9fxWKFQBZSiOfGlKYzl559AcVztYZD+zPHI23+n/vpuMab+VV3or6jUtdleBNkRuLoRrRT1RiMkg/qo4NEENpJRyCo22wfwHmXgii4AXnPVETSQ3G1pL70LSW4BhK6CPMGxaHu4iDfkg48TgR7h4G+kF5eNTRLE64feuAnd0DdvcPafd2+LVfCzxedvzkJx+zXK7p1h2r1Yq+70jRav0PDvf46tt3+bW/9Cu8efeAH//gT/mTH/2QoVuhjeOj93/K6U7LfDZnPjcFAZm29Rzu76Ep8uH7n0BM7O0eMnRLuuGMQVr6lDk5PeX09JTjhw959NGHzHJi5jw0gW4uPLjfc3R6zHo5sL+3w+Jghp+37O8d0vdrzpZrvGS6QUlZcKGhi3DyyZLT02jVAtnyLyohU3bWlCgn0JyLh76BighILwi9jW9wNMGx6jMHewt2vHB6tmSIEAfjF3CqDCmaJ1xKTkdyKg9t4/CSmTWOxjlmrWPRtszbwLwNhOr9u0wbhAaldUrjB0QiXijdFRXvG9RuvlBhF16N3no2eN+QNRLIpV+DWIvlJMShJL86RUl4ZxRZOWUgkVPGuUBSbwiFZJRshqsHJJNyxOHwocGLJyZjFnReCB4enJxxukwMsSICV8MCFzP/5Ynv/6zIyBDxFDTkNevia/k05Fm+d58Rz8C0+Itff+Fv3firwuCuIAkjy99Wet5GUx+qwhcaVyoVnBkdToxTIFTGvwLvajQqYKAkEwpBFbzixNoH+6Rjt0IzRmQj70BKxUPd9DNRgGAT4wg43+JDw868ZWdnh3Y2Z9a2HBzuc3B4m929fcQ3hNncyt8kF+PF4Z1nZ9Yiszk9M1KnhKblF3/hF7hz702OT884O11x9Pgxy+UpaejZ3d3h577xNb725i0O5573f/A9fvTDH7LqIu1sjg8t2TlWPfiZp/WCDp6cM8cnR7z//o+5s77L4+MTzk6PuXsvktZLVt2KIcGqW3N8dMrj+484fXzC+mwAFT50kcY7mply596c+8cD94+W3D9asXjYcOfugsVuy3zWgiZWveUmrDvh6GjgwcOBmAqVdInjjCEeBUkZLWRTNYZjv05VJmhG1FospZTpeiUEkKzsLBy7rSc6IbtEE2DetsxaCE3DrCj71kE7sxK8tpHCbmmrzYmiKRvVdRqQoHgSLjuUhuA9Ka7JMRGaYkwmQaSx6hURGufRlMhZUEmknGkbo5puQkNya0S8KQwHWhT/oAOgZgBkqKa05kimITQNmjO58BM450Fack6IZDRmvBsYBiE0mURCnCK0iB/o4sDRMtFnf21P/6rPPW8J8uddLuP9+Fk1kF7LyxbLU3Lu6Z+EV6CF8XXFUSlPa4DANv2tPAAsXOArvS+WwWfcAgVEFyP38UgJH5RYQkOJj+poY4hzuFLqV/sVpY0cBmoioSX5E1VYRiVnYRDPWhxub8bBYp+d/VvcuvcWh7duc+vwgP3DWxwc7rOzmDGbedq2ZT5bEBqPeA8p0cWObtnRdx3rVU8aeo6WHR8+eMxqvWbV95wtlwxdh8aEB+azFse+0fM64f7Hn3D08U8ZVkuW6xVd7FGFmB1OI7uLFuc9x2drnFeCa1gPkfffv8+HHzzgS2/dJSx2eXx8zPF6yaydszxbgQhHjx7xyU8/QbvEWac8WkGHKS4nil9mZr4nDcpqMAbH025AEe45ZbFI+KYF8axWmUfHibN1Bm9GgJNNUL5UY4wGYI3ZUzM8xyQ7LQukGnxNgKZ1zNvArYXnYBG4vbdgp3Es5pY82PoFDUtcEFJc2SLpHRIisS8BLBFEfOlRYX0sEonkFB88OYKKo2laVBPOMlkRLCfBeaFpA1kHyBGXGpwr/TAk4BszVlOu+Qke72fkNOBDKCGthMbSydNybUulhsH9jZ+hKoTGgyh9P6DakFMEFyFGnMzpopLoyZLJhfQrI/QdPD6FZWW3vEKPPw0ReNrrX3R50nO/btz0Wl6WiMDO/Hpr7ZUwBjbZCIEpRDD+X8cggyObcs86xvU3MgpxWFigEqr4DeMBsUztml6YYWSDczKhEQY5A05xvrqadjHjOLAYbZ8SKaeiGAS84NuGpl0w37/FrZ199vb2uXXnDnfv3WP/8JDdxYyZ94RGmXnQfuDs7AEnMTH0AzlFhj7TdWtWyzMj7VkPrIeevuvo1itW/cC6H+iHRNd19F1kGHo0p0KiJKjGMrYWihDxBAcp9eSsiK5p/IrFvGExm9GIcnY2Y39vn1W35vSsY7nueHx0xptvvklE+OPvfZ+mbVgvB2uxnHpm4nh8mnnUQZdrAr0iksDBKZbnYRmMloPxcDmQHmTmjTCfNfgg/PTjU85WCSduRF0ymIdfmB2y1uS+SsdrBp0vSn/WeNpGLLluZ8Ht/RkHu579hcXsZ8Gz03gkG2rg0pkp+5QQjeQ4EHtF04CTBg3GWom3BLxIR0wZ0QBqa8MDZKV
fr8cVmNa9haVEaBojPzJDNBP7FU3b4HyDJmtd7TQX5kpPyhHvhZwS1vs5lY6W1t9aNZlhrM6MCiIpWUtpAWLurNMmAU02R74t91BycZz3kM2oISQyPS63ZHqSCqsu0Q01MednIYXv05frNpp5La/lxUli1jjeun09Nf+ZGANbpUcbSl8lF9IXPzYBkhInNjphGVv35pKAaBn5U9Kc80LrKs9Afa8kHUppV1wrABCMn11GQ0C1dK1zmGYTYxDMGdZJWSchJhiwMIBxEHja2Zxm1uJnM1y7wM12SCROjx8ynJ1w9PFP8U4I4tGYyTqQc6YflGHoGYbIENfokKxML2WG3l639r6ZGDM5KVGzPX8WcirliRXNkMnwqWMNineOpvEgyZo4qRlW3g8Et6YRmM8ce3tnzBpQ8RAajjvl5N33GZLj4aOebliS1YyonZnQtHC0VHoExFu2ZE0WHdsBb1RcCMTOSgIlg5MeF4RhMIPF8jWyfba4+U6UIOACzIJj3noW8xn7Ow37i5bD3Zb9hWd/p2Fn5piFhsZ5ggwQlwQHOcfCvZ+Jkok5GGw+ZLIGvCpowPuyKmsFQSnJUzI5AqLEofD+O+P3FxEKCEXWPFYieBI5O5w2hiagiAug1ufDe48XQcWjeW1VA02D85HkrF+HkBDXWwgs1o6BGe8CLnsQS161FWwlhE2wdZGTR2RG1/WEEBBamlDKdsPMvg9OCU4sqVEcfbTeHn2SsZ33ZUD/a+/2ZrLF+bEZXnlOgpnX8louk8o3s9d69jQ/9fPwGRgDm61SJ3++eK9UA8Bq+gSrArC4vJH6uNrO2E1eb/XaTdlb90CrOjAlbun95VMKMYI6IUZ70QFSOidqtuS0hCEHSY1IqI/KkDHKZMGCx85ZS2Rg3UdWXQQ9AyysUBV1vTfThTq2LzZ+hVoRIeV1oKIPWcv7holUqe1jLSZe8tJLaaLTopVGq8AWhhkTunF+xtG3rpGKX4I7HvDO6t9DidU7JwxDYsBCIqpKco6jIZO7XM0qhKnbn2H49d6Lsir3JAppRFo82mdcKTFxQOuFWQM7redwMefu3V3uHu5wsOfZmcPODGYh0IgnAF56nHZ4t0ZyCShJJqYVMa6J0hhakpMhDimRs9HuKg7vBxBF2MFLg3KEC4qTQBrsfCkpXjwuCME7VDoUrOOh87SzlhiTJev5poQqIrlC+pLRnC38QaIJLb5tLCThPMHvgiZi7ol9RKSUtvoBXIfirL+CeGscRMYF6zfZunnps6BEZ0YNCs4lwBMaQXzAiafxmRw9MSW8KN5nJDqiW+Odp+tWrIdEHPN6HFvkXq/ltbyWV14U67mzkzL56BU1BqB4+3DJFlM0plaFV1+r7Yszms3hzFrSDDeUWlU7XYI+13inGIFLjX8W9LOS7mSdDIWKNhROIlIuyYBi91OsEtsc+5KPoKUWPEdqlYRU5Z8L24E40GoG5VH5b8v5EXGTUmUyeoyfYKPrYHkge5yqlid0QDb+SLVxVfVCyuVSIdqJESSB08Lq5HzxTEv/iCA4bx0hczGqDAcwIL+erzJMqlrpnt17Ycwv6I71aFBCUGatY38xY38x4+BwwZ39OXf2PIc7wp29XRrvCMnh9AzI5DwQtWPoHM5b6EOdkp1gDJK9Kd6QjWQqR5x3hVTK4UPAqSXuxRTLuFqyXB89SkDyEpxl/G/1DhQIwaPSWEgmRdBcyhktYc8Hj6oQo82Hc4o4JThXDFohpp6UB5xkclqj2eNdIOcBRWnCrCSQCuSm5MPMcBJw0pkBl4WUMr6p9N2KS1ZGKG4A6QjBDAPvhDh0dEPAScPAGfhdYg6QAyqJrM5YIEtPDa38Ha8DBJ+qfB5QgdcIxudHtIRTd8VxOIDP15uzl24M6JbCG18syXv2/gj/l7etgQ8jk4+KozYEdmMGuZVcrSMMKY9/1xwAKaWHNdGsilCND7ahdSlKTDxF65NiiVmrKXopxkOW+lymjGpMu/r8We36TqRUUlw1OZuvb5sMm5785lNoeUKRc4iATp+bCJBk44T1fzI+n4h5gU3bsre/h28cp8cnrFadnWMon6tIhQa7brk/cWa0eCkVHEEI3jNvGnZmLTuLwGIeONhpOVgE9hfCzk7D/k7LTuvw4gnBETTSCmSyMTyqQ7HSQ3JGaUbuBpzD0aBDTyaTKUqVHkEJjXn2w7BGBZpwiKZIkkjTLFAdSAmSSvHeIfg5OUZiSGQNhYbasI+cElkHxAspDwSPxfWzoNGaUImaZ+9FCE0ga0dOkbZZjAZI0wQ7ZwbvW7JGYlxbCMZ7Ysx4b5UdqDdDUxy5lNlq6bJltMW59PSY4dyACxCzGanD0OOksTJMJ2RniYiasdDUoHg30LgZKSWGlOnS6xDA88qV1RWXjOurXF74eh18/kQwVHffQyuQ/PXm8LMpLbzq9dETnjzcesQUeS4bVQERkOrommLI0doNV4NiBARES+IVIB6VPJ1XKN0JK2ph8YKcher7GnFLvaeiACt0X7sHljIOcYXgqBg5I33yU79Y24ZC9f3Pj1g1qNRBCM6gZ2fKPPaJFCeO++rX5Y1zTOedfgpC03je+ebX+aV/5Vc5uL3Pj9/9Pr//m/+SvuvM+Ckln7Z5JRZz4fbBDm0wJb6707A78+wvArNgWfp7i4a2QP6NlFJPl2mcM9KfXHpCZCVrD9mThxW9tAwxkaW3mDogOePxuBDwbUuOiRgHvHjwlnwnLhCHjPdCVxL5YkwFoocglpCnoqhPqBNCsJyBFHsknxGjFeWLeLzLxBRxzuOdL22pzcBwrkFJI1eGa4zvPyeIGkl5bYq7kCN1XcL7OTkPrNc9zgWcZEN0cqn/d65A/pGhjyiGRIgoKQ8jl0IrgXUfyRlaathN8cGRdE1KkRDakoMhpOwgW5VBFgfO43AFAVoRkyej9DHRD1d/R3/W5UmlkpfmQr2iSv61fNGk6ISCSs+D49ZMmO2rJRVdQ16JaoJt2f7ybCbd6PgJU5rJXO5JheompXH5UhZ4Xt2GeSGlLasqWisZCvxun3AjM+F0T+f/FiqDsL3ljMOgdbQzj7hMzkqO0K1SMSwuO9em6MZnLh+P8TNi3oTzQhsoORcZghHbKIxVGYJg+MZmZvjU7U5Q9nZ3+bN//lf5b/21/w5tm/je7/42P/rd77J8eIrkmvyW8eJJKROAr7854y/+4pe5tevxKK23+naA4EFyj3cJ5yG4Bs2NNQoi4d0MjXMzAGQg+IBqJscBcY6h94h34JagA5IdpEDyK3JSRGfkHMmpI2uD9y2qjjxY/kEm4kIkFxKnLMbq1/VnxfgQMr2NbzayIXQAHwiNY1gP+OwRL7SlVFBdYh5am9ecEW1wPthcuzVKRGggCXlQwBUuAm9NkHImxgEkkDP40JA5o0lrgt8BiWR1aHZGSuQdkrW01LYKEVVHRln1SswJIZO0IcUBkZ4AKJHgQwF8oiVmikJWdEiEWbJ8iCQk1qh6UhxI0rJcOVb9iM9dXKGvPcVLDYLLlP51DIHPk7HwebrXn02xUKz3wr3DhluHkd0DTxdfYW
TgyXIunj5a2vbemJBWqQBHFV+V5BQSp1hJYPkFWhT46BNPWX0jgoBahUFV9hVOvxzyOwfsq5KGRJcKYyIGA49JextnmbbavPVqfa1kMYyJh5sJhE7KvWUlro3KeZMyVhWCU8RJKcdjJHraKuEshkkzC3z9O1/lK9/8Mj/43d/kB9/9Lj/+6cf0OUPT4Jx5riKZ0Bi9b1JIyx53dsLO7p4pf8nmxTvLcvehxZEJLuBdC97yMJxzDPQEHEIPbs2Qgnmuzlliphc0W9Ic2uE0Ir4lp4C6NULEZyGngOV+LHHO07gGXCYSITcgM5xXRCOaDCEQ35CSo8HhmTPkpY2982iKxBTxjREW5WSGTdLBqghKpr4DNPU4/Fgh4N0c1CPSl/yEnf9/e3/2JMmSnPliPzUz94jIrOVsvaAb3WgMBndkrsi9V4QvfOByX0gR/sMUvpAU4RspwxEKIMMZ3sEAGEw3cHo5Wy0ZEe5mpsoHNffwiIzMyqyqc7rO6VDgdGVGeribm5ubbp9+6q2QNROCGyhmDWSYIjF1SF2DVYSA2JpaK1azpziS4w+KDW319K1jZ3TDQ7w41iwSI4SgVM1uaFn0EkOrBOkIaTUTEWlVJARfX9pKOANUFUaFjJ1VeBd5HH7iFhfDQpl+XxTr6TjPkShd5M3yrVbfTClhCWDG9Srw00+g75VxMG5ePOw0f3Rj4FZL44NL63+3wwt4yKIfFKU0bel+mFMJx6aA49yG3f9eWsOi6exhYhBcBVbrjidPn9Kvn7Ld7Xnx1Zfsdw4gu2vJnxI7eVki1DqNcRGhCLQoxsReuAjQS6M3DmHZ6K4piEP1wYRH8GuZlxoqmDVcw2REBCMkb9Nr6umDqkJpOffWrWGe/66LvPjmK/6f//f/K6++3LIvlX7d8au//hWf/eTP+OqL3/Obf/xH8jjS94laCjUbX++Ur1++5qc/WjuFcjCKFjQH1AQlgShD2BJw3nzHs7kXnm2PmWKlggZXTOEVboh5aCuUgVpX1FpQu0E1EmIhxA6RBJSG71g1hVtJwVjZikJH3QQsV4L530L0JlBqUDVSys6fDYJI8HRFcGXp/QHUI04mRAnkAhI651FgJMRELRCkoxaagnae/xidCthqAgKSlEClkkkx4i2kV5htUW5aj4HSsAQVs0CtjpEAr25QzU6cVRXM+wZoGHHeA8E5h716IkRFtPP10DmGolaPPKwDwEgpBZWAxtCiOvXeHMF9yuFPWR6S9z9XUriUi3L9Ycp9z/X9GVeubyQZHz9P9KlwcwPjC7D9B1pNcHrDh/JAmNxzWb4nclC6ugAKTJ3UDohnPG3v6VAno2n+t+K9AVSnBjSNolFAWhnd9abjz37yGX/+V/8aWV3xd//Lf+Qf/+N/ZveqNrzAg+6u/Xv8oodgpBSbcdJIc5qitgacDGmhjGxu6zTzB9hkEOBlgBNocsImLOMnAVivEtdP3Ks3cW6Em1cj260dTtpmddhnPv9vX6KqmCVS3/GXf/Ur/g//p/8jf/Gv/oL/+k//xP/t//J/5h/+839lu82kFDCFV4PyT1/s+NlPtnzy1Fnt9kPlm5stL28y+31gLIpJYZ0Cn3204kefPmXTKV2EqtoIdRIpRVKXWs39DpGCavRIQBkZqpfndSkhwbPkSiV2kWrmZEVhanzlipLQsR/3oAVT73MQ6YDgTYysop14WaMJfdpQEl5hELxhD7ZHTFu/iEhnCmFHrSNYaCkDN8TU8kz96f96gWoICVUlxNFJrEokyhXVFIkFEQclhrhBQiCFycBz/EEIUMs4G3gQnGK4GQuqxTEQFjGDQnZjoBkmaCVqj1RnFew6GHWklBGRDgsrbxZVne+h8VXdKxcj4Lb8EOfkVFldDJYPUQ67//OrxM8+TfRd5sULsFFZPXBZfqfGwNFiMt9AxdP+6Fl9u4C6eazbme9mfLzNIML5GxPa3aaUgLUSQVkU9bXmyKaUCrqHF3XA8q8pw2s++vFPuE6Bjz6+oo6FcXi7EI9xMH5CgNW6p195mZizKIJVx8CPY6bsMzpxAYTFSZznzoO3NpUcHtIix8aH0UX4yY+e8cmnTz1fDuzHPb+13zPsvFzy8FJHJ1nyUQLGk6sV/6v/6X/gf/O//h/5+EfXRBv42x9/zD/9138mbwtdEEI08gifv1T+7jc3PFlveXWT2Q3wejeyL4qaEUNsBkvlt18P/Nmrwl/+tOf500SMHVUinYDZjqxrkJYvrxU0YrEps+heeilbYuqpolgZcebm4v0FY0KYvGmhaGU/upKj9KTe6Pue1F9jXU+UTMRL7RTIFhEJrKSSopHLDhueMO521PyaLlRM92gZibEDDagYMVaiWksvNdR/M0xi6AkhUIsiKbUW28l7ZdRM1cGrMEJAxImRtFRiiBQRKqVFNQTEowlBEil5aCmlBERyLZh0hBjQOvqcoq3nQMKq+jPRCSSYSeuetVwz7gtZvYR0KMKot9+ri7xZ3oWr8TF9Cy4K+fslU9ToIU2r4HHP93A+o4vCL36y4kcfVW5egRWPkpMfdq7vvrRwUkSLPPfkAR/1jZ/8/cnrbX9XU69qc6KBlgZo7rE4crrUQ56+ncIJfsQBheChd/+u18wPpfJyu2f49e/4zX/7Ayr4BvvAGs077xejX0f+9b/5C37+iz/n6voKU0fB77YD+92e7e4lX37xB7783Ssy0pTz0tdXTGz++cBB0JAIM7bB7ylG4aNn13z2yXNK9VTC8MWWYeslZcsmdMtngARSEP7spz/iv/+3/4bPPn3KOL7mmz98zssvv6aOGaowbNXbNkcYcuXv/mVHn4RcHbj25LrjJ9eRJ5uO66seEeXLr7f88+c7/v6fbwgB/s0qcr3uiBIwBpCKaY+SnQPAmhdtGYuxGQSxgTJHLCQwbSj55KH+kLCwgZgQiUhVYlcwDWgGDQFb9/SrDV3qUPYNXJfI1Q2xFIQUE6qFLj0ha8U0EFIPotiwAh0dvCfehdIwLAZqAKGwiqkRKxlEn2ONhog/C+d+8JB813mXDGkdBSUI1RI1ZzRGxKRVx/QoHbnuMd0SU0LM6FLCMFQLNRjU6ORSFC+rbOWOgxV2WdESeLJZs+lWpFC8xFC0gWHdzK76ZkvgwkD4fuRDb150MTy+XVnO77usgSDCR08Tv/qzxJNU+GoryE6J2XggfvC7NQYmQ0B1AsaFA6DNZFFaOOm2k7uY8ANz29qAiM3fM4NizhhoTDUF6m2JpUULjs7q9fkhBJ483fCzP/8ZIfX8/l9+zc2LV+Tse7aP9fZL8ZANMZhz4n/y9IrrDrZf/44XX3/Dfjuy3Y/st5n9sOdmO5KHFvNoe/FkNEmAFKdQgXjpWmkGzbIDpEzjghi90U9A2G63fP7PX/H6VW2phTNgyJauSUn46U9/xJPVNV/87iv++b/9E3/z7/8Dv/nHz6lDY0Zs6YlND1ebju1QeP7xNX/xyYqPrjqeXbkh0AdHsUus/PyzJ8T4e/7TP+z4r5/v+PRZz7oHSR1me4w1grfRFa4Q7QghUkqHRMckEJ3bYbVaA4kQVoS0o
Vog55GYAjH1IFDVSyy7apRSoHU5lFCJATQroxlBKrk4QI8QWAchFucsqNXnJcdIjStCFGK8AoUybqn5JVDIxemDMaPvEkWFoM4QOOZMNV9jgLcVNiGXEaW2xSV+TlXMCqqpeRK0PH6PUhhyRkJPCoFajYrzF6hAwSg5o1YZ9t7BKKRCHwWkshsMDRGtlaEIqYdoicG2SBfIQwGDrEq9tTpuy4eqvP5Y4twikzPzcPDlQzkGzimNxyrqC3HQ90NO18RDOl+mqPz8xz1PNyOvvsm8/kaR7Oy9+sDn/UeJDPi/0yePXNDtOzIZD23ijs7XUgoCjbLXMQIWbitQEPpO+OUvf87/9n/+n9k8/4i/+f/8e/7m3/2/+foPrxpDoZ5c42Ei7X9328J/+Jv/QohQcmEYMqVOrZe0GRoBLGKt7NEjIdPDd/R+aOmPalPCY0lRfDByJEDXr1hvnvDq1Qs+/90XfPVNdmIaUc7NeWwhlCTCuN/zT//0D/z61/B3/7//xN/+x/+Fl68HsnrPCPUWd2h11sL1KvKrX/2cH3+0IumeVch0VKJC13VoVTZB+cufPOWLLyu//3LgN5/f8PGTns11TyBRNRJSYyikp+87ckisnvQ8uX5G32+8cVAnrFdXWHVCnqrKfhicbwDnCiglt/kykrhSNhNqHQgygPa83kVGqqcUzKmZNuvEXiuxAVpLKZj0EBKqoxP05EoXKrGvVFuRdcVAppOeREY1Mw5bUt8T8f4OsVZyGQnS2mJZQUKgS4lhGKhaqObgv1z9HgPTiwzRKiEGHCxZvUQTLzdUg0qCpHSpYip0XXKcizhWxSqoZsdWJDfiYuoZ917Z0HeZbOLdNgdzlqy3zBH8qVYhLDfwd0kTXOT7LqdK4tt/roLx7Drxr36a2CTlDy+FMHgTNaEFIR8gDzYG3udL/u4hxgPuwLetFkIXRwNMWPlDNgVCCqRg7PcHcB6t78F63fPLv/hz/u2//WvWVz2711/xL7/+R1692JIHB6Odu/PziOAG8psPEnKBr1/sF+MRkNYqyQ61EtPx1lIfE33woZ/D4txnQY0tFmJCrgOlDnzz4jW/++1rr3CQKR0zUQPjoQtzOt6AMZbC3//DP/DqxdcUzXz9zQu+frGjVCU0+mbv+mhsc2DIhR99smb99COuPv2Esn3FfviaqnuuGnLfRx75+Pqa/+4vgO4G7XvC6jlPP/q0RT42xFWg71YIHam7IgejS5GrzZrYdXRdo+UNiVIyNt6Qc+eAQtuBjez2lVEbF2FKUA0tFbU9IQqmV9zkQhalFicOUjNnBEwDpY5sc/BGP5YRIp6xH4gykgxMO1QiWXo0RSyOmMBYA6N1xOtnfPyjP+Mnv/gLxlL5L3/77xi3n5OCklJHtUInSiAhsSf1ARkritGn3mmvBaSVxZgJ6NTRMTFqQa2QooAFVmnd8AoF7/pgFKvkWqD2dDFhvaGm9F1AFMowOIBRInXwPhrbrGwHZy+8yMNkiRN4yJ52iit4iBHwvkB8F4Pj25HbWJHJ4Tr27t+EG7jrb/c/NyGmwM9+tObZuvDiD5lXv6ushgDVnciHyh+9tPC8TGVv/ttUZ386KSoHaN3sh4t5RUHDGmhrdZz61hp3Xw78AAZEb1+cJFL2ewYdqbtd65IXCMOyMv/+mT2MbzFOmcZ/pPInOAOeqV1+axFqPNQfTqiIN1yfRpXcShyJ/Pb3L9jtdaZ0lnk+FycWQyhedWDG7794wRdfvMTE5mZNwTwtQ2iOIzb3hzAtlDoS+o5V/wlRPkHHPWV/QxRl3YXmgfb8jz/f8Kv/AZJEfvRsw/Pra0J0Tzam0Pj5FTVhHdQZC5OzFapJq3jIXk1B8kcYK1qFXASTFZISKXj9fjKw0dDao3mk5JFRlbE4zd6wz4S4ouqImVILjJpJck0uRjClT0Y0I5hhktC4oqgRUkAks46BXiLDCKMGiqzZl8TNi8yz5x/z/LNf8vn2BUjF2S9Borv9QkE0OlOiGkEiJRZCXGEayHmP4h0uU0xoNaqakzRpoORKzjf0fY8ZaNEGAzX6FDEtaCmk4EaUSSX2gd4UI1BrAjp6Gdi9qA6ofQdj4E8tKvAQ8qFJLqmVH64cO8vnq88ekhJ6qMEmDaukVJ5sEp89E77+aseXnxtlgKSK1bZHy8Oq4f5oxsDyJZoVX/ttmsxDyaFDAAVay1f/NAXoe8+nCzjILggmga7r2Q+VV6+8JEtVPSy6zLHjYdZhP/Ivv/kN/+lv/7/ELvH3f/ef+eL3X1DG3DbsM8GfOy08aYp+SvxP45//eiTnArJzG6JH7B3SUidgWAUrQko91WymIjZOrFfD6/vFSxuvnniuevs6U0a3lBSDqUsi6u1uU6CLkS4Frtcdv/izT/jrv/hL/vxXf46GShcjPUIoBTFFqyvvFB0TUENCx0onSgyCqTf1UTNUK7QQeCR57n9fCGLsy45q0PeJECO1OqlSVsUskjWRUa8qDK1rZR2RLiL0lJypQagWUBFEqxP6DCMS3IBKssIIaFBUVk7GYwMhdOzVc/FjLt5+OVY2XeW6Fzceq1LqSDHhq6++4Ksvv/BeC70QVk8pFV4OI9EKNSmJSnRggK/LtEF1BCCQyKV4Q60UyaPjX1T3BBK0RklOzlQZx4y34zaiJFLsiCmgNnpkRBxnE0QJDAQiakIXzY0qjZQxk6uh0kpe71tv7wn49EOTN83FY42ly9x+P+TwXO9/vnf1prh1vgdwVphAjPDx0xUpRL74xnixVazzvjohCN3U1e8B8kFEBo5vfMrRH6ZVxEvz+iSsVuL5U1G6GOij0y+aeF44RNhsruk3V/z2d9/w6mbk6npNHkdvXcxxAMcMtvvC3//9P7Hf3pC6jj98+QVfffmamh/fvPVg0jzQwnvk+e+UKWUizpD3u89/R4gZHceWQjlEBdxm8FmQVpkQ1LsRfvrJE8Ynha++eE3N7pX3vbDuAutV4KOnGz56tuGjZ095+uSK58+e8slHH/GLn/2Mq1XkZr9DCnQSHAmveKg9rQCDkr3BkAYqSi6F1jWC1KATU2jLMIZSEYOx7jE17z6osRkZSjWw2FFM0AiI0w+jyjjs0OLGhoGnE2Kg5uw9DFTJzRDtYs9Y9xR1w0WTQexJUSiDEroOSVeoGcP4mk3a0CUlhg7VwE4zFjuu1j1VYyuDHKjjnjoqRSOjduzGDqVnXwpdzfRJkKju+dsK0UoSRatg1dtCjkNgKIaQWUcjBEM1e2WFWTMm6tx/o0sdORdMIqrFKaENcg30qUPH7AYPDu6c+SsIFC2cN1FPl9vxm/GnihX4NuWhTY0urIB/fPlO17+1nTsYP//Zc37+k48oFV7t9tRQyVTiKhA7sGLIh0xH7Av69gJ2oJ+Tx8j8uXtjKcFmJXTJPdIQ1GvY22MgRCR0pBi4fvKUEBM5Z2IU1ldrxmG8HdIDMC9OePFqR9HfklLgZjuQx9Zk6B5z4Fx0YDY2xFzptUjBwSt/d0t/6r5IU2TSPpywCBXhyxcD+/F35NHDxkwkTTLNczOyxLkbVp2wJrPWLT/+
aMUvP/2YJLBZX7G+vqZbRXqBj6/XPLlec72+JsVAt9rQdT27l19x8/WOvkt0IVFDJNOjoafrnTK4FPVcPAY4Cn4cK2ZK6pOH0DURQ6A2OuUQAgGlqwJEEG/MJCqYFESrW74tHSQxoUWptVIMaivhax2kvOFQ7Cl1RMKKro9o3lJrJIQV1UZIaxIRK0apStaBkgdCvSYQHAgYFWPNPgu7PHp3QVFiSFippOBzahH2uVCqUWpF6RhVUIuMNTM0HIZIYD0OrGJwBs32TlSrjtdAiKEjEBAKatXnxtRZJmMgJKFkBxlK9OcdYyRIwKpzLDhTdutrEQKqSskjJo4XybWRVE5r7eKZfqtyH2Xxfd+567gPuQPiD1m+W0PYK40++aTnf/e//yv2X3/O73/zmkp1vViNzSp5LxTmGvo3yoONgXe92SNe7hbyP7j+BuZgvhitlfJZY+vzMHUMzYtVPYQ9zCB27XzCZn3NZrNhdbViNwyMQ6aLiRRT6y1//j7MoBS4uXHjobSuf4ca/Lu/e3QeWcQDZHopbUY03u9v3Wco2OIYPKQsizmV5uU3I2FiVxz2hSiw7pxoKEYhJqFLwrrzevpNLzzdJDb9iutNYrOCdd9zdXVNDIVVf0VaPyd2EdFMMOgkEQkOaDMhSnTWxJQgRSTAzfaGGhTpBM3Cqg/ez6Aahoe01WoDrgf3YulIXSJKYooOhRWIVVINjBNJU6moKrlkrCpWxBVtjIxlROvIftgTU8LEqFodR6F1JjAy8zK+iJciqim1eJQkZ2cORAUN5lUgpSJSvMcCHWM1tmVLDD2p0TBXVaQW1l3njIMoORvVOk9xIXRdARV6g5CckGlnwliUUaHLwrp4d8VV5ytusw6kkOhD5ymXKGCJQASp1FIgGojQxxUAfb8Cqez3W9Q6UuzbujBUvYGSmXct7PqOUStjKW/kGLjLA7pEBd6PfCjG16UM8eFyQIOdxp0f+P1Hz7HryJ/9+Dk/2US+/LrwVS1crRL0ClZ4+mSFhszNS2P76gOJDMxe/yIJPuXEBc97TPgGaWA2E9+AuxiJsTUuDu7FpnCYbMWwMgJKSl5/HQRnKSyVMla6dYdq9ZrxM1vWtLmJhOYRTSC16TrH37rvwR36DiySBa1ZkLlNc48c8vrI4VhdGALS/iDmZYbTvUrw0HGMQoqeTtmshKsueC47BGKK9F10FsQ+kkIkWuSqg00fia0OQ2IlhOQetyhWC6GMJIuoVap0oJF1CHSrNSn1GO6tqwm5GEPdM5aCyp7YOjUJcZ5Xbbz77s2C4aC6OhbyuCWGSJTgBkjx7pSlMfwFc6uqaPZa+6KU4rOUh4IzTnovgZK9EiRKZCgNNKgOxiu1Ar6+ajTvAVE8f+52gKP7qwrUiKgiYU9h9LRKNUptXRnxjo5a3WDtxKmWS65UDdQaGia2kqSyjsZV530LSgG0Z6zGvhpZArX4OV6NW667QKpKHzKymtJiAaGSQkDCipICSgbzagNfMooEpUtGrcWrRXKii4EiSuz7ecN3hkRvfjRVf9xenZc0wPdBLsr7jynfzdwbRorG7puv+Xf/jy+wUnnxyreY/kr42S9/zub6ObvxC2p6SbHhQed9b8bAYRHeLmo8rM+JLc8VmgRny5MgEGQOcZm5Eo3JedqDQAxCSkInASS5J4R5HjkkUkoYBQk9pYy8fvWaWmHdJW8f6yPh1PueC/sa/6rzzB/Ql2cJes7l8vDIRpCJl545OhCJZPMQ+emCacH+44jCPE8wcQ0F/OcUoUvGOgVWfWDTCzFVYgddH+ljZJU6Nl3gKklDlgspeUvgGDsMJbWSuRgM0+qkTDiS3ImPBpAVWgRCZtjvGa0Q+g2blNgPBSTTdYZIoU9riipFR5BIUfdENY/U4gQ+gtPpOrFOox0uU2PlPYFAVaULHm5PtSdGgdg1joSKYZTqqPvUCHUsVmo18rhFJFKKgERCqGh1T9+opORkPd422bn9zdQbOiVPOVWDriaseutks4DWimnG6QRHb5Gc3fDINqDqEawYkvcxIJBiYFR17EAFNNB1gT4E1p3RizLmlqrRzCZ5tYdh1InAJj3htSoMmRSEPlf6BE/6SJ9GBi10Mc3PrusS6n2bvLRWKwL0KWFmrPoO1UwkUFrkS1UppTCqMGabIwNn1/17SHFd5NuT92kIXIyKx8jjogHvWlofxLjuA6EUXr40iELoIl0QPv74ml/+7Bc8vfqE3fARf1h/zj/sPn/QeR+eJrjXrXX0ogMbmNLkHEInxxGCqQQutPz1xMI2NeMJ4sh4MW25TQdqhfZlw8CcDjaFRAje5CYkwaggge1uoKpzt3szl/OIzdNOYl6H37r+eSdetOVRXUmen4cQjNRF75rYyA5M3IOPEhExxtFmQhdrQILYIiNBhJiga42T+k7oukAUP1+fEps+crUS1j2sUk/fJYIYZgWCEqIzzKUAQQ2rlRQSiofVhYhWV4ASjZAMZEWNiSG3rV4qKERTb/krmZw9H67Bu/8JHVQj50pK0KVE7pVshbHsEemdPz8I7qFmjD0xBGICRKla0KqoRKK4lyzJDYbUdR65SCu6TolpRS3qofwKybscocEwq9SaQbw00UGG3mMAIIWIoOiQm5EZKTgTYBmVGL3enxAhBkq9geCRjFozWCUlf3Ylu8JXqw34CLSukKUOrTQyUmtjUgzgDYz8nZAU6VKPWmZfIjdDA1hKT5LApts6aVAAMWFXlVzBLHKzL4QcWHWB3VBZdwGtSpI91+tAF4VaCl1yzA0qiERW3ZpSfG6q7pyES7zKwkwQ8zRWHgpj8T4eJrfX+SUqcJGLvKs0B9YeZzwsv27gad5esKpkAdSjxIbx+tUN/+Fv/oY6Kqurjp/+4if8m//+3z7o9I+IDBxCijApzknRH2yjeR+ZitqbCK5op+97RZWXwQl2gLkHmf+mxSCFVjvvNe8ScQS0MofGG26OGI2uixgwDNlhajmzH0fkTC70LvBOTLC6SsQgjEOhlIpqQMXr7W/PipFC4OnzawfCaWlMgkaUyLpbA3Bzc8P4ej9djJSEp5s115ue9Sqx2kT6qHQp0KdI33VQlBgCXRdZ90aiIhXPFddMKV4rrig5V4K6MTUWZxy8qSP9yolsggVqLaQYSbFDNULsGVXJtVCLEhJ0KVDUQX6CekvikEmrdVMklVErOQsyVELc06UBDZ67D1LoY4+sOmIQQvBWxilBTIJaIImD16q5oRSld7xI8vx410XXzxJIdaQLPWMBrdUXvqnTLRukGKkmpBgpVolBvO5eA5iCZug7ShnJOSDSURsvSM1KrUYI2sCKPSoVkYTVjiBKqd4FUKJQjcZ86OkfMSMFT6mUWqlaKdYaCkVXomaVECMhKDUbXfDmQsUKFiNjjiT8PEKgF9oYW9fBzt+Zal5gO5ow7hUkYqZstbBJgT4YfSxewhkgRafb1lZvbDq6ER47Ugg4tbS2jodGLkZRmGt3OUT1lviZi2Hwx5NzHvvc0+UNwMK7vn+R9yFvhxl48HNpeicKPH/W8/QaGEcm2r3cGOrqYKyDcvV
szfNPP+Jf/eu/hvqw8TweQLjQqcee9XFe3eTAAehdBM0VqXokwEuiaNjBaUK8NjJEr580M2ptYdxQGxmKt3ENIbV2xUJMia7v6bqO1XrFbjcyjl56tb3Ze0vaeej3gKPMr2+mjPuMVc8Z69QwwJZQvuPzxCh8+unHPHv+zLvR1T2Cl3r1XWS9SnThM/avX0AthJDou8DHT5/w/Mk161Xn3n0ojhdQoWbQUlozGgWGhoVQtO49zFv8PiUGVn1HGWHIhSo9GiMaBI0rogjDOJBSh4bEvgZQwXRkyBlTN3hCTeSxEFNLH1hwBHtUQg2MdUS1EGpHSj0QiT3EmomxcyUqRqFQVYix95B/jE6Na+pe+4QTqdU92fYyadlTWZNi9eMaGLbU7BUAi6jKsBso1ZvsTJUfMbrBmaJjT6xC30X67poxB2AEArt9hqiYJqBg5uA5LUIp3j3QFLpuRa3Qdc5KmIuSokepxlwIIi014g1+UhBK8XI9KR6lkhAQVUxHT2lhrFeJIIFSFVHPATqrpLCvhUAPElmtvOwjxohVRShISOxrdWNXhd2g7AqsoveC6IKQQiVGJYk3lEoY0QJitY3ZI3OO2fH0wVBotMgHzMv0Xlzk8fIuXQwv8kOUt48ITF/vAvzyxx/zk896/uXX/0I18QquFPnkk09AKh999JSr6w3r2LP96jVf/e7LB13mwcZAgJkRb6LLvUUW1Ch2aah6mwwHc6W43Fys4QZmwyLInGv3/HtAooc/nGfAw+dBvAFPionYJWLq6PuOVb+i7zes+mtKObTmq8W8ZnuaTe73blTNFXGZjpeDAuL2iz2dMQRH7X90FUnifQZS8DC+MNKlxCoF4pOPkZrpu55VF9mkxHq9JiaPaBAcHV5rIKvXiocA41ApJlQNmCgm0UvgrFDz4PUZGqhAkQ6NgbEWkMhurFj1tsBVw9z1UcRD76UaEj00XsoAeCSCpKTomf6OFfv96LS9ooQUMG1c+TUiXU8IPTEmAoGu74h95yA8U4J0FDPKOM7rJXSJftUhBEoNpOjebgwCqlQaHa8EinokRCRRaySrR2xKLUjqkUhLqQRv5KPV2RPHShSvu9dsRISslRQEK1DxygFVV7hRFFoJX98FSqnEGCi1IiEQxRCFFFcE6zAqEhLDsCO1NRySG79OkGWkPhAD1Oq9ALA147hrKRmjx+hCpNSImqdOHFtRSWKoRoKBVq8OmQzUYgULgaqRooFtsdYISdFSWodKpY9w1RU+e7pi0yesKBJaMM6MoiP7DPt8KNS5a8u6KLYPU94Xs91F3kYmbf0wpr+lPPS5GK4vexFefv4HwuuO3a6QTZBnEbHC/vWOPA5sv35JrZX9LlOKshse1pzg4WkCm4BFzB5yQwEshsvk6rsabZGTSWEucQM6f4u5bl7V8+OqkKvRCwSJDiSMQgwTD0GHSESCh5JTCsTUOYhQzWurY8BoVQRHrIPnwX/Hm9y5ByQzHkIWEQK/H9/8b775mm3MPLlacb3Z0CdmPn8zg1pZxchm07Hqo6PBtVKHgZrBS+ymsDSUIqgWVAslG0M1b8MsSp88bG6hYBGGsaIFhlIoKl4jT/XzZQ9n11yIuXj7W/F6x9o8zGjeAVLJxLhC8fbC0CHJGfxScvwDGBYSKm7g9CkhwXECMfSukFPn99Nq/PfDnjFXkuDeakr0q54UI1rEQ9qhEsOKvltjtaCm5CG3qIgvlKrKLhdKLa54JRJTpFtFoFJLBq1Yqc0+FYZxz7AfnaSoZkoevC0xEcUNg1ohECBUYhR09FTExG4paOMSAGmgkL6PeNmr0YU1ZoWpEVQMEK0w0WMblb1lirrR5VgKAx1ZpY7A6GyRRdBAAy8a2qooMKFr+JdclVyNXCvVlFyMqgFVj1wUNcbRP1f1xfrJdTPwyKyTkAIYjXjJvKvhfpxnevniXuQt5WI4/SnJ7TT0e5N22i4YH10nrlcwDoNj2QLsc2aohe3NnpgiEhQxKDXSSaTv3zMdsW9xh0B709/zYBs0Yk4JTPz/8/QIh5yWTFwCNMDeAXQgAS/zmyhaWzTCoWhe204IxK5vN1kQaaHg3rnYRYTYwgynwMGH1EifiwKc3vMhzuH3kEfjD79/TdkNfPrJNfpR4fpqRZfESV+A1PV0KbnBU41RDa2KWCZ2HSEIglJ1ZBzB6Mh5pOpAzgGlpzZOhZydna9qoVRlHKrXxZtTLCvqoEcrjrdQw8QZGaKJp5FMkNi74mqUwzFtkOAKSFVIKdAaOGAqSIqELiIEurQidoHUR7xiIbHuN17X34gVUkotv+9gxi4F+uCRndQlxzjEppxESTF69KHvsJIZJZHzQK2FWo0CWPToQZBKjD1dd0VaiadMLFJ1pKinmNSKp51SaEBDbakKI4WEIcSuI1NQHeii8xvUIJ4SSCtCdA/dtJKDp26gkjonAApU+uQlgVPTrCiRREGCkLNzIsTqhqpSiYjjNwSEitaRUoSiofWR8LWvpSLiZZ1DMcZs3OyNfVaGbOQCuRi5wpidL6OqkNUaFbVLrsIv9oEfPVuDFQoFU8ej5LGy3cMuN1yOKGYcW7wX+SDkbahrL/Jty1uG/3kYZmAyM55eRT79JLFJmSAdqyIMWVupWSSqIApSA3lQQinuZD7wPX5418LpfqVBFlok/hBAPwxdFgpz+cPElie0/gKt/FrC4VjHDfjtN4gAFRwZL4FaIzE42C0THTg3Fiz7RWPXMQwDXZcIMpwNe95GORzLkXHQ7BG3dg7W36mJUU14vTf2OfNi94Ivv37Js+sVz65XXF9t2Gx6NlFQMQZVqhlj8XOuO2HTQZIOMaPveqKsyKVQLKG5HkrBKFStSFxRTBlKwzVIpJoDKxE3gsSaIRKszYN6GDlMVRq9P0jzjnYp9agmJBoxTg8lE0KE4EpbOyGFyLpPhOQMhKELpBhZRTd2qkG10qIPQuoghp5gnmsXAhK9Hl7V10WM3nAzRiccgkhIHYnqAJKSvcmOGRCJQekSxLRx89g89+7sikqMBuL3WTPEvkMo5FJY07tlLYNf1wyRDBQHHGJ0KREEUqwoAa0eTeksornRZQdBJJHHEa2t9FC9S1TXVVLIQKK2UFrXyjprUKJGj3SZl1iOJTBWJ2Yq2ajFyKae3tDCOBr7ArsRtoO6EaCOGajNqNb2ojrkZEGlLbAvbjwWdQ6HWuNs1m8H4+utsstHIb9Lzvt7Ihc64j+2HMWJ3/1sp6y2Al00nvUR3Y6MVLouIgVk63TtIjjCONMcPoj4pmDvu1HRnP7HN55g0+/WCgGad48cWga2X5c/TKx8QcQdzsaWF3CynxCFIEpVqApjNmISNwRUEduTSvRSquh59i5FxjDy6uULPvr4I7oUefbsCb///c0Rterp/RwN70Sml0qpc5Rj+sZMAczhwZk5l8FYhPLauLlRvvx6x/Vm4On1DR897Rifb3h2vaFfb5CQyLkQohE10FdlryPrDggrVDIpJjpb+TUaUY5iiESGUnyO6jSmRJBIldLy/0YgNR4HWnXG6KWM0VMXXYqUWlA1Ylh5uD1EnP
w2IbEioRIihBiQ2NP1a9ZdIqZI6hOp6+i6RAzOErnqXPll9R4BwUtDmKmPQ0RJmDSeAYHUBQiRyGoOs2ORXMdG2CQESXQpQRkIksBGrFSq7RGJUIsrLYFa1KMcEUouiEWkFhIwVCGESNf3lFE8704lJvMogO2ATJSARSOGgllPUUNtR4wBw7zCpBpKcEOnxc6CKrWMUwEAu5IZNWDRyw6xSK2tNNGUkmHMmf2g7MbAbvL0szEUGAtktZYWgGzOJtl4pVscImDia5D5NdQ5kudYXTf45gqNsKZWoVK5GQe+3noayqNfUwLwolgucpGHye22xQ+RhzT8igIfd4HrIRO3zq1j0aAqfXVdWqfUvIJFj6x7N17z8sMHyMMxA80ScK/f8Oa14kCnFvb3m2sHix288um7TKmCw83LIR08h+Fjaz6k6nnfOhohueGgBlJtyh54qZspRZWgcPP6hs1mw6cfP+WL5y/56qtdy9lP417ez0m8oCn5CVglOOJ6Hl/jQ5jGTrv+4SEa0ualmFGLsHtlvLjJfP0y8/VXOz562vPkyYbN1YquS6w3Hau0ZjsWkMJ2PHQI7KQB44JSJdN1gaw4q13JDPuxeagFiStq6y9sZg5wEyEkcy4CVV+uFmbvHBvoYkBShxYH10nIJDYQBZENIdlcsikSCOZxJwkObBQdnBVQ/e5rLc4lkPB0kAVq9VI9z5MXJDpqPRrEuAYiIhHTQC3a1kulWqBSsKrUUqGCViVIoeqAF95pM3xGX48SyXlsuXYHY1oZERtbO6QGRg0RKwVLlRAVzCMHUkGCe801d5h6OaC0Cj9hRKNAFcaqINmflwMDPCXSJQY1xlK5GZSxRsQM04FaYbf3/HzJ5m2PizEUY6hKNjzk38oBlUAxndegSWicHo2Z0xqm4GQtypGx4H/rYnTOC9wQEfzZvNopL/dGmY2Ai/wpydv0R7jIUgSIb/3tN1XrbICPq3CddW5R79w7HBwg8/R6beleT+46sL3aewYQzrQBU4hgjusfexBHt9VwAdNfpij7pHBROYDDxM0LCNTipVYxiDeGCf794GUFHga1KVUSiCm59xpcCxUtXG82/NVf/YJ+83u++eaGPFa3qObR+LklBPo+0HVejrl7NVIy0Nr1ItJY2WhTfHuz9E3Y/2Lz9stMYrRXYXxtvN4af3g1sukz11fw5Drx8bMr8rOndCm2fLKCKaso9EmIVijj6LXwSUCVYEZix1Xn4fVsTgikLQSs2u5OBKnWUOcg0oF5WWeIQowFK5EUeip7go1QWyMgCiFuCJ3zEnShEfTE5HiOsYJ4F0FV9QqKkMh1xBtLdUhwgiMzB+il5LTDMVQiEbHUsCMVqvrz067ZaSMgSCmUMjJmw7RgBdA9xpYiAbPe+yRYRmIgW3aOCcXvQQyjuoGm5h0BVahjgbInSHFPWZRoMJKcMdAEre35RkGigXpqI2tlyA2oGYxaR8Z9JWdr5YeFfa4MozKOUGtBq/dlGDIMxUP+WUHNKFjjL/B1qdN7I8KSkNrfw+OG1NpeqKUh4F8Nvv7m1y/4mrDYjolerJpHXu+MV8OE+LkABb6P8tAGR485/iJvI6fvj/AQXMA5URxE/0lIPK1GhzTl7tGB2gD3EY74dsCVo+D7yxlS4LPyCMyAl9gJjmifIIMypQimLeo037HM2csCNIjRaObnDnrWDqjOrkJKAUl+LQlhvtkorVZdjBCNkLziIIg67W7w0POPfvIRzz59zs12YL/bo8XBharQdd4Uh2h8/PFHPH/+jNfbLf/+//U3/PbXX3uUIAgSQfPUW6ClRB46ae2e/aGIU74Oxs1gfLNVVi+UL78sPL/e8fRZx+aqY92vHBkfe8wyYcyoBnKu1HGP1kZCFIzrfkWUSJXIrpEq5REsV7BdC9N3xNCTgUomxOoleGJYdSa6rILZiFlHYoOFoVmWe4IJfZBWslYQeiR2BOkIUgkhEiy6AdFK6UIQUghuK4tz6VuEmgfqkJz8qD27Di+Dk66DUlvu22hhAI8K5BHNI0TDQitSNfVUQ6iNPCdRtTDmQhmLMz9GRVDyuMeqIdJ5xUR2ZkFhJFHpgpfujUMjIEpOzhQE53NAKFopWdmPIy+3e8axkEfvmbAfM7u90wtPXv1YHNw3pammtuKlpb9aL0UmwO0Bi9JAuOc2jkU+f/lunX/vDr9bC+l5ZYNzRxAjtW7ZjpVvdm6YHOIIP0w5997+kO93krtaIb/p+IvB8LZyPmXw0I6SrUUOAfg4BT4VWNUWB7UWrV6cvpqnEryE36vytKnkekIidp88imfgWBZa8SipPoUl28dNGcbGD+AYA21f9xNMHQJDkEY57KcuChRXGoJhMRJmmLM6oK4qmjNKBzJi9ISwclreCs/XT3h+/YyYhNWqo+9XdKlns1kjnSBJuF5fc9U/5ZtXr/n1f/k1v/vN1+49BiPOUYHz8pAQ27wImiWhGGMVcjWGwfjmZmD9TeZqA8+vE0+frXj+9Cl97zX9VBjHEWNkvY4E6dukQVEn55Fa6EWIKXpPem1hpNYoKag0b9ExAkgiVyfHFyuuuC2Sy55uYiuUAiFS69gmoPc20WwgZiR1RDZ0rZwlBAGpnueXSNFKDF56WMwaF0JlHDIxrql1h2rE1OjaCi4FT21Ep6PWWtFaqToSRVh11w0kKQQdUZxyuOYyr74UAgmvNlBVZ7I0JaSKZXW8S/CIklUhERnHkf2YGcc9AWOomTIUxtEJrMahsh2N1ztlO3pYvxRX/kMxRnNLvZpHkmaDt70LU15LOXjrQmhpN+8g8WAj840HnlgNbQ2qOXOimVFCpppwsxde7Xzc90UFjqITDx3nBygq3AY4X+Qi7yjHb294p7U1pcCfxsBnQVirl1Fri8H3BkkiGaXgvVqKuXPtVoSgKBY9YvBQzrCHRwZsqnG/feal/vP86eLzdriaLiwUaUC4Q9tdm8KkTdmH4B34DDAViukcFpn4/xOGVXW2u+K5XUXIdfC0hr5ivdqwfnLF9fMnBAs8WXVcba5Y9SsseT14Z8B+y3jzgrzfzyFaU6M2joBTboGHzNfcdEkWXp9MJp1ndkZz0OFQ4NVOefFq5MnXhadP9lw/6elXkT4luk7oYySGzgl5qjKIAMGZ+QRW4llx6ztn0Ys9xZSSM6ahzW2GkJzYpjEbijr9cQg9IorEipYOsQ4aVa3W5EC/uKVaZi3Xjo6wgcTac/5Mxk5jOwweGo/DCJ2TC02idYfWwD7v6GJkHAdvJKSBYIloHRIUk+olcnHFqmt57y5RtYOS0IaBiCmSiyBdRMOIWKbkjFYh4BUSUzSglpFaMqY7L6u72bHfjmyHHTe7yjA0zz57OH8cPbe/rzCqunfP5O1Lwy4cTHVpnn4j7vbPG27FUyBuGB4nlRZG9PHbxVx909ZNe3u4vSJvfzZ9wyM3gaKeutFivLqpfPlCudkvowjL0XDr88lQP2Eb/97INO7vczJkWeVxrr8KHDslDwGpXeTdpRXmcii+nxzXSUfe/cb4LnHYD9ZifCqBJ8Ud5wGbowWi4CwybQ8Kr
aAqQsGQDuIqENfe4M/qw8b/Fl0L54z4UXpg3tImQ2De+JhrlqfKA5mioMbh9mXqlucn0EaYkpI4KQxNwQYjiZAUCpkiQogO4loFcX7+PqIa6NKaKkIoPU8UqJXdsKdb9dRdpZZK30cGMYzIy5df8erV9nB/Nv3Uqscf8B49NBR0ujAq7lnmUbjJ8M22svn6hvUmcL2JPH/a8/GTFTlWEh4qGrJbgCKVLjjwLnaCaMFqJqaekls3RYxqAzG11sc9BBK1uqKx2KGyQ6RHtSdQCAoli5fvmRAtUfJI6oUQO6oK+30lWiWmiop/LkWpYU+W7Kh1cT4BIXlDp5DAEqON1KwknJtA0ohJ7yBRa+FsVsSu0MfAWjpS6CFUjNFpqGPAams9HDMBpxUec6aMI8N+x7jfo2aMuy3b3Q2vt3u2+8I4Vrbbym5X2Y+wy8p+9JB5rTA2Y1CtKX+Z1sPRE2f5TviTbWu4pdYaQGZeS9NLYPNXJldV5pVxdA1xoCDzxr947xrTpi0M7ePRtegbyjgUvnixYyiQR/h6q/y3L5XX2VMThxHes35/ILrk+2jITLJkRH2scj9nGFzSAe9H5Ch+PofN2/x6ufT81xM9YdMrLUYEPonCc4zUnKuKA4olQImQkxCSUAOQhLQK1IgX20ewKFgyZzjPDxv/o42Bw9K7nROZ/3owiA6Uw+1mD/ffvOXpKwV04W54bt+w0ajifxOYwYRYcLxABFHvhmchNLS1UxV3q5712nsWVK0giZpHdjcvCXijFq0dFpQxCy9evWY3eoMjFQ8nmzFjJOTsPZ+ZIzv/or7xpWtplGLmrXer8mo0Xm6N7c7YbTPPrwNPrrwbnYp77oGmbJM54U4evV5fMjF6YLrvV2hdezhfDLGMSOR1ya0s0VMFmg2TAeuELEbUDaVG+ujz0sUEauRhS5CIUBljIeH17qlbUVUbK+QKtamFY0eMrapBC0E6kiRCFLCBlBKhegy3MlJrInUrohnrFJ0GmAxVyOOeajuk9QEoQ2XYDYzja/b7G/a7ke2rPa9f3XCzv2EYKrt9ZbdXbvaVfW7o/QK5QDEoeElfbQr3ONA+Gb5w5/Of1vfJ3nxclnp4zgJzy+L5GnY4/yIQwOGo6WQPze1PL2IgZ/iXbwqvt16SWhRe7ODrracI3qRSfkjq4q7ox/dJjgjRpsjjGeV+H2jwYgS8P1kUm+OKfzIMTv9txy/nXoRoHikWlGdReBqEqEqNoEFctwXFVoL0gRwhdG4kSAxOZBaik+2pEgmkYF7p9cDH/Chj4Cj0Dcz1gLPTLG2/krYNuU99bqeZt9umcKdd1ObjJxY2/8zRkm41xYQ3YQmQWsmhs/e5sjEzNusVT65XrFY9Ka6ADsOZ3Hb7HSl67r3WhKDkIGSUqg2tbWHhtOlhd36kZ/TYF87tIb9eadcvozfFebWFrzaBZxt4epW5uupZ9YlVimgMbPeFEDpqdWKcUCrrtTcpCljr/+DtdasKeRhJ0dkCpa6IsSPGigUhW0SJxGQIBSNgoffHFCKYe+cmQhX30BM+9k0XmVpRq/n5zBJaGgmStoCYFfpgOA8/XlpIZb3u0dCs4jFTbUu2gWFf2G2V3fgNpQ6MQ2bYDdy82vLqJnOzzdzsM6/3hXFQxlEYSmVs3fiyGrU28F4j6/ERO9GQmRyw++1RBzv8ci8Oxw4/KMwdFY8sYzwUOK3rKea0PMnkoQsHQ8K4fW2bXYk3DciBtzuFz18Wvo4BK17CuCvGUMMiOOkjPHebPxS18UO5j4t8aLJ8FyfFv1xtt1fezGVj3mwtReNZF/hsJVwF74SbukAW7x0TCU7Ql2yOVmoLu6sKtSoT/TlBvRoqCNv8nksL5+Dkwns5igTI8lg5iXUewytmcCHWoFPtKDn8PYiT5YRWSxnFLaAorotSFNYpIF1AYiRK8lrq1NGtE6mPpFUg9ULXCzEZVQckJO9frwkjoHhdfBCHeVu143DPlKehlUFKOFRIfBu5uIYDsFYv6rllJSOUsbLNxqsbYdMVrteFZ08i15vIk7WT/4gVn//o480108eAaCGoswdigol39nsavZNgDJk+QdUIoUdLoKgj6qNkEkKyxKbTpl8SapUUgxP+KKTk7XpXcYNIJNfq3RDziHSGxuRVIQFMM30qSDWqdYzjwFgzUSplACzwOg/k3Y5huOFmGHjxzZaXrzM32z1DrtwMlf1e2e0LY4FddeKcrK3utv2nPqUN+zDN88JIncL/YQrpTypRj9T49LX5mbewl+v1Q7psboslzOWl0+8T6dbhlMsXx70KmdJS3GUITC9cOH7NlscwGePifSkwXmVjaIyPJsY4nbsZFnfE+e6U75ti/b6N96EyOWl3tTd+n9d53+f8YcmkdBuT3htkms8uwJM+8Ol14GqlrHpX8ipQxDCBkJLTh4NHg9WrBiQ4q21F2u+QTBh2znOTgZevHzb6RxsDs9VziG62AIEudg89ihic8hDYHO5sXdJOo++C098G5xFw/eEWT9cFxxFECDHNBDIpJkLq6bvEpu9JqSeFNYIQLLOKK8C572PsSeJ59ypGlzxcPORCKXq0eS9bKoXgTE9LY8iWY25/enfmtuMt3j3nCBKdga7ArsCrvfHNtvBkVXlyVbi+Dqw3zUuPiT51VA3sa6HmQiDRVyHGSojGJgpdaPz9JsSwYpRANkhBCTFw1fWspbKKe7pQ6MPakeeTkVTdwx+0kAP0fXRL1iK1GpWChIhJgZJREQYMq5mtjeRhz36f2Y17yrhn3O8Zh0LOxs125GZX2W4Lu6FyM6oD+rKR1SjmIW5VaxayoK0PhEceDpEpmfm0TwxYW/jnC4Xuxxw91KNlOinoOWuFHQUCheWmeS5dNOFnjkO8h7EtLZXDuG3i8T497cmSO33nEsq6S2xWXqkxDJVSptV926Swk3+Paccv3IQX+WHKu3ExePO3uwyBI6I6cf6ViPE0CD/q4VmopN7QlXfNzdlQK6Quso5GtepU72Ojb0/qhHwS2W2VVzuby5aH0aiN9l71YffxCNKhsNSB/tnxryd3fnzMIcJuR4fMEVMO256XXBlBwaQiIoTeaynF48kIgVoKKkJB0c7opyS/OMd9COrd84JzvqeYEHOEpQRvXoNOOmDaGVvORwKtGG6WEMQZ6WyKHuhRimTK+rwXH8QmVGqrp5+izeLeqlpgMBgH2I7KNzvl6ka4Wheu1omrtXG9VvrkyqxaRIIwZmNDoENY9SuiVtAM7BFdoepUllBZpURPIdYRZaCGyDh6pztjT9d1YInYd4y5ECOIVecTqN7oB0bEBLNIzntUK8MwMO537PeF7ZB5dVPYjUoeKmPW1pBHGIoyVqNYIJvnxyaeCkMcPHfEN214xcCRfw5II6larL0T3Tt7/vga00m5n6QHjvXvTPh7b/PSo6oS8WcoIv5OzeM5pOBgau8tt96Z+Vdxr+BW3dDChpjGF3CD+elV4PmVRwW+UeVmZ/P1zsnppxcD4MOVhwCXH4phujASujwcDA4Hg/ohTIS+Rwm+D8QAoTq/TjKn3i/tmD45IZ5Y
JcXIMBrDKIzZq9C2g4JVT/mVRlgmHk0OJoc95wHyuEZFRx7BgSJ1Sh+cu6Ytf7hjUEe+djuZiDk6HShqUBsLnBlRIY+FFKY8QvC6SlPKODBGD6t0KSIpEJKT7wTaeVGwSM61hXLFSY6AJE7V25jmF/d6wEp4E99JCU2LYMJRHOIF7/ISeTmZIhaaV9s+b0aZG1I+hmrCmIV9gVdbZdVlNqvCk3Vg08PVlZfjpbDyOcPzUfuhEAMgHVoK27FSuwgROq2EWjAdvGSl9S/ICkQhxMhYMmKVUgq5kfSIFvI4tJbFyjhWxlEZRyNnZbuv7Iuz8+0bDW+pXjqqdSrVm9o+ey7Ng9xT/n6xXprCXCrECX9iC4PgEI0PB++/Ndw6re5fPjEn2lpc9PQA4JQhEDl26sO0vmAupZUwXfmAVVi+BdOycb6Q84p6Xmv3vFfzwA1ShI+fR3701Jsdvb6pnjY5MpmOT3Q0F0tj6b7LXeSDlLuIhx7DXPinYBScK9W8/76XL+DD5sdsetMiQYwUhX4VUKnkClIC+9GoWTy12yqcbnbGbjT2o5KrJxTVDlHLqRtwlMX+qOf3j3PyCACh3PrxOEQ++cXuzT74XHDEXeAAukPo1LvaQc1GFEGSGwxRgpMUxYCKMzOVCl2XUKJ3c6tCrQEjOjUuSi2GSSCbtw4W83aQaoCpN/aRmQduFmvhFidFOpRITpu/zuM+vreHh52Od3Wb/k8g3IFcEwtuMLQNv5iD4oZqbAfj5U1h0wtPtpWrq8LVqiCrzjsSBsFKa3RVKt6SGKSMRDPnG9AIXYdKYDfesFejSz21ljkXvhtG9+ZHpWSljJVhrAxZycUYsndnzNnL9IZGkFEbYr96FyUmY8u94raOZqO2hfyX4NXFSjry1gVOl/8Br3fbi54MhQnQOvFknM74HCa36aocvtw8eLGpY6dNHzOxdlq7hqdXpvvw8Xh3zuPYwvx3k1uYgWnsZ4v9TycEMH/chKpIEfKo7Ad/Hoc7P3+a6W8/fDVwkUlOFeIkj/OUHy+PNTruimC8i/HydtivZfXAm88rk2OCkkR4uoY+KvvRePkShlfKNhu7sWAKqaU+C15VF0JzKhqg3nWEl+KLHqqhfPt4OMH4w9MEnHoKYfnLyY9LWOBhd5KTbWcJpprIFCbvyW+mNZYRCK0RjH8PJHpfAWnczBIiMUXias16fc319YqrdceqvyKmgISREIQ+9RAj2jZbMafrLVpYb57w0WfXfPNqIFRamiAe3c8EoAzRIDhXtBaQ2nyrBy/AxVzMP9b2Q1OG5kBClTmXMc/pPB+zcppeBJ9lb3gT2Bd4vVfWr42rtfJkVbjeCOu1d34EbxAUo2Ey0ofEmAdyy2nd7BOlGLlkch6oVSlVKRVKgSErYzHG0ZH6RYWx5fNVW1WJ0YwHnYd8CE4fMBlMzX4m8KQs7hc7moKjtRO8P8JhHg7ervNcWAuXH4yLGQg7rTURN+oWT2ey+CYFf/CNb5MFAQsyocUNztmkZioKyFzu4/wAHvlpKSdbbBx3ef1nDJ6lTBU5E8GVmVGLMnxTefXSeFVgu2vH3IoH3LrUyV1e5EORxyi7c9iU+75/l0HwbcmHRob0OEPixDt+w1fNJrI9j9h1AYJCVuHLrfFa64JkzLVC3wnXV4GYAl3sEDyqHfsVVSt5N5JLYRyVMkIt094beO90xD6k6cQLrQyzhzVfc95ol77FbStuZhtsNy4cNnlTDl74fCkHKYYQ5k09hEhKHanr2Fyv2Kyvudo8ZbPp6FIghkAIBlYxi67gJXvHvBCw6OVvqVvx9Jnwr//6r7jZVn7/25dOyMPEqNiiFHgjBUFZrSJdl9jvMmN9A1nLkRgcpRkOnqa1v7nTFxANaCgnC+wQH57IaGBpueuMOK8qqApjFV6PxjehsO4jmxVsVrDqM32KdBGGEaIJVauH7qtRytA8/OoGgDrYUvEoRNWJTppWmndQm5zppjc//2lV2OGWDq764dfJJ7ej9XFQdEfriekFkMM5Ft85TN/kOcBk0Z/o7smfP0l/yaxk73y/TkMT8wkO78HsR9jCrJCpTOhoiPMvEwuhyJTnf1iP8inGhAplr9ygvGrP9k20qRcD4MOVow6Vb2EUPPYa36a87TXedg7edM5HfsP/ecMtnFaeeel1c35UsGgUYNTD3iktuth1Quq9386q7+n7q4ZfGxAxqgZkvUKqA7W1FrROu6Ye7Sv3yaNbGPvPJyZQ2+zmUOqt+TwfcoJpQ17SEvsthHY9AcJUUdDq5VEPuyoVUwEVuuhkC1oq47in6p51t6Lv1nSk5nUGNEZCUSd5MPf8qwp0I/1K+POf/zljVl6+/lu2L5SpBl4ksFwnVoVxW8lBMX2MIbCcUJjbS4HHc6GF/oEJxnbGnjK0AdCmqIUxsdEdnX7RcrmoLxyPFlRWHVytjHWn3hM7e+ldwevyS8VTLer8DCbWIg/NA55R8QeNPoWlPKJz95zYrR9YrKtp+O1cC+/ce1h4kyo3Jpbd+hYe+5lLH69aORAJmachJqDmdL0DXwazwTXpejl9JgsbYFkt4Gc5kzZrhsAcBDiypg+jPYIQ2mQsTp+cn1+ZBtkOCeZGdTUoCDs1HshQepEPVD40T/pt5b4mSvcBHI+dguPPP3Rsg7/rTimuJk58JsZYDwik2JrDxSRcXXWs14HVesV6c816fU3XdQzbG2rZk7oVuVRKzgyrkRf2mq0VSjVMrRH1vVkeV02AgOiCa+j2JvfQx3Dsu7lM0VIwNCyUiWpjIGzgstAMD/MmO9gWGQqpRixDraPT7q6FGHp6USfbSbQyukjsA4RIqcHb7yp493hh0yfWfWIXxsm5vVUGJmKux+P91vN56/pOt7LNa5znZpoCsSUWXmdv0g0InSZuYbDIAUy3oKo187xTVSiDMBShCxUwrDjqXpv3qs1Plknhx8MFpkr40zsRs1sv6F1y3nBcRA2mn4/DB0xVHo7zsHZftrx5Z4/kTISsedV29IIsNpAjXMIZw+LU658MFpt+CtisiZspsLB8pg6dU+HK1KRrPt3pBRe35e/dIfx24Bt4g38vDhTMEqla2RvcLoT8YSiXi7xfeV/e90PSE4+JRJyO6YMxjh4wRYdeI4aEQIgwVmU0b5yXZME0YkYdKhVjrHvqvjKE1/SdoNWB8DvdMuTKOFaKKcNYFnvD/ZVOS3lEo6JDWHWKeh58FkHkYBiczU8t/tdPdmIIGESRGT0tFmaguKlhwbvBOVoCYvBOfCpQi7HfZVIqWF8hrBF6zAYkbDASMSW3kLSQUvQWruoZ05icP6AW5Ysv/sB/+g9/x6sX+4USPRYRL2J49nTD5uma7asdL18Ms+Fw+/hz51kaFqdh7MVcAUhcRtBhYSwc5r8phUVo/fhS04Z/ALt490TI1TsaBjvUyE5DEJsU60KDLZTnWTUyLcST2zoypk4+P/cyT375LYSvLqZocpknpWhLnv3bYmfGtfTqj++hpYimOZVpXg/hgBkYaJOinoyAFg6c8vbtc7XDxUznpFAzovyb3nxMZiNoGm40aZT
dU6ni0oC4R9prs4e5ydLUh2CKf3zYvtRFTmX5Lj1UWd913Jucmbc1Au46732lix+6V39O7pv/80aQl6Zbq27qYqJmyLWyOKRtQcK+VsYxe0m8DN6YLU6l7kJVjwIUbQ6gyqHietq2HyCP601w7KC1rW8afYBmENyiSrXb+++8QbbNfPKJl2j9eQNVnNxGxOsmatv6Q6RU93KFivVCDIIWI64732itevfBkt0AGEfElKQ9KsFBi6EHImPZ81/+7h/4zT9/RS4nCpVjxa0qbG8GhjFTst5pCLxJ7lz8yyjwUv8s5u/o2Al1vqRxPHvB28+iBdznp3BktDH1hVgqDGse690vriuqM5uBTeF9Wk7sTRvYtCHhaNkjL30a93Id3pPPb7d219ZnS+U7W6LtxzlysFjf7Vx2RGi0qAAxQ1UbnoH2gh+Mm3l6zo13MhhOx2uLPy4PnJ/Z8a+THagG+1Ip6hwV02hPQb33yRKieZGLvE95DNnPt820+Fh5ExDz1meLN0gMxrF6mXV14nePILa6vAZ+riqU0qiGRZEqpE4IKbC63rh7VwwqWKlIcayAqroT/QB5i66FB1lGBqY96qAEDl7d8liBRhx0qq+cRU6aMQCtlA9PF9TaPCFxiwgDsak7fEsfqHnjmupgxxhXhJAAo+Ta0O2RXCpqAyEmkA4KxM7Yj5kvvnrFWO7fIKd1OwwKwwEk9z5laU7dOrOw2MznmV783RZ/XVoUk8VyrhRm+YSOvyVHPyy+8YjQ3NK7j9JC6eF2dOR8DvF2CmQa0sGMbF4Sxy/b7XPdbcVPPjzGxIO9sH8P8znHB5ba3FhECeRoHEeEQ0xPQpphYY3/Yh7hYfRySDwcTGPmwMs0J/Prdo+oCbk6d4e2WTsx/e7//pnj7rNlLvLtyWM96dN36jEh+2+7nPDc+P7Y8t3xKhilGtuilAD9VWAdm+NTFdPAPiu5eGUXJog6lmDVRz76aMOnn33Mj3/2M1ZPnjroe6iM+9e8evWK7c0rXnz1ghcvHsZH/PbGwMJ5nMB/y5sEZiDTUplMtfNuFNx2YI9TC64wTBxQKNOmKda6E7kFFcRLDEMLiQx55PX2BkIkpoREkNi3BjEREyPEiIg0NsFCrcKY9UEcDQfFtvCVz9zLW4sxK5vlHM5Ofzvm2Ms9nv+jELK1b8k92SNbpmgOwYhzz+hNebo7w/4L71pEDvZI85hPXfppTs/PrZyqyPngWx7+fL43vOTzpB3YC6ds1sLvn02C+V4OZgQTIHRZvT8ZKsfXCvPfZPG+WDOU58PMx7U0Dk/n4SgMcNaO9RdOmaoj3h7uelH8H448xpue4n52WNm3znHfdR6jGE/H9aEp+3PytgbAY57B6fdEhCqCpsCzJ4knT58wjIXXu5FXr0d2Y3UDftr/m8dhVdjtKqI7pARiDmw2L5AY0WoM40AeR/J+z347UsZvIzIwGwDNsz+3rVjDt586qrTNdMo1+650dqOfhu7OWSOBidK6OIEEnZWJxISAlw+KUBWiBkpV9sNAuNmi5tjpzWZFCJFqTmMsAl2MSFBMYutbMOmE29vvdxaKksMim/LrtpisEzv/6IuT+jms0aUlcXv8c9h6PpXNhy6f2wE4uLjaPTmy+zaBOQJki9K9O9br0SmW62i2MhdfXYTgD989gzk4s7kdzbNr79OnfxwtkXk75TDHNEPOjnT00qhYGgizkXZqDHD8/kwRkFNf3Ozw893a+vBtXRx/ZCdyMtwzcu5vF8PgjycP9fYPhrEc/fvY87/p89PrfnDAvgfK6T4B72ffn0jkDva6OwNVjZeDsh0Lf/hqYMit3woezZue3rQHm7Wuq9l4WQqvty/5/Lcvl37V4Xri5d8PfQRv1cJ4npyjiyyASItd8/D7QbvMm247wGDmGjjaSK2lBNrNqTT8gEGxiqqzqqUAGp2qMIZIiAkJCYmpKbHgyl0CEqS1gozEIE7/pj62FIT1Js23duqP3ecJv2+5D3R4/OPkCrZ5Ovra5N9zzrW+dW5rXNnLrx/mYTrfsTJdrokjA0a8XfJdm9aR0TBd/xaw6E0jnsZ9xzOS25/dZ8BMSt6N3UNt/3QqWxwzn35pQNkh2OENuGyew3kW5VAFMbETLscxDXWOQoS2fmF+uAfD7bacbmD+HARTnWycw7HH03SR74l8Gwr2bT3400jD/Pl9kcE3HPddyrte/77yx+XfhVY2OKm9VptsBtthmr+D0j+chyN9K+3c035QcajA7VE8/q1+R8zA0sM646HObWJlYRUcPBWmj/CIwRySnayZyRnGwWMeJgkEkdZMB+/AJ4FA8Ha7ITgpkQQEI6ZI1/d0KXmzBwkQkv+9ucAh9JgUYopcbTaIfH3La/uu5HFhp4VxdbKzT2jzow/PyPE1DkyLoufu3pf0abnRMr+4POc5K/vk6keRodPvnUaNXBHTFCntpVrOxMlQaSbqhCWQ81GBs/M8rcfZlprOsfTymcOvPqY2f8ackVnMALPly4H8+HQMnhWSw3Ocvn1yg2/axI7u0yaDotEi37aXLgbBD0AeCry7r7b/LrnTEXrHlXMuivFd4BQeIu8yhluRRzjSec5xIzO3yeHvD9s75WiDODr7W8ujjYFjZXX4fIk2F5iR7TIFBIyWk7b5oHlTnTbI+Z5aWVU4kL64XTERxSxBe1OHuUCIHV2fiDEQw9TlsFKr0zSqFrAN3apH1YmMPFQtVCuEEIgxLELAZ3gUTizoPy6KdVlrDkdL6sgQeOgYJ+XsFpgoc8rA7FBrcNf9n77Yb/Y2pvPpnfN4pLjOHXJiMRyC9zb/fTZBF5b16XiX9zWfY6H155U7z/fCwIDJJJhTE7Zsx8lkyBzGNJkCh2jEdFzbIDDesD+/Uaa1odMljRnkuLzPP/62e5G3lWUN0PuQ9xHe/xAU+fuQbxNIaM0xEDmkTF0aWPCN31/Wfr2f8b11ZEAWIX//9+BHHrZLDqENOb/pBA60r4coAUgQrOEApqBCkEUhY6A1FWotidsfqho6DqgqprXVYBZW/ZrVeoWEDgmJiKFBSBHUFAnOz79a94QolOp3cDrmc2Hv97lYHhquOxwTFp7kchwHo4nFwnloRbkbAXLwwP2itx7iVDp3eg/LMS719eHW5BAw4vyLtwzRH93ZybkOIbaDsXnfDM7rc2nULK5h4rwBM6/A0t6whaHB8Xzq/AzCvPaXY7Z5bC1BMFkPZ4CTIEdr66Gbsx09a5qxezDAl+DKd121d4WIL/LdyXLu37QXve1edc57f5/nP3ed70q+S2yDzJHB6Te5c6M6jb6e/nzunTs9Dh53X49qVHR6WsEWaHOZ4rAc0cIy7XV2fCaZvJTWqhX/ukzHNqa2GKacNHNHwYD/DC2wbYqqQY1MtMEhGKUqyQSbFWakqjEWJYWCqlPvOgjRKFbpU0fXBYZx9vUO93GHFf5tGQRvzru9iVtqshwnxkYWmvTokHZu/8OkUgN23HxnWrsL5XiXd33baLo9tGNVPPEPHJT5pLTvntnFvbQ1MufTpuuefHm2Oe325xOXo4N0jlNZ979Si7
UOiIRbNzxFDA7HtXfl7IkXx52e5w0K+MgQEP/92CiT2bh71xV7MQLen3wbhtWbME53pQtOMUDnvvuQ63yf5Ns2CuT0jbt1mYNDNzvYd0Rdb6fmbxsLt/B9D5BHtTD2sdqtz25FARYe5IE05eCdLAc5e144WFDC4dgASPCuTtK4mpdqwm84tHOGeeOLSYjRWxzH4Ox9ZkotmRAiMFAN+hiIIWG1NnIHoUuR9Sqy3Y6HHfV4Bs7Kd5XnetxDPlYMh5M87Jt6WJ8LL3waB7PyXS7OyXCbjoGFV738Xvt9AtWcEFK2dXU47wRGXKYfJiPUzAhih9ZP4v9z68W2wxgPhuvxPU2NsQ6jOEl3LLxr/9KC/bFd5G4kdjPOjsb2uDVzugbv3LRnEzjMxnYb3kW+A3kfyv1NCvmxiuuh+9Nj97LvQ1rgvpD/t5kOuFPOedcPlhPHmvcz9kcZA4dqgONQ46Ecy18CTwnY2X1uUmR+eCsbbOHYptebAnKNYQoWGjRLD1fVpqGyGcEgJcNQ5xBQoRaIBiWMCBUrgZp7xv2OrkukPkKf8C6EAdV2cVPWfSQFb9IDf9z9821RvidnOfZA71iIk6I+5znLIipy+liXinnCcdwnU4RC4MDsx3TRhgsxPeI4WEZKzs7HFL2YVudd4bfJOp0iG8d6neXkeKvR0+seW/iH/N5pJODtntk5TMoRmGj5HOT43+m4AGw64fqqYyzKq22l2OHZfR/sgTOBpO+dPMYQuCvieJfcbXA+TDm8VeXAyXe+D0bAY+S+9+5bkZOI4+Nm83gvOjfexxo5j+ha6OhHFnn7aTwypQVk4WktNvJZSchi0ZvNDVSFZRvchY/VfqjVnI2/ee9zKoGD8gqpOv9AAw76f0JKiZj89xCFGIUu9aQUmErpRAJmgaqVYayM+9GvMREmPDRf+x4tzG8j93d8gYceYu3/J199YtebIjRMR50o7jedfcEWaIczuyKf0hJ3n+QAupsIlg7rYQqFzOC+kwjFHKmabNiFhjx5nU7uQ3gIuOdNcgqufFN1w1EZ54khMI+zvWMBeLYK/OyTNZ9+es0fXu7Y/csrcrl3Oj9IebvYyYch7zuFclq6+yZ5n4rsPsPj2zII3gTyPfe3tzn/Y6//bcgyOnluDJMcxnLskJw79m3kEZgBvd39XFh4ZO2mlkbASbe86fjDhi9HJ3Mq1wPMymY3FeoSsHhkjUDE6YqtGSPxyDutzmEgMp9XbI8wNYTx8ao2paF40yKdkA/xoVPk4/sBWMsn08vh1wOC3o+bsux+rNl0xEPEOGAeGscBUwTitrd7SgJ15+JvhsWs5ZdhDuM2hmBeosvrTR86bfPBmHi4X/0Q0NVj18rZEOfCKAsIn1xFfvXZml/++Anrp08ZTInxtRcjc24b+TDl+zLO70reBkwKx5Gkx373u5CH4hLOHfuhlCC+i5xb548x9s5FEm/vnQ975o8wBpZklsvPmqeyjM0e/bOANE2bavubHR1FoxZuDWna3yRNuQM5HBPbIhc/PuC5fnfi1cmEkJlxzYBSKjkrtTpLoRZneFKZJtSVmgMSZTG6D+vlefTin5/F3SGwo3PaMgA1a33/UzP8Gl/GW4dyD8bitCaWnAiHa5355u3P5eRf5sDAHHs4p+hvn3/6fDk/96c9Hvos7osCvK0sR2QCvRifXUf+8rMNv/xkw0fPr9D1hr57TRemeM77LkS7yH3yIcz2H/v698l9Suq+9+R9hPDfFHn90OQxeI/HHL+Uh7cw5gAom+dqDh9PvNetTFCO9+gDn/thsEGOzy5BkOTHxQYUDCLerXdOCZjzB3SREBuIzIwUhZSMGKIDAc0wdW9ftbauh0A1bxvbSFgkeFtY1wDGoVwygAXUeFCvgvcqZ3TUWy/+u5zne7xqk3DkodvJdyave1K4k3cvj2jbOGMTboXD3/QiylGEB1hgDk7i/FOEaWb6aivQFueaFOQbqKfv+vkx8j49GMfGHMiPeoEfXwX+6rMNv/zsCVe90cWArTY8ffKUzfr3vNwplfDg3uYXeXd534r4XT3hu7zJ+449le8kl36HvMt7+BDlf4rB+T7Im57FY57Xo4wBM5m9Qow5sDtPnpkrk5llyL95vH2fnPU0nCq4YSBOPOQ89l6qVScFXxWiEaN7/6UotlPA2xWr4cBDPXjEIhAk0EUhdV6yODEVhhBIKRBT+91gzCOvb0b2e2nsh9Psnr2Rdo1vbxFJQ7+/ayL1vqjAMj0g7YOj5kjzkzx7ZmYFvXispyGrxVKZLyKL752O1RV5a40t0zq0+Xfu+O4sc8Tq3ITNJur3agMQwhyJ68T40SbyV5+u+cXzDU9TIOeBorDeXPH8E7h+tiK9LGjloVmOi3ygcoo3eai8CZj60HMsv/uu78y5MXyf3sP3LY8FjN4b4X3AOU/l4cbACS/6fFlhzvea3F4gJt6CeAKV3cqPNGyA0HrAT5lpk0XNtyKtPqoaFGmVBwZHpWAyKX0hxUBIRkhCjND3kdVqRYxCjEbqIjEmgkRC6EhdIKWE0VHHPdv9a/jDC/JYMJ2TFnPOfIpyvLfFe68yW9zgW8pDrOq5myRgE3HOxPY4qew7huAVADMS4+z1b+X8mjI/NEQ6ttAPtk84+f1gnN49J8tr3T9v7+MZvi9g02MkYDzphZ9eJz7ZJDoKw3ZLFYj2nKurJzyPPZ989JTf/nZLLoqH2i4WwfdZ3qVS5fT3dwnVvw/5EJT/hzCG91Hd8a7yDr0JJhePWYMcAVXaISITcU3b0hfzbkyqpt1UdVa2qfJgDlTbYeOXFk0Q/NxIoxUWJSZhvYpEgb7zRkSpi3QpkbrEer0mpkgIXmWQYofXSAQkVKcipmMXYLTMqk+ErrRwhPMYiNnBW/62DYE3fedNl3/k8Ga0+RSKlzOK/Y2RCZnTLUdfO5PrDyfKXc+2WD4uizwt/7tb3o/38uGKEQWe9IFNZ5iOjCUStEBIaBGiJJ5cdXz6/GOur75gNyh6ZgN5uNl0ke+r3PUu3AV0/eG+Nxe5S97KGDgsFGvNfu4Ws9ay9p69u1HhO7bApnSBzM1eQjgoCZkiESIQIAVPF4QodCmwXiVvd5w6utQRgtB3ka5LhJgIKRIkkUKi60KrWIhIWBHFqLWSuo6+rFmvVvRpII9NCZkCi1TJrXv9nr9MbxjySXb+rCyySP673a1q6smZpududk9me5FeeJN82yAkuJ1v/K7EcPxMF4SAUGslFyMRgUiulWIDqb/m2bNnPLvu+eZFYaznrbnZwL0EDf5k5Xu5Z11klnd1fh6BGTiHjDV3786InP4sxykGayUQDvw3RBfkui0NMKcdTJdVikBjK4ye+48RUgx0MbhR0PcLLEBEIpACXd85/oCARXGCouDsdRIiIfSA0PcesRif7Ni82LLfj+6d2lRPMeXO3neo5i4wwt1/ep++3LGNo7c+edOdLkyhe5Ei4PN21o48Wz4E5+9TObPSHuTq3v3c3hGU8R2JTf+ZIRG6LtJ3EcHIlhnylmHc0V9tuLq+4tmzJ6y+2JH3ilm49VSkgTg+1
CjBHxO49n2Vb6OK5SLvX96FSfJ9lls+IjIw1d2HxX45tbw9hJNPwd2h6ZOW3vc/CcQIMbUDbapACIsrtYMFJkcxiHMISGgYA8E7FMb2byetY2FHDAmiEbrouIEUiZKQEJBgmClaA6igYqRQqMEwvLVxoLLqezabxPYmUxVMvO7c79O344NB0GiRp0HfNYtvjB7ofN9HctYt/xaw4a1Vodsf0hr2nBnH2e/SsB4TwC+0+z1Z7FMO6cjWOH9iZ6vUCa0xp5zskIc6Oe8d8pC0ytFJHvZC/bE22YlJMQShS4lV1xGDYlYwrQzbLbsXr1k//ZjNuuP5sydcX33FPhu5+DkO07WoHGk/fEiq42II3JaHVgRcjICLPFQeR0d8KtYiBid78rlvzPXa5so8RmO9ckVeijGMiuqBwObUGY4hkIKXG4boZw9BiCItOhC9k2Hy0sTQDAZptkqQ2IyIxkNg/pmaoFqoakiLFphWVCtmkDqhXwWGYh7Wnl/AxkvQ0hXV8yHMAIrHyGMOf4On+24v/yGDPxluE0PgQ7fiBWXUYrM6GdM9Qzyb10QI1kxEW3qvZyIB5wf1wGs1o45bAYoPTkR8jacoxKhIKA7eVEVHZWcDNzdbnuRKiiuebD7j6eYbXrz6mjxHVBo2x6Y7vz/td8rhdJGLXOSPK+/TUBa7mNwXuchFLnKRi/xJy4WD5CIXuchFLnKRP3G5GAMXuchFLnKRi/yJy8UYuMhFLnKRi1zkT1wuxsBFLnKRi1zkIn/icjEGLnKRi1zkIhf5E5eLMXCRi1zkIhe5yJ+4XIyBi1zkIhe5yEX+xOViDFzkIhe5yEUu8icuF2PgIhe5yEUucpE/cfn/A+HeDhNjTzWIAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Let's choose an image and ask the model some question.\n", + "image_idx = 60\n", + "image = vid_frames[image_idx]\n", + "question = \"Can you describe what this man holding the cat is doing and how he feels?\"\n", + "\n", + "show_img(image)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The man holding the cat appears to be in a formal setting, possibly a business or a sophisticated event. He is wearing a tuxedo and holding a rose, which suggests that he might be attending a special occasion or a formal gathering. The presence of the cat adds a touch of warmth and comfort to the scene. It is difficult to determine his exact emotions from the image, but he might be feeling a mix of formality and affection, as he is both dressed elegantly and holding a cute cat.<|im_end|>\n" + ] + } + ], + "source": [ + "result = model.predict_forward(\n", + " image=image,\n", + " text=question,\n", + " tokenizer=tokenizer,\n", + ")\n", + "print(result['prediction'])" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAgMAAAErCAYAAABDzICRAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs/Xm8Ldl11wl+194RcYY7vffyvcyUMlOzrMGWLeN5wi5sMJSrKSa73EAzFA1FGburoMtNYXAbTH2KBlOG7gY+gAFjqjA2xkCB8UBZtmVbsmRbsmzZGjOlVKZSmflevunee6aI2Hv1H2vviDjnnnvffSkpk0/7benlvfeciB079rDWb82iqsq9dq/da/favXav3Wu/aZt7sQdwr91r99q9dq/da/fai9vugYF77V671+61e+1e+03e7oGBe+1eu9futXvtXvtN3u6BgXvtXrvX7rV77V77Td7ugYF77V671+61e+1e+03e7oGBe+1eu9futXvtXvtN3u6BgXvtXrvX7rV77V77Td7ugYF77V671+61e+1e+03e7oGBe+1eu9futXvtXvtN3orzXliJ/5Q80DnDHzHG9S8EFEFSQsTSR778za/iO//yN/PI576W/8d//538+3/zS7TBLo4oO2PHlcrxxocv8V3/5G/xss9+EzgHKKiiMaAEVANohBgIbY2GQAyK4FANxFgTVPnI+97N//Bn/iYfeXqJxMilqecPf91v4ev+5B+gHE3QuECkRaMSQ422AY0RJRDqhh/933+Bf/S/vYNby0gjoAqTwnFl7HnpjuO/+j2fx8tfOUYkILS0TYMotG1NVOW5GzX/8Wc+xgefWnG4CMwRDldK3YKqoCgoiIjNIcrGLCIIog7k5NzfTbJJe4RSOAhB0NQ3gEMZV8JOKTw4cfyuL381r37ZmFERKKKiGlEHqxZ+/K1P8K6PzJlFJQYliOAL5ULp+LLXXuHNr73AzrjFVk2hjWn5IqoQoxI10raRoJ7bx4H3PPocH3+upgmeJtgYX/7AHq94yR6josXnWREFsT3lNCHfCBoiEO0dRZhr5G2Pz3j6dqBuIw2gIjhV9rzjoYMJl/c9E6eoKG2EG4crrh21HDUwa5VFhJVCVHAiiAhtiCiCKjgnoM7e8ZS1EJHu87zGqoqIMtktKCeKOGV1rKwWoNHZJpNIOlaEoGntBOeF6b7DlYGy9CCReqlo65gWQozWd1DwAkfzaPssOrRQytL2XDkSnNjIQ+2YHymxtfl1DspSKMcgTglRiVEgOEIbEBWcEztn6gjqIEZisDkiCiKK98ruRcGVMDuKNHMoK+HiBQ+i3L6lLJdKJCLO6IUXsbMY1eYZQVROzKkTuDB2XCxbQlBWUWkUwuCfLYvYnumWRrr/OrW95MRITD6LDvvMC0TguFaiCBf2Ch64VHBwYcpituL6UcNsGVi0kaAOjWpjc44opJOsoM5ooK7vh7XmhKja75H09jEozkaLasB5R0xzIFEJG3vPOYeq4pwjhIA4RSOIOOtb0llWxTlPTBMlKj15cUKMEcn9qqBpzUWEKEbrVbV7rioUYtcBRImJrtDNvYr9Lun6/A8EJ2L8QtOXdlW376NGJMm5w/mTdEacOtTFtXNmqyfdAHRzD0g/uP6eNFZAoxKCnUnd2IObZ/q0zza/G7azrtv2naQunBOmY3hu1py45uQ95+QQn34wYKsraUEqH3nlfWO+6re8mld95kv4Vz/+Hn7t/dcJ3W3K/sSxVzkmUfnKL3wln/mGB5lMFecUQiQ2LTG0RA1oq2gTaJuWEFrqNhLSjmtCSx3gE9eOePSJY24vPHXdUhaeSyPl4fvHTEbCuAoUjkSAwSfS7gTqIPzqx2o+cE2ZR01bSxh54dLYMZGW+3YLLkyV3QlMxx7nASJOHHWrLGvHs7cbbs6Fm8eRWVDmAerGEWK0PtUYlWJgIB3PjlFLR9TWp3dzmU8jNPlA2eFSI3LRDpxTiImBTkrYLR0VyoURXN6Hg6nHiSJO8B6iczz+TMOzt5U6KG0au3eRiXfsi3L/pZILU0flAniHdwUutmiMRCKoEcw2gMZIE4TDWeRwHqgDtNERcHgX2Rl7RpXDe5sI79KsqOKBMr+bKi6tW/SKLws+8lzNjXmgaYWGhBxUKESZFMLOyDMpxfqUSGiU+UqZNZFFgGUU6mhzpSqIE5omYmRSjNBiBHB4iDWN705NChhPoRrDagHLudpad4QnEzEh04aihJ09h1RqBFRhtYiEVtDW7r+w
65lOA4sGjpdKG7yxExcpq4JV3eJLQcQbcY+Rpo4IHu9LogqjiaNp5ohEJpMx81lNPQ8QBJwBhr3dEcXIs6qNede1Ufeo4B1MKmWyZ7N180ZgfixMSscjL60oqoabt5Trt4Ktn4O6VmMQMcErNSaY9/Vwjr3AwdhxaRTSqREiktYKIo6okva5MVbbJtrRJZcOh0tnRBMQ6SYbpY7K0Qqk8OxMHJOREKKNdRU0nVdFE1iUfOBsIdOYBY9LgFgTGNw4zCKoDM60ZG5piMQ+V5x34NKcxGiAPlqfcXB/psl5ffPvBgaMyXbAw3B0T15S3+mPBOBJYIAODAybJnAeM/YSA0CCdkchv/ImU07iDlHTXsgMPAFtkcTU8zkT7c6IHQHFSSd6gEr3rkbzTjLsfjz9951ApkbvNERiwACBSjcXm20I8od/b3vm5n15L9yJlmceKgk4TSfnAwPn1gx8qtvJF7DmARxU3nE0q/nJt78P9473c3VuzMTAu0mDhYB3yqoNvOM9H+W973uM6USYjGwzZhRvay39gRZHJBKDHZCAEPDMl6AOxlVAVFEJzKLw5M2aygmjqWNUOpzXrk+AwtkGnUchpGdIjPZ8wHsjJssGrs+VFZ7aFRTeTpWTghBgERp7P7HDIgKVGGOpcUTNzF8Tk3EDnj9A64N2Fso8uTEh4dzuWt8dtkQ4bIDpnkBUZRmF41AgDRRFYsIN+EJQp53EYc8w6UVQoijLWjkuW0Y+QBS8KJ6YWZsRnhiNUKvSYgjfO5AoaR1MqmpjxCciasQliQKiBCCoGvlIknrphZjeAekBlYnZMREOwamYhBQd4hKzcOC9UEaTKRKPosX+jhjY6FlFPqImOeZ5j4meuVMIR16bGJXQwGi3YHIA/pLj5s2WxXEidoP7Ve1dQquslqA1xDYRq2g7xSuMK2VRt6xaQUXYGVXsTEcsVw03jueUKJcujnjwykUu7O1Sh8iTTz7JuCq4fOkCOwf30bSBlzxwH7/y7vdy7bkVL7l4mSO5zsIvwQneCzuTEY889AAXLu3jcYSm5dZiSbNasaobvPOIBuq4YrWasTyOLL1Sh8hiESkaWC1tDynQNqaB6HBv5jq6ScD7E5HX2CUm6Gwzo1EIogRVllFZtkIpjkmR9m0ncXYHxCR6OxzdTg1Aq2IU1SurGFgtbL4Vh2QgbQuUGFBiRJ2+yEBAVNt3QyCQl9gZy0zHMVMCyZwuSfKJESfNj3Fo7aV373AhS8KgIZqWoBteogesA4F8lozXmXbDadYw0I1JpJeulXU6k98pk5os1vTMvMc16RJj2On+DDIIw3ECEgZkSvvzoIN36taxv2+TPG6Cr/53PXFd/jwqiBNcGn8ImoBlD7J6rchZgGMbBT95//lamvMBwLpTe2HBgIAmlRADCab7OhH2UoRSIIbIEk/bRlbBELWRVTq1tVcBDy3KLAptA60vKJ0iLuCiqRNF+oMUtUk7If8dIQoxRHxixoWzw+u9EjSyUsGr4mKEGG1Di+A00mKq1iZEkxxizzhFTIVYJXV1iKDqCCqEmA9kpAmBRiF6B87UoWD3Iop67Psh0dNEUAZzqN3uP30ZNlHpuno63ywIpqJ1IuhAfSpqaj7JSDlJaI0qEiNa2/ctMSFlI74hgQAy8RIjfG1jm8Opok5APEqDRm8SQCJwIERsraNhIQQMAIgQcN0znJMOPKr2e8bAl5h6Vx0iwZiMJsmtI1xpr6X7Y4DGgYYEWHWNhFNizGYoyIhhiuHi5Lc3E89AOousQ7thMw2CEmo4vG37775LYyonLLqHkSibSUMRh6iwXGh+YAfzvCjlSBlPYHenoo2Bw2PwqhREFqsGpwUalNVsxbP1c6z2ZlRFxfFhw+i+feqF8vEnHmVZB57e/QSHR0uObjfMDz9BDKairnYKdqoR052LJsm3nmJUsbg15/b1OXUzo1nMwBcs64j3FculUC+gKoGgXLu1whfQNEITspYKisLbbtdoWpjEjTIAzNKuJgDdtKZVqvyAYaT/aoQYHW0Q5o0B9BZh4k2rIOlcxI4Ba7eW6qCOEJyjERAfcYkeoc7Gp6agd0mtkLVDMthrqjGNxWhKHGg4MkPN53CDz6U/jAH1eMPOrSRtQ0w0C0Db0P3unOskd9VMcxxRI5rOSj764jIwid21NsZuCHRMKIGvIfPK12b4kx3WoorRmcF+RzPooAMOJm1vEjZNZ3w4IcNzJN01WWAaKAvodbjK2tjzODafluaz/0q6d5WkifQi0GqnJfxkawGedf92ANHv7e0i4vb2goIBIUswCQ0OFoCk/nESO8YdBGIItGIbZmjOE3HEaMzSGEgiqgptsD4kaZIExYv2NDNqsrEKqNBqsj1CzwxQXDrxTYhUeGIoCIWhWFEjTC4RjFZNMjDp3WxlIkmViwMHgYgLjjpCESLSBsQJURwhKHUdqVtPiEpwEOteyuxsWqKmnkzMSJAzN2/+fNuGOvlZOn3pieoiIq6TSvK3kubUKUn74QhRaFuB6KgKR9Bg9v5sCs3PxNkBFJOCQkgSVTRVoUYDVk5KoggBNZNBFCMfMdKoUHfEWAlRkgklkAUeSWKYkCQt50xrQ0TF5m1FYJTE8lWIpirOZqA8bxixjgFalzQyidA2rRJaOtVo7BhuJg5ms0VtH4hz9h7a61klEdt+5YwIC86kTsnoXmxeGlv1m4crfOEoRkZ0YjDAIrhOkjJmqd16qZjtu/COooQoincFMZp/QFg1rDQSojCaTLlyeYKI8vEnr/HczTm7xYpZrSxWR3xcjmzfxsDN+TGi0KiyahSNjgJhtQi03uGOG+rVgov37fP4MzN+7b2PEoPjQimMaGlHwnNzZTwKtK2wXBrj0QghKtT2NjGpYFSSvwO2vyRqAuBufRdnoKWmwq9DItQxc6TUbzrLdUwmg6jMaqUtYKdwFBKzPG7nUUwToAhthBClEyzEOaMhCjGGbvM7czQApGNG62YAh2m4pTeeB+2lZJE18Dg8vwZ2BS/ZJyUDbtOGkRh7PoWORDO7Pgy0uKGmMJkGzRJijC4muiOdgTQ/P61L+iT3g4CLA5qdfRTSeclagB4zr3F+28dK0nQNBaC0z+nfv39If/uwmaYlnUFCOiMDAJL61QFLWqeP/TplQKhJm+iTf5NIXiulKB2htf2p6simm2G/p9HrYTvdnHv6vT2tHmg7z9FeeDNBmrSM1LrPiHhM9eqdOZVkG3kdTa1vCNmuN4c6mxTvxJyJOqJsUrdPBzBh+c7ZR2NylEqq9yZa77lP5zLxEWqgdIq6lqjRHNYUnIfC2ztIljbFwERMqDkqtCLUITJywoXdCbs7FeMpTHc8TnZxpcN7hys8s1ng8Y/dYN7CPESWMdnZgx2WkMaYCaLNw/pGOWF+OWPTnFBXpf2q4sz2ljew9u41pAMUIgQRColMJyWXDkaMx4rDtCuFK2kAOW4T7OvV5PkoVyPP/sUJk8okKnEmdWfvFAOBPq2rrU27ajm+XVM3iRBjgGAyKTk4mFAW5uCYN5pLT41iajwzHQnmYylUzoi/LI8Jq5aQCE9
WdTrnuHzpgJ3dCkpQMaZ7ePuY4/mcNjobh5pTWptAiCM5PyYi6fK+zQQntXXzQGIKSWvWEcD+pBCjMp46fKWMJ8LugYGLGB3zYzg+igkU2Hu4Qrh4+SJNPef41hzvPaJKs4SmhdnxgoODHR5+6QUOn/oEEuGhBy4xbyKXLx3gC89Tn7hBXLQ0GgkNRIRqb4KvPHE+J6waNPG90sNoLOxNhSuXRpTTgluHKy7fdz+T6YRy0RCco5KItJHYRsrK7OtNbKmc0KY5jmBS+9oMJEabeWv+RlzH3FXzPuvnVdV8OkaqSUvSM68YIGrEqTBy3R2oKqsYCWJnJSKmlUo2IHXJ9GQSRre3afNQbLE7DVyWPxO4ztK6Jt8YUU3Ov2lNO56ev6PbPIqu+zqmd8maBEVwieGuWeyDZn5qGq8ho839aG+bFu1eo6NreW9m+tNpBjJoSb4UEnuTQKcVSNfK4Bz0e30gySo940/oKdMPc3JMdDCbHQYubVnT1++OBATSvOZ3RMzsl8+l+RlgwhYD+ijaPY8M0BMfkcHL5GeQtKa+UCTadSFknxB3gkYP5/2sdqpD6eZ1d7xie3tBwMBJG8zGxiNt+KQCi4htUrEJdqLJ1gd5Z6okSQSY4hnvVOzslJSVUFUwKh2FEwqXnGicUFaesvR4B0483hcggq8qnHdce/YWv/rejzNbOupgqPaByzv8rv/iy7n8wCXKkaMsPUXhTCNQjCiqEWVR4bzwlre8jR/8gZ9j1SbhQ0xjsGyEV7zkAn/hf/rzPPiq11F4oSgLfFEhrsAVjsKXLBcz/vb/6y/zff/s55m3Zm+tWzuwiknoJkWuk7q1udxg/lsdkAbfpRldY9XddwrRmT21M0sr5tEdhVENr3n5Zb71//lnecXrXwWuMeleHd45mlDzr77/B/n+7/1JloHkL2BgYm8y4lu+5f/M5371Vxlz7uBGTATIJOCO+KdxHd68zt//X/4hP/UzH6BOoOLBg5I/+U1/iC/92q82p6mYwUCP+PNBFkxiN2nH44sRy9lt/t9/87v4sf/wHgNbHotAQPicz3wFf/47/wKXH3opiCMQiTHwUz/xf/BXv+PvMFua1iqKdPZj75SCJLFDb94S7R25NDujZa/qtjM9qUYibhD3axoaMA2AKrSNcHveMh55fFXgvWNUCnNWyYdBKSrh9W94Ja997St559vfyfIwaXCimtasNe3DeNmwmt/uxrM4PubG8Yr7L+4xvW+H6biiaCIupD0QI5OqoI5KWEU0moPk3o7j4Sslu6PITqnsT1qevnnItauB6c4eD6iyOxnx0vv3iYdH6HFAnbBcNuBL5rOGeTSwXSbG0qqioSfK2TTUqWs7fe9ApNuQoEVMwm2CEn3S3JE0OWoAu1U6EyGiuLSmKrDqdk8CKEMBVjMNGzC97rxJYjjGPUK270ft1dRJUjZGMtAkZeigCbyqdqdz036caaak+zTtuaARl+lrlvrTc0MIZsYcaCy22aXNJJIBRAKYSeDJYEoyJ9fMFE/2k/saxrJnoNLLr/29m46xBgp6Rp3nK69XJ6qcAII9KOj7zp9qh0gkAYZtmhcDC5r8z/Ja5D450VwGj1FxRdYKJoAbhzZ8XR9XNy8nBbpNp9jzNuV0+r/ZXjjNgPaSpf13OOmgOALmGNRGpQZ2x55Xv+oyn/NlX8Bbf/YX+dhjVzu0DBYStKxb9qcT/sz/8E286Qs/n6Kq8IXDOfNML4oK5wtEHN57fFnixBkaFbEN5UpEHE8/+RH+xP/lT/LYe59mhWN/BP+nL3gj3/Lt387k4GFUkp3SxcSoHAb9PI6WKw+9jB//D29jNrdj4kUpHYyd4/79MZ/12a/n4BVv6kUDAbRFg4IEYjvh1S+/xE4hNFEQFxNDsyNjmzURGDl52GwuT/c4zd+fbD1AM4afwu6SylkhScy9qabBmMorHrrAb/2ar2Z0/2tQCswzAAIeWPGVz97g3/3Ln+W4bdEYbO5V2dsZ8WVf9RU89Hm/E5USR2tzKoC65G2sg+FZyJvjBj//lrfzY2/5EDONOJSLF0f8tt/+Fbzq874KlR2SS3X/3skFWrJlXvo9pyJoc5vP+IyHeMuPvYdVUIhJ8yLwmkcu89mf+7mUBy+FtP6OmpvPPUM1Lrhx2CYMk/wFEiOxyIFo/XfEhyTB9u+VdxIIRSHsHDhajRwfR2LjOjNTVjOKQL0MqEBohHqV7NGJsga196tGys6uw8VbfOA97yQsj1HMyTINM0MtYlRu3F4yciXVyDNrGlZN5MbNIy5c2qEshdtNJLYWVimi3L55G195xEXKwrRk+wfC/iW4f08oauHWzci1o0gjyieevcbLHtrHuYLYNtR1i49K9MJKA6tGGU08zdLCfqOYJNpv1ySOdSE1Rjg6K7l2/zmx7xUIojTRNDeOBLCjOX9lJyub4QSCJe1kZ0xcpTdCZGzZa7x6hmNaQtNkGoVIfgOYU6tdYN5LJKfY7FWfuF3/zumduiiENS2DMcOOgbkk/ZIdBQcgIfXVecAnMBIF+vCszNpzc51dPPdrv2sfHTDY2J0pUUGT7Xyoou78djTT3ZMO5Jo6ymuA6EaoqGxcbH/0GoWkf5RtTnPa35fBHIrGgbR/FsNNIZPr9ycBKh/nTJNj2keS30VN4+2Ftu0B/aYMP2T6m88/r1Ygz4kMfjvvfZ9WMLCdAXX4ciCZJqmqA2rmVX7fwZSv+PxX8S3/4/+V0bjle//Bj1PHjMwihRPAMy5KPut1r+YNv+XzEFeCtnTu60mTABHV0KmJssrLzmSLxoadnYb790p2SqEApiXU169x/bFfZu+hpyEEVBtUWzRENNZoXduGCsqj73gH7dLeBpdNEw4nwlMfu8aP//1/yGs/62G8b/BAG2radknbBJq24ei45v1v/WUevCAUx7BohRiUhhTCohkwmzrxtLCUs8wEp6LEIcPq2KYmR6aecUg6OIVAVTmW82NuPvs+7t8H3NTsgoCoQ8OKw6efQEKLl5AcvhUctKslNz76fl76pteDmxK1MWfB9JIxGho3t2Gf3ktoljcZh0Me2hduLdOo5jWPv/sdXH5wDzeamsYnH+yYbLlkz+oG1CVTEahEVrM5tz76CcZF9vuwcXoVnvzoU3zsl9/C/a98ORn8aVSuvf9XOJDAkVdqNT+HmCTVTsrIu7uTWpPj0dCDiSRZIfjKsXvRm0MagfrYnAZNRxH6pYqSYsgjyckhObfZ+igQGmVxGHhODrm4L1w8KDietTRtpohJRSyOgwu7jMbKfHbEqgm0UfCF59r1W4hEjo5q2uSXERIjliBMy4pRaefBFeBK4XjVEhrP7Hrk5mEkFAWjAo5uL3jfbzzJeOS5df0YbSPjqgSEoirYFaUsC643c4KCFJ646vTtHXOVzEMZSJTd3+utI6DplZuo1OopiYQQEwNN2hs1m3yThQSfJels4MrSXnLES2Ckl1+NwXrxiIJ3jkDoTB2JG9MdJuiZk2LaoE5qTGcoq7UjCMmpj96pMTuu5Y66caV5yo+QpOFQ6CKSigghRUO4tDeH5oTe+y
JrJmL/Dn3PprFNM5AuJms0NA8ibXLpgFfPcF1n2ukRkCQ6IQkACim25wRTs7+duMEnqe8B5mbwU9n4LDHznKcjA5X1MQ3uT9EZnSZV+zF3l3UaBe38CDL4KAqITiziICQ3cZdpxOk0fTiu8zQd/HZeDPGC5xnIL3UizwDrh7mQyIMXxrz5FQd84x/6Ov7dz7yLf/ejv24x4ES8OPYqmJaOiSpf/7u/iK/52i/CFwGJNTE0hLpF22i5BtqW0DaE0BJCoGlbQhtSzHPLsmm4dvU2b/vlR7l2K7JqzDRxXyV8xqsvcrBfUUqgLE0aRSLEYEQFaMTxrseW/MpHW5YhEhJCnzjh4qhgh8DlPcfFg8D+1LMz9qgzD8cQI1Edq9Zz9XbNtTncOIwc18KiNfVmG4UomgiLEYswQH134yuwtWn+j+3czEALlyQJQLA8Al6UkRf2p8LDe56v+cJX8Dmf/RDT3QkHlx7EV7toLFjVNf/8X/57fvYdTzJroaEnZvuF8OWvf5Df93u+hIuXHyDSEGNNjIE25MQmMclFHpWIRM9q2fChx57ibb/wfj761E2aAGW0XBCvefUF7r9/l/sffAnFqLRcAsmpEzUvelULKTUmUxKlZrao+Tc//us8+vQxS7XY8Ig5Ce5V8CWvv8TnvunVVKOSxkWaNnL12UN+4Vce5eNXWxY1LAPU2clPsHDQJnuF2x7JyYgCKfwrMaCsEvYl7F50OKf4wnF8o6VeCa4QJhNYrSIri9ijKg0otm2WBWHt1Cdpc1wKj7x0DLrixq3IjSMlpNgOxePEcf+liocfHnH11i2OZ7A8Sg6bCWEEZODTZozFJ8A+nirlGKYTx8FOBcDN6yuOb6WVK4Q2kJKFKd73/jWC0gS6cFFRaINLCcEM4MRBFElme4OdurWdFrrlHeyUMPZqTDEJFjHx6CAQUyyy88lzP/Zx6UCXQOcE0cb2jHfeHPkkCRwbwC+PrzuvYTMhj3Qmof5f0jTRx4732i1NfgsDaVCz+jppwlzSLnQ8yoBuN6Hp8z6RUfab2XzDodS8riLP73+qBKuYGePE91tU7dqL8J3UL2znGVlTkqVwcuRRHlW6ZuDVfxbLy1Eo266TzlmC/qx1JkA6bTMb85B5Qe8vIai6BAgsGZQkun4aGNh83zu1vEcdMB0L1+f/iecZoLO99Cg/6QwYeU9oWj7w2FX+xnd9L9dmQlSHI3ae+ojFtbdN4Md/4hf4xbe9k1EF04mFEGUEl9dQuvAV0yxEEVBP05r6cLmMNE1gXEIRQJ1n2UYee+ImVQmTMUxGFraIBJwrEvpTgjiO5pGQHYkSiBYn5nzgYBaUOBcagaWTgXQvBIRVE6lVLCQt6SW9TwQAaJ2gzhFDNOmJXkGZ5/STC2MZSKtpg/tEpFXNE9t7Na2AU0ZFQR3hXe99nBvXn+XKA/fx0oeWTKcHqHhWMfDs9ds0Axtbmn6iwqNPXOMdb/sVPuuzPwcpC4SGkJi1U/NgDpq8/Am44M18Uky4dN8O167dYFmbcHz9uMY9cURgjFQto7HZE01SS3G/GilcQQwtGkpbm8IzX3rqaGGJeU8aWYFVEJ54Zs6V+w/Zv3CBKEqMAZGCg50Rt6sGr0IhsIxKrbbmUS0rYM84bL9mR6XkSpYcCG12mka59VykqpTdC45iAruXHOORp1052romZ3+875JFb1y71lruAFnfC1kKaqLSRgcBqtKDaxOTN1tx4QJlFVCpKQo7Oy4UiEt5HNUYsVehbSOzeYOqOQqOR45yJIzGBeOqYDlb4pxjMpkASwrnGU/HtG1LiELdWMZOh1B4h6iNPSq0UYmtMl8otWgn/efdmM/tyZ26roE8zUSWpeKmVQtblqTJcYJ6W6+IRQOQpFBNob9rHvpD7pme6b0ndOHG/Tg7ps86IxMRiGY2iHq6xMvaig6c2KR/1xwYIbEfey8NStqDmYn14wqqnfOqQnd9Vst3zpBkjchQ+s2/D+hFBrVp/teA22CPr72l9syz+zs/e3B5zAkmBnO+Kb2v85P82foY+7GevHfTLr+tZd+QoSpf1vobqHoGzVZFOk1J9lrxPmVTDKZxyFqqYQzA5n4+ywdseA0wdKU4V3vRwABgqqPuD+2ctryDIh3QZYTVHBYrHdjhMnM3oiQRGuD2EqpoToVlIXjnUoKbjNoCXkytpFhq3BgjbYAYvMXxO5MKcBF1kRhhGSNNa85X2kac2CH2zoCAYClsV0E7kLLxmnYoo1K3wryNSBPs2th7/9ZBqaOFdrVkNa6k8MkUAtWjpyRxrhPCu9YIdIOko3duAKydCK4AjVAQKZz5QXiB2La0XlhGx5Ipxe5DhPIitSsRB01oUSk6xzoSg5eULnQZIs8e1ry6dTjxxFBb9jJf4tScnDSmQyQlGguiKjG2hGaJd85CRpNpwpVjRnuXWalHW6FwnkgktiFJyoKrLP1rG5MzY3LW6/IopHlQLAzPi2fZBG7MjtFRZX2oUDcNLioj72hcJHhQsWQ+iO2Ffh6zD8FQgtkkGaZGJQjNEmaHgYuXPUXlmR8Gjm+taBvz4XAijMsR86Yhe9Sta4UgOzmJWGhs20RWtaVKzUtdjZSLB579C5HD+ZwLO55QQnHxItPdPVsHV6AqFIWjXjV8+LGPcfPGAidKOSp4+KHLXLxwEYdw6/qzeC9Mdve4ef0qVVly+cpLKcqSNkYWyxW3btygcMLOzh5C7DKCNk3L0a0jmlVa15Fj1QTqFbRhnaqdtsfPjpyxLqITSzcNRJ/2utj6SDqjmdkpA496tAv523SI1qQOziaykLzMRXzSssVufbr8BxvgwGzx69K4+QRol+J67XBmKXOgyTNVwYBA0O+D7r9r9CPd3zG5xKgGkn9+t16CT3PTaR/ys7TLQ9C1DtOcZLAZOGUQ0uOtHghpfj/OZoJD7cCJU9WdjbNBwVk+Vds0TR2tzcAMc3jPz49xo5+crGnwtwDOC4UIMUhKvU6eiBPPuxtB7/nwgU8LGOjsPRufdqoU+7N/5YQqXWKs3tnvIR1MTUlFMgmVFKZuwpapskWVNgYkCi6aqtdrshWT4uHXJiiFrKUQnhBbWqWzzMakWopEQvRm547KKnk2C4KPRiDMG1nSeEnfJ4SXzyjJ5qrQBGG5iogELH5cQAwENNGSqwQVI4KJIGRVcIw9ADi3MeistdowLWS5oHcwy7m+STnYbdN7pykDZM5ZLhS+IMbIajnHi9k6PdkJTvL/DSmLECUyW8y5dXiD0XgMaglbaCSF5gVbI2JSzzpUPPPFguVikbLISZIsI1FaIitWKyW2jtJ5nAihbfHezFx1u0RjDUHwzlMEpVk1oCHFtmsCqYpgzo6rOnDraIY4x6gocKqsVjWrVY1LCap8cqyTxPiTIoCcyxB6oGZzenIt8tyjgrZKWys3r9fU8ySxqVl450vliafmlKWzJF6ZynTbQrvnOAerVUMIzqTBpKIUUS5cKrjvQAihQVWpKqgb5fjwiPG4ZFo5HAFfjSkKz5yGcelTBgRH00bKYoSocnh4xHLZmOlgPCW0yqxeMp7Ou
HSwB8tjqgaawzmztqEsCi5eOiCGmmZVsHRLbtataQXHjtHUUdVwO0aaqD2PGOxVHfoFnfhuyDwUcUZ4FaXRvCKDCB1JnvUCRFOrB5UOCEjHucTwlySCnxxsNUvViSFLMlpn2i4J2OfwwK1SaJeUqBtGAiqmSnbQnaUumiIR0KjandOeScV+32nee3ZfzmGRgbrLEm/XZ8+UbU/1knaMeU4G0rDmWaD7N9yLbPydozgyF+i0Bwm0a5orq3uwfX3z77m/Xk0/fK4O/l4HGMPrtpkghtesAY4MMAaSvv0yfPPheNN8kc1difmRaI0k81n2JYh5vk+OcziebUBhzechjeZFdSA8CQRyy3DWvu8TPvSMyLnE6NM2bdUkg5ZeohoCSZNeE0OOigaQYAwk29aMKFs+Ah0MJTttGDNpCSHHtZtjR0wHQ1I2mbZVGlF8khAimlQ8kphVHpx2ICegyY/dcuCPxo7JVJiOHd7b5ldx+NKzaiOLqwtmS2XRqiVDCpa1MJA0A2nsSnpux7TpwcE51EhrG20TvGnnO5wOrT0vJBdkO6jmNKcKly5f4L4HL7B3eUpZ2ro65yi0YDQpTGLoHD8xSRxl/8IOV17+AOMLE8qysLSmg7SiQgHiOwK2amvquubW1dsc11a3YZnSmN53eY/LL7vC5NIOZVVQFgWS7L0ORUVTYZsIWpqpANDSowuPlNZPx6RTv947Ljxwkb0UWloUDidw62rN4SqyDJ5aYaWROgpNWhArhJM8kLudnwnTtvUZHHaB0bigGkOxUJouK0sv1VvIaR+BP3QQc1l9qULphNWyRcRRlIoslVElVLuO0bhlPouUfsLuvmWyqGtlubKsj0e3bhDrhv2Ll3CTHZ67eoNrV4/wRcGo8oS24erV61wDlqsGYiS2DctWOJrVzOc1i/AsN288S3O8QBdJw+egOVjRtjVHs1sc3azZ25/gRx7xDUXp7Txlqti95DqB7DQg3bY/RfpzJEHD+ghkLU1KjS3aCQpWBCnlytfsxJbBVY+7yEx5ADw0AYGcSTL7qqxl/8tMKY17bcgydNuDzW1i1IpuPFkize/ZORRm5pOGboWP1lh09y69sKLdFtxU62fg0dM3WwyVpAnN/SZt/hoI26KSH65Z/s4eYePMQsgwLM5loWhTeBkwwu713foY+htYi054viZVo+0n7+00LaJbx4X2NUIyoOu1SoN+W8jY5CyNxfnf40UEA9tavziu20ywtuU6KR61ymc5tCzbx73kA0BKIUq3QaOAL6z4j/eOsswozjaXdyDO4QtbDOf7eFBxHu8980XDs88uWNaeJhhzvzAtefXrH+bCfRcYjysmo4Kq8hRVSVmWVKMRZTVGfMGvvveD/MSP/wrhOKUWVqXBMtVNdgv+6//2D/MZv+VzGY0co1FJUVYURYkvRnhfUbc1P/D938vf/f/8KEcNTMcFy2XLMvRake6cpsOxicRt+s5vV7LZGwKDIWi1UDJzaBZawGlEgsXSR3W89hX38Zf++l/lkde8Bl+WCUhEvCtQjfjL/4Jf/+D3sDw05psVG9OR509/0x/jK3/fH2BUjiDHLWuOY5Dkye1RgpkRFOaza3zP3/pb/MZPfJDj2jLgvez+Cf/tn/1mvuirv4airMhZCTRmtysL0bSEU0b9jNAY8lwtF8yr7+aJ//UtEEzKCmr2/Nd9xiN821/7dh58xatS8KuARt76Uz/O27/tb3PzVjBtj2Yvd6UsSZnp6FamB1ybf289LZSVsLMTaRpHW7eE1hFT2uYsoVpBGAvB1E7ys59FCdMC9ncch8cRKR3eKQf7wmtefYHdfZgtl3z4gwsKB5OJ53Ch1AvhyoUDptWIRQjghaYOPHf9aZ65cYwfOUoHhNYSgi1bdnZG7OyWPHv1Jm0TiX7F7Lhhf2/C7nRCu1ogrmRZ10kChQcvX2a0V3H78CaH8yXj6YjooQa0tvoWGq1mQba59irfNEsb+Z6HTDmrocVnc412zCp3oWA+DLnyXrB6GiIWAUQyGyh0hNsVLj8sgfINBpX2cA/4Nol5P96ust9Qquvi6Hv6OLRn58jKzWvW48kTsx6+LJkGZ81DLyV3ZoD8LgkgdFlaB+8xfOcM0mFDuEgEJH+0KdVuguFs6hi8Sf/UDfX4aeryXIFx+Dx7Len6j1lIO6Of09omTZUk0JgwmlBV9w45n4CmsfXnPs1Wui7vje4pticKy8zaR5dsN/+e+x3OaTJ4gZMOpc3IgOnkj1NTFUJ0yUkwsD+teMnL7+fpZ25x/blZUnnZBLbRyuQ6Ed705s/gy7/myzm4dIHJZMRoVOILT1lW+KKwn2VJWVXm8esLysKcyHxRUBQlt27d4Dv/0nfysz/3KLU6xl75ks9/DX/lu/8qBw+9BudKO7A+2f2cEQ5xlvD8t3343fz6r/03zB+7RdMm8wNQOM9LLu7xtb/ra7j8pi8GV+a3zfg3Md/IF3zp+xj9vZ/gsAmUFSxXPWxS7aawu3cdBnwK2mBRFMmh0Oat0ZluzEu/co4rl/Z59RvfyOjKK0CKtIag4kBr3vg5b2JnVHBLgjHhqEjhmI5LXv/6l7H/kkdQ2UE0oEkLoJrdxSzaQsj2toIdvcwjr3g1txZv4VZtqtPdnTGf8/mfw32PvBzcxNBLJkYpKqFnwf37JRsPGld8xuteSeVgIZboJqhlTXjJ5X1e98ZXMLn8UGJk5qTy+te/FnzBcRu64lSqyZSSwhMZMK9MeDMQPh0IWAs1+FBRLxa0jaQokqH6O/3uxOpwpF7rJuJL4cEHRzxw0UHTcOsIVouWeqVMxsK4aNkpA/t7Ozz+kYamFlQnPPPsDZplpJnfwsWanQu71Ko88dQ16iZy/6UD5oslx8dLlnVktYq09ZxY11y8vENZCHUDx8dLNCqjUUmzWjEdTzle3rasjylB0e3bN/ErqIqSixcPqOuW+cLyDcxXbZfqOsRN7cpmkxPfdCpdlzSNqgPivDnvOQIkqaVbcF47qTmmiIK8XjGBBzIDiJuMUE8+4RRi3Hmui2WizGF/ku7pRICBRNxrFHpGkgX29Tnpv+zph/U+1EjkTKb5md19/Y916b9Le6i2lztJd/jY/IGujTODjiG4WZubjfuHGpVtavyT2oYOJXXvloXLTmpP7SyzwGmtf55b20sxaQOtzPcwaqMPWRx+tn0/DPaxWBQOgbWcBNuEvPP4BtyJ1uT2goCBIVLLSDcTys47M13bar+vD3YrvvrL3sQ3/fk/ww//+x/je/7uDzNf9ai2CVYkZFp4/vP//Kv5+m/+b5BynEwOwwkYmBg6GJIJc4o3J3JldczrXvcg73j7Y8ToGBXK/kgY+ZqiCDgNEFu0zXkMtCMIqFJff5q4qu19EtO2UQRu3zzkvT/3M7y5qHGlt0p82hBDDeqIbWC1anj8HT/Hg3uAOGKwkslldh5cQ9A9HMgEQYYb6oy2DW2fvAjMOKpGqVQYXlZ6R1V4rl+9yWO//PO86o1X89FIfRa0MfKR9/wSq2VDDlV03pj7bL7kV9/+Th582QM4P0K0walLVd6ykjDF
mZuygIijXq24/pEPcXnqKb0dxNXxgl9960/D8pCiKLAypqZmjiEmc1AOC4u9YirttdVyyfvf/i7axtYxs54g8KEPPclP/+AP8vJXPoKoJUZqQuCXfuGXmbY1+6WwiNAMwhEzWB0C3uxVfh7JRFSZHTU88dGGZWOSVKfeFMzzGCXXld/b9VYwK81TWcH+nsMXK6s9oELTmJNtDMK152qKQhhrTbOKOOe5dOkKTzx9i1VQjheR3eOaqEc8d3POYR2oCsdqMWM1a5kdN509LwRYLCLh2hFN29eRr0bCfDFDqZjuevyopBhDaOHgYMr1o9uM2pLpzg7a1Ny4MePWcUsMZpAPao5BQyl+jWFxkhn0+9n+OcHs/9oT1CHX7JhjqqHhxPIDZIJtvO6kKtr4SDr/OljbDEISw81aySGjyhKsORvaRswps8n0kJ5xMhh7b2teE/g7wtpJ2N3ek8FnfeKyTj4d9KH2IgjSqahtnJmBrV1MZ0qMaR6F5Byc+hd7pM1hjtBI5yL7MnTj6ddC0vi7PDB3aFtBxZp0uT5X2+je3WoINsNLh4Cnf3TWcq/vDftj+Lzsj2LjVo3JqSppCUhVEHXAGO9CALR9cL73e1FqE6xLTL3SxGKybdM7oPJCUc9orj7BrqsHiTCM2WY1f9MG3vsLb+ez3/gwu/ftmuo0WMa70LaEuiE05iQW2pq2rQkhEOqWGAPNqmHZ1Nw+nvOJ9/0Gr7jPsWyUUhxPfvBR/tF3fgcPvOQyTgJCg4TWRAhCOiwRlYJf/9AxV5+e0WbpGYttb1W5OVe+7x98Hz/zH/45OzvCZORtY2mE1iVva+GJZ2sOJhbmeHOlKczSTCVtTL4J3VTmzTic4PPhwG0hK/nz4WL1QYEd4kAQPOBc5PGnbvI/f9tf5eUPV1RFS5H8KbwriFLwng8dcrRoOwJrwroyr+F7v/dH+OWf+wl2dyKlswjMptWULjgduGQOCgghwLKGjz1Tc6GMlGrx68e3F/zdv/GPuXzwz5iOTUXuvCLBCJ4T7ZwDpSuBqt3nQeHXn2pTlUNjskXCih995ibf9T99L/fvOcoyUhR2uG/PWgpVDiqhaGApVuimSwrV8e71+UzcIdPTrY5xYPH1y1UixE5JUazmk1EqIdh6lB729j1NHVgeR4pKGI3Ai+XKWM4a2pDyHIqB5xu3Wl7y4AGHt2pms8hkHLhx/aaF/qnZ9JdtoD2KLObmUnvflYtcurTH7cM5N2bP0jaxI7TzxkIwVZWggbL07IwrplPP/Q9eYTKpWK2WxFHLpcsHXLp4gePlEYpy+9Yx1545Zl7b/s5e6qbajynPQLe1GRLCbU5TmSm55NndM2LpDorm/3TCbhZOrDBS53ORw5bI5gfQYKfCycCmnel8rv6XntX51kn6LKnIRRLDTaplvyFaZ/+Pfk/Yz+yLoENmnD7o5iCds5Be0HwaZY12dn2QgZMxoQE16MejSZDqRHsbQzatDEFS5zNA3vc9MO7eLfZOjSec/JLz7pDRbnMU3NZOaGcGjFpPARbP12dgqKU54cyYnUjX1oq0/QRLKDCku2n8nR+G9B+iiE8GiZhBqCbAfZLKbxXwpHcSvVN7QcFA2p6cvgaWAtOL2TtHBH7tVz/EX/+Ov8HHbwdWTRycYZsST0Sc8s53/CpPPfoBLuwKu1OH96SJB00ZYw08aJfmNBe0CEFoQmRWw/Fc2R/DCPP/ny8j73nXY+yNHmM8EiZjl2obRPNDEEsj21Dy0acs+UxOLyr2RmYB93B91hKebNjf8+xOqzQWy0TVaKBpPfPWoTEM5kkovXmrOyxBS1QDB71AsY2R37ltO2CquXiKIXeXVIQxSRKiJAcdm8BlhI8+u+TWbMnOWCl9EhoB8Z7rhy0h0wXNaj9zjrx21PD+x1ou7kaqMkkQCr6wjHsxxN5mi9C2ZhaaNynhjYcaixy5PmuY1YFJ5SiqSFXaNd4lcOn6WHUvDglK4S3feHCOpUgybZiPgRdJxYeEm3WgPY6Mxpb10mEps30BoyKh9pCJMF2CnC7qO9NR+vCjXHAqC25nrhPCaOwoq+TzkhLUihe8RAItq9bS7UoCa4t5SzXy3Dpqk1SR1jHC4XHgox874tbNhlUNo5FSr1pL9iNAASsNHN4O0DrGhUXmhBBQF6jGBUXhaOq2M4d5gUnlk5JMaRY1VCVhOaOcVtx3+SK+dIxSroPRqOLo6JDbt+YsGqXFdWvtxFEWxlDbBHokMZY7T5YdPrPbr7O3dXks1QTIn2aNTgexFW1tESVnXErr14W9ZxVE9wT7EQcOZH0in6wbM6lX1CXJOOVNgZOMpRts78S7lrmSdVPCyV0Dlr5Q+7S75HO4KRCkq2OugzFQpcuwz9634IR6vhvekKGtM+f8brnv4edDu/9p7TRQcBIAnI8BDkHR5t/53J7V32naqTPegPWduDZV9Oum3VKLM+FLEyiA082Md+sLMWwvSp6BzMz7gyfduRKBysG0FEoxr/onnl1wbQFNhtppgZxTSu8Y+UiL8txhw2wB+7ueqpJUglfQFEYmSRKUlAo5Jntf2zpWDaxCpIkOJCSGB3jHgpQWWD1NEHxC987C1ylaT4NwWDe9e1AnRdszvLdMg7NWoBZqH3ASiEFwUtBqpG4iK3UW4qj9JvEJIImz3ltNkoWuK4A2D93zAQmSJLIUFJgkoJQEKHldO2dOWR6Hc5FaYREEFzytqFUdFO0iLbwYkonB+ncJyGlUjmtwjTAmdgWpLCxR0FQP3qXKYgFHdEpRCFVpxWVyTv6oSt3FKRjBK8S86TMQdAjOuSzsJaSeIhwyt1BJaYXT9QpBI616q1aZgIUXKLxSeBhF6STvgFDHSPTSZT8csJdOjTo06QyP9mlqzNgqjQRmx5alxxXOCioJLIvEGBLTb6Jy/UaLw7Gqh8zINuV8Gfnox1eJQThm8yXX4sryfHiP98LO7oR6NUNb00LcuH6D2eoYvKdpWkR8OkuWTXBUOi7uVSxXK5aNsjMd85IHrzDeKa0s9XLOWKMVcGprRKHwBdPdHdq4NDA+t3X3YqaxVjfJ5uZe3djXaWE7yS1R9KEEnS88cSKy7V8Ts859JxDOwENd3IAZDrvO2oLelmD7LDNMUQOcDJg+vUS8NvbBfsggfP2+ocSeX63XYHToRxKAOTl7SWWv3b05/4rVUui1Lf3P/N6uG9uJpieftU0Luek3sCltD+/d1rb1uc2mvnntNpNSr7kQROLad+dlrqc9+8T43JAmD9ZyUEKaIb1IWii88cgmA4IttH3r8wd85E7txQMDnSOK/Z4xZyGWyKV0RrwsmQ/UUUG92e0B57QDBFUBpQhtjMyj4oNQt0ByQHPOm2TtsKJARLqEPQqhDdQt1MHC5Uw8BVS7OvVNaxUEnc8evwohIGK5uJuo1DmV+lAsFGNAZeFBa0JjFefa0qEaUixtIIjlH6iDlS0OA89nGSxod4CxzRJTEppOMLnDpjx1PchdmIreJcbX5donATDJtlU7OJIk/hANZBlxszLG5miZQi5VO6kVTPqTlBbWkgNa9IAm0BR
DYphq85TVXSqmVjWNZfrpHE7NpJI3v/0zQGHqZu0OfquWBa/NaZYlI24QYrI1Z6nRJWJu9u5CLKYgexKL2mEtHBR5BqNa5IuknBeDeudkYs1g+w9g8QnBQQwsNLUleyoKQaOjqTVn66ct1MCvJvs3tlfref9c50zTAeZ7Yjwk53OAVW1zElSp68jx8YIQLZpAI+xMJly8eEDb1LT7tt6z4xVNYg4xGGjQECm9UPrI8e2bhDDCu5L50YJ6uWBRLBlPpizrhmJcMR57tC05mtVULjMcKxUecrz5mqR59t7OsnIPwPLnspYZeEtk2Fr/60B6vVDUQAuc9tvgiR0jThdpylcg0n8vID4xmUQj6LvrpdP8Rt2HHYff0BJks1Qeu9mr8i3Zzy8359yGA50mEDSYqwEQ1eQgJ9nhdrBBTzi0se4Dtq49kI4JDn0xNn8ftm2M+CyAsO3ebWaH/pr+Zz+H24HDedo20LH2bE3gvwMgWK4QkZShUKw0sshgi/VzVIgQ5KQz7Ilx9i90hxPTtxcUDPRoyGICh0mABIvfrxxUXrswwqBKE8xeLBq6q9EepXpntrFWLdfzom4773dDvAlsOCFqSIlXpM+93gaCCnVjmzjgzXYYlcZ5YhPRQilan5h8qoDXMeWQ0hAXaeObN7kjpcFNtdajYEZebzZtfJEyEHpGrsBF5biZM6+VZbCYdcXsvG3KThViLqSTDlU+7c+zDSWULK/afy3Xgkv2VLqDkh2RYpI6BOdhNPaMJg7vo1WMRPCuxPsUz68uoW5jfJnw+JFQTh1lQafOFucTGBGIEUlW1eyzGRRahFYth37QaKGkKSwHD+ohSC4qY/GRDjMP5Nz4TlIiFyz7VyAnmzKzjpBKpDrQIhJLaJyxYFUDqSGta+jxWHeQY1A0ZahRYipYkiiz9gwpT/Emu+sIqaQ8/UEoRz0YbGqTpGNjoNml09y2VhvD3sUc4nJypBCsqmLIWokUluiKgqqA2TJg5W/BVR4phLJwFKWwmB0T2obpSHHOE5bCyilNjCxakHlNAYhGjm8vqecrRoczRtWInf0DQnRcvXGI3jSH0ouXIs2q5viopa5tjziNViMA6fwihuDojhKYbJEuNc1vF5UQuZsTY47Odp8RbRIc1ExismCfGOm6ursfiiRm0H+neTNlMOFcSl5EJwkYGF+H7BrV9qYM+kzAQrvvE4Nl8N4dc84jknXNwlYGk58xuC691NDjf/2ek3/3NRdOMuhNR82hKWHbevR9nmTy2xjy2b4GtnjZfLLZz/DWs+SsbWPYfL88j7Zn0sFPxCBHIA25Yt9XEn4TiIyyPq7h+3Tvn7aHqDt54Zb2AocWbv/MsHdOwGWLE9TsymD22aAp/MYl+zV2uC3hsBLEqp6VhVCOhKJyZid2pqIX51K8p8N5l5zUvOUecAXgqJvA408+w9FRoGmNEY5LeMPrXsnDr3wp06mnGnnKoqAqS4qiwJeeoixwVcVvfOBxnv6ht7M4ip2s54Cmaalbxzf8kd/P533FF1OVQlUJ0QmFL3FuTFFUqIOf/qn/g+/+Gz/Ic/MIhaVlnq2CRVlEm5tse7S5O3133o2GIAsovc7BCENYQ7NJuldT4ZYBLu2O+KN//PfxBV/yZqrS8jeA2URDcPzsW9/GP/qeH2HRhESCzU7vBH7H7/gyfsfv/1omE8tTH4MQaftxa+g0IZIyMoYYefe73s33/6//kdvHlvXxoZfs8Qf/2O/jpS97CC+KOCtwFEOwQ5j66IVv6Q6d9wVtG/iRH/0pfuwn3m1hrWgyi8CFgynf8I2/m1d/5mtNkxEsN/8H3vd+fvD7f4zby5aQ9mcb0/uLZeczXpZqVfQsPkltA3VkPrTp+xOK1jT/oRGreoYRrcIbICK9Wll5cLZuzoE2kXZlIQYxCt6bWSdG6bPZJUYymYy5fHnKE08+S+kLDnbGBLHshZNxQekL5rOG5aJmf6dCtUaDgejMJBaNMiodY2cagqa2M1lHpTk+YnG04nBmKrpLl8aUVWkhfx4aDbStvZfzPmlRerv/cCdvc9zKJp+4SfyTBNbFtmsCQBsnZ/2sDCV1TVQ3ef8Yskr0J/l+aBbEJGkOTjKr4djzfhhe29G+xLyNyeYYdunE9i6fhEvcoJMQ05nNMtAWSbuT2nuhMYVNZm//0yXozc/WGNw5Wn/dEPae7Ps088Gwj+E7nMaAh2M8W7ofzM8pr3JeMnoWaBiOpQNnmZ6yDspySuJeW2J/g+LEW9IccqbC7abhtO07GHie9oKGFkIHZLvDlD/MmndtTZpeiJXJvXzfLq99/Wt44uPP8Ohjz1r+7gR0girHy4i28BVf9ia+/g9/PdPdEWUp+LLE+8IYdeEovOUSwBdIUSHOI4XHSYF3JXhH09b8/b/1N/m+f/CTzJIX4Gc9ss+f+gv/HW/44q9CfIETn9TlJjUaEfII8OVPvId3/tIHuflrz+FESBlwCaqUpeO3fe1X89qv+l0oO5Zpj4zaYkdQv0iUyd/532lvrhiVZvduNXR16jtYSTeJ9H986tYrb6kuQj8ToZSiNSqsWselgx1+9zf8lzz8mV9AdNMkaVvSFdWWnQce4Yd+8Ce5eXxMwMwLTs3Z7L/4uq/kS37PNxLdHqLRkgwpdLGEWW2Wq/7hUYEHXvcf+d/+5c9zbTVDFT7rymW+/o/8ES498nKQspehNglzoiCdNJBQeoyBMB7zUz/9qxwe9xKXiPCS+y/yh/7EN/DA6z4nAQs7yh/50Lv5t//hbdx85oYxfbEHVqKU3vwZomYPhn5AQxnvrNYRMcVs35g6v20w/5MmeWU7oawc46ngvFVUjEHMHNX2sTqSJO3MYLJWrG+Ond1d0KssZy3Lsubg4oTpXkHd1ngX2dmt8C7SNC3NMjBfakqMmAGOabCaAqZ7FQ7lwuWLHNdzrl49QuqUgyEqEgKhaQkh0saIlFB4Z1qLYPVCcvz/+WiZ9oTlxFdbPtRsIlpnfutMi8zle7Wsrqt4IUWpiIW59tnvTp5LSXskYYv+7HaP6l/A+oip775ibPYVkNShJdLp/UKyH1T/mvm9XKdlyMPrwtU6tfPdtfOo8DevEcnAbOAfcYY24bTPz8+g7/Re6w7tdyE/3eG52wHB8Oe6aWSbWYFeSiMBT7E95vTkfhn6k6y1c77UC+4zkJHO0K6EGlFpkrPOKtrr7I48X/JFn8u3/uX/O2/9hbfyHX/+73LzqDUbLuZQtgjgVHn9Z72W3/p7/ysop6lT10nQ3X/VVHwxTY5JRxGnmrzla17/xlfhSqFZebwEdkrFr25TFWNwZeeB7KKlbE25jwFF6jl+4DFvUrBpJ+pV4L3vfAcPvuJBfLVjOQbaBg0NbVsTY0u9anni3W/loYsNszksa6UOUDjzUxhqBIBkY8qc6FO4RmS6Kic+1aTmdkDpCxbHS97/Cz/DpQtTfDUFjWgMSURWnnn0vWjd4J0VX0r8jRACj73vN/jMJ9+Lr3bNBJScI1RbU9G7pPNPufBJBPfah3+DKSsuT4WmhcVz1/nwO3+e1zWHiPeIs7ryIU
Siho6pEtukwk/SllroaYwNz37w13C1ga9UbZfYRm5eu82v/PRP8qbj67bWmFPjh371XUwWh+x4YRGSicE5uhjqIWhbkzTzn9IxgxN2182/oav73jZZcjQNWVk6xlOH+KRRCcJqBrEdRCwMuKTRj97XIwesLeZLbly7Dmo25cNZQ9sGdnYLVm3LQpW9vSnTkedwUbNcWfRCUaYCK61JmEVZUI5g5+IuR4eHHC0XFIWjLD2rOuDFnHFvHdbmIKhKHYSqdEgMhCYilaNdgabQ0FPtr+uzat+rQxmqlzcJY97X52V+2VHPnunEm4N+CkHNmKrTDCDDRV5Xo0vfo2SpLtcjyCulFs2SSUgv5w0pWa89ktyjJjqwBlROaUqn0ciq66xbOHHpKXt0uLc3pffTTTna/TxNaj8PKDnLIXAbEz4xik4GHZ6PrneGAGHdbDAcw7ZxnT6GTb8BcZJ8N0LqV7t7s6AiIl3FRkn0z+Yta2WGQGYAIjen8JxA70VwIMxq7sEC5m+UTm5ymLNEPbtK/dxH2S0aSp/tcebsZB7dxigfffd7+cSvvY3dK5etR42EtrE8A7G1inihJoaGEFpCvaINxpBDsDwEq8Wcj73z53jtFeH6kY21nR/yr/7O3+Px97yFsvIIxrg1KBpbnFdctAP6gccO+cTHrhvBUSW2EJzFbc8a+IF/8oN8+Od/jL09ZbRjavd2FViFSKgjdRv5xI2aoo0cFBEaC20bidKIaRgsF3pmy0lFuyZNPP916XVLriMymWlkBmq+9mZ/dtJwe9bwPf/L9/JLP/KvOdiDwqe4fhEinvd8+IjbRysDUJo/h3kb+L5/9qN86JfezoNXwLuIRulLuqpQYGGamuIog0LTRp54ZsH+qkFLZeHh8Poh3/0X/waP3D9hMrJ0wN5yDyeGp90hskqHKZ5ck0rSwXs/viSiFE6JwZwMvQpXb874m3/1H/HK+yt2x0qhJrFen9X4VeC+cWTWWvniJpgl3mozSJdGe5OydHbt86xKviefEpVe4hdb+xih9B4nLu15sNoKJH8EN3iWdBnT6LzCYbFc8exzq2S3VFzpOF5F5qsGSUx/XFnVSnGCK4VR4SgmJaFuWcyCSaka2RlXVIVVSDw6XuKItHUwkF1HxAs7uyNEhNmsAXEslg3jylmJ6DYYztZ1iem0+RmC7yHhTBO33tSkZJW4Aa1PfQI5OVk3WfRmhx7Xnc5Ihyaq7h4GjDhiSYg0RSJgQktXvCX3k2NWO80ABiSiJg1Cr+4ftr5SYs+QuhDiLYw833P6vA+Z73Zmvl7yOH+up/Kl8zDxbc/cPu/Dv2TQb/5lGDEw1KKcOKp3Md7s8xBPXGOanT6UUtMadfVscn6Gbv57oJNNR3kLikuaJdGuRHoGEGv0f/jr6d6ya030nPqhaqCq+mRb3minqZmcKqMCLk4d90/hNQ9d5GatvOMD11kmsc3jqLywN4YS5YGx402v2uPShYJRAaVXVEOfiz4xhphUbxosGZDZlk2yWTTw3G3lqBGOl4o2SlE69kZwcayMRzAZCb6ysSpqJgkKoOQjz8E7H51zFLRLllI4mBbKwVi4MBYu7xbsTZS9/QrnHCFExHmatqEJyu1F5HgeuXojcLyC49byVDfa26XBtlbI2o41pJvGtnHIt32+MfPp2u5CRKyctMbs/Gax2eLMbDUt4WDk2Cnhyn7JffsFozLiiBTeA44nnqv50DOtxZInBOwcVN6xVwov2fU8crlgVLaWYKgzR5gTqVWVFFyqGd+2kXmtXD9UjldqBXuwMLWdkWM80hTyZ2aa7nAmB7pCXKdic2Jqdi/CU7eVx2+2LFplGemIfeVgt/AcjIXdiTD2hshXrXK0CMyWkVUwbdaqTVUvnbAKStMl0UmHu4tXT6wrnlTxba7XtjXrAYJVeawqK8EcGgitdv1u66vrL2E/1d5eXY2MgIc2cmG/pCiE49uRplEqpzzwwARfiakpUXwxQsVxeDjjuetL6pUyqgoO9j2XLlSEGMGNaNrAjRtHHSGLEQ72K+bzlkBJG1qWbQPBIn4KhPkqEnKCijPa0G59GmM7eZPNnfHVJGsn7no6Q9pCpAdgWbt+TnaQGQD0UVO9M+nGupDXTPssppqAwCALpV0xyCqYQc4WVfS6rV237rXt6vxTtALSz9tZ7GPIMM+SljfHe9o65ncZOhZuk+D7cfXruv6c7d9teYPBC8e15/Xjy78Ngdi26Ih+/+R+1nj3lnXKIG7d0RKy57Gq0RBLte0Gfffz5YBxpdxcBu7UXnDNwLZD2i+K4iRSijD2itOW44XjvY9d56gWmjZdo4CLSeIRRh4WMfK+J46YPKNMpp7pxHX2Rp9CltbjZM3LWgViFNo2WGnhGKlFEaeIN0l4GT03GmHkhHFZ4uuUWEdi0gpAbBuO2yLjP/KGi+mQFiKEGDhaBXAVLASR1iIcYqAOEJtA3UZitOiDkLINFi4lHIq2gYNqiirIIWJnawVOV9ttHjaDqdnL1Wmaa8lWfMG75GEvpp2RKMQQWa4ajlfm5S9AGQLiIm1O95wdPdT0Qg6QGKgb4ahxNOLx0oKkGOecYc1bBIWL0dSyYgWsvLMEQNH5NOeRRmKKLNHEdGwsLkvRSaoPKDksUaISU8iUhWk4RHNojwESaGmC0AZHg4EiVZc0UwYcBIHCwlNxdI5sIdIV3IJ18qJp758mXd1pTUHQIKyWaZG67GdnS055IENWq5hDUlUKAQtlHFWKuJCyNCrLusWFwGRU0TY1+MDNWctiaeeg8g4vSlF46mUNsaWaqDkKHoyYr1oDZ85TFDAuLYKhlZKjBaxWwcBfZnCnEOqsFbvz/Jz8rJPGxSVAYDvEiHN+rmmTTut/m39BZlLDszYk8AMSt66PSO+p9GGgnXOeZkfptNb5BiVJlxuGvDO0E9u+z+M7731rDoMbzG67YLf999zXtpYl3R5EnDwL6zRrXaI/yeQHQCD9OViRtfN4sg0Z+envNkgv1o3n5DV3BgJDALCtdRoA6DQ7BrCF04557v887dMCBnp71vog1geVGQ9r11qiG3McihFWmKp4FiIxHZBE+qwssBMKb/3VKTVfrIMlv0naQidQiGIpbu0psVPfaqeabgM02WM/EdYYLayxFghEnLfkOJqyENp4Lc56sWw6JoDQqe6ElAsgCm0j1GXDsna0GKCIEVoioQ00wdLatmqV8CBJYqqW7CcakyPNQxeHzdlMvyMwp0pM6wfeEp1o5zntSeRIYprP9FQ3sGEHk0y9mDQvUSx/AGC+BPREhALzzrYogRghakjaa2fvZbF+hnxxtClXfQSiRNOMSHa/TLHVcRDqF8EnYCNRiC6LUPZdTkRk+iJnmRLVNAdBgOiSH6P13UaXgFnsEsU4JwYcsWRLRSL2RZbqUsiQah+2mKVyC7U8O+znToTaPkhAgPUY8CE/PUtTmE03ISi+tIiFZR3RGVBAbJSystLeznnGkwlalWa2mbdQFgbeBXb2RuzsFIS6gVigUiBi81EvWkaTggt7Y0KztGukpBqXZhJ0irZQNxFil7H/z
NZJ0Xe68MQkggznXgd0q5O8hucpQyellwA7vf2pWs5B9wkQDNdHu99iNP+iLs40PbMDRWm82aeg60ET7cxSs5oZJ+fO2dQUZF+tHNZ9twV7ttGY0979bK3BSSDRS9Pbrz/tWYOnpmu2ax2QDKiMdg0UaB3ddt232/s+qXI/qUGxHzlbbr+LMwgYakvSb904N4szrc9RovnpvGcn0973YXvb5guyrb1otQlgC1rEzop4I9wtFqvdYM6F3VXJy9z6SUw/MY2micZMvCUm8lj2vianCBU6tZ0mZoWSmJHSagpH80ITNCVlMTOCE8EFpZC2NzupS9KlqYbzZstkWUmx8MHRqsXEtzOhdQF1bYqTN6c1DUJTO5omsmiFOgjqUjlLlS7E0phLLqKZNtuaKulsJHg6YBhKqmKM8AQ6tvnXwadtVJoAq9YAW1lYFkXvLFWwYLZQi5sAdUa0YlTqNlI3Lb4AkSpJ60LhFcQcKCXZuCMB1Zao0CaSGDTnrxdiKnSESNJomJQbxPZCjMbIswSdEyhJkuRjhCCtVdcT0GTOimr2/7aNLEkJr9T8OUJW566REEl7WVLga59GWxMI7YR47gwINtduK0DgJFGXvEDCCcLb32cLL2BAChiVBjhDVEaVgzFMRp7pbonzkaAB56AsKi5eFGIIuBjNH6PyJiE7C4FqNRCbQNO0VFXB3s6YqhJaRkz3LcxXQyDGQFUUCErdhC1vM3yvdaGi26/nmLvT+7wz8MgX9AA8+e9oXGNivd089pJeojFWGyNpKLPGahjZMWAWkE0+GfBk0NsLHKTzmsMGbc37/rY5p+rGM4db4jxC5GnaBs2kme3M/uS2vTOtyvcNJe7NcXYYW5Qzz1KenwQKer4+HNjmeE6as9c1D2vf9Ew7XaPSO/924G3A/7JWQ6QXxNaHvGHWSVqo7HdgVdizQ+qAAg37OSdQ/rSAge3IavB9OrtrYxSTRHNxEEvc42gJtDFnY9+UiOjShbaJ4DaQMqGRUsoO0F5+pkCS78xfICZCGAE1L/i2hbpJiFzUkujUwqKNeG859KMqGiFoACdmw8YYkGLq6qCwjMKtxsariUmWS8tMZiAkEjBtQGgtbe+ytZzlpIyEaw5VSM5O3zGHU+3KGwf3VJtmd2ANbZI3c1Lb5gx+EiNtNO1NUCNuM4V5Kxy3kKI0KVAKZyVpg4kiRDOoo6o0RI5bWOFY3o5UM+vTmGgyI4iVTI4JJYoqqJlQ5ith2RoIs2AOYR6hXKXoi6Tml8TsBUmEWLo87U4l5SWAZUsnwRv6BjTSqCMGzzJGylR/wKX6BCEoTetooyUfMvONaY1CNBCXMzZkiUxzOjjNG/iUMzJQNZ+UPO4UO73RF+tEogeqPWBIMRDUdcSN0mepml/hHMtlg/dKNYLQKKFumU6VUsypczFfMZlUtl4KzWpFxJnTrQYEYVyYA2IILaENOByjqmQ+X+JSXYoYQjdXZ9GR3q6+jZics21iiF58Z7PDXkCX/mvtI6PWx9UT8ROSedIoDR3s8rtqKu+8LjEO13r4nH4L9aCyG+RAyu73TB7HWVqi7Sru4ffre7Ef0OnXbraeGcrg2vQy9Mzx5PWnj3sIxO7kIyVbvhsyeF27TjauWe/l1HfMaENtz6xpCAY99IAgA4WzAdLw/WK0ipcWmZAS3KUn2PpnYHQ+YeMFdSDs7HUYS4truzIxXLGotH74SYpMkpt90qOo/RHsVNAMqt2a6SDJzZoxvBHBCB2xiZCAgElCMSFvTSg+xCxvJb99sXGD6yQ7VWNWkr6ThI4jWSoG7w3hrxKoNwtT7+EdEKLr2bzEnDXORm5293QIN1DyJmJem2tObtY7q/iMTeT84+aUHwkpwiDHL4Az6dAJTYwGvIT+PjVHvkllEx3SP01MGk3heGqgzCe7fXauUlGLSSe9n6iFjZlelKCOLD9uSgxCjk/XLDydQucd6hyFtEwrk/JCtGRBoZNwCsuAqQ2C+SxI0sdEFUjj13TwvZrzYvbrGIxsjRSc5jS4jdie34fg/G04NJc4nYqZVcYF4CIhRZHk+g6aMjoWlkTTfEi8nau6Nl+XgAHE0Bo4rypJphXTjxRFQUAZqfLg/Rd51Wuu8LEnnuKxTyw4mkXqlWnSYtbunNIkH7QsDd6dtrujQ9rth351XHL0HM73WvcDCbiPSFg3ww1t39vWbcj8Nt9LOuJn4DevrXPO6iYMd9KGNJifvQkC7JkbwCQjnNRNjnzp79k+qVvBaAKZm0Bjk06dBAJDP43+ns3r1oWa7cCgf5Zbu+e84Fk3Os20g47mbfnmDv2ugYw877IRSTTQHGSQe3af61Ef+e+chKjfW4KoM0fgpjmzT3gBMxB2m9Q+YTgdRt+N8FtFr/Q5QFK0dtOfqLoVgFGOGzHHtSgEl+CyRpPPRVKxktxfZuguqYlycps0Tujqd8eYPJ/JG1LSaKIlyPEOL5bVrXDO8tlLoG2DjVb7wD+nkgr2wHRccHBxh9FkZA6J4wpfOHzhcGIOiLeOj3nqyevMj1aYJJ2y90k/K/2mcf2ByZO5Mfd3uVrkA9oR3K5Oed+nqKdQk/Q8Br4mlVCWQlGYc13pTdI7OmrI2k5NJ1nENt9oJBzsFIx8SteMqbyy6GNZ/OzhQbUzqyyWgaYxwNWl8XTmWV+ISfx2GNL+S3PjGHr8JiIoBSKR5TJpY9LZT0/FiTKunIWxis1NG2HVKG0IffrYBGRwpvo2WjsMYOvj5s9y6DxvOw9IWOs7AWLS3tlkttkRyXthd1dYLq0GQuGThihp61a1VadUNcdBL44onhoL3VRVQrDwSokmDa8ieO+Yty11dLhxwWseucJLDwoOPXzhG1/KB5845PGP37Zc/h1hHxDyzddMhLY713eTb2NIczPB1uEjtiHsPE/J7k6W+vTMNd3OMFzP/tJ9uZRxrkw3HGTu2xJGbR/fmU6Tg79P419r7goYU81q7zu/j10v6Nr32yT89T5OX7NNJn4evLu5BGcBgrXvZDDbkh+2zqNOe8YdRjS4LwGmoQaiEy6GYks/3tM0vOu/p/0vQ0dCE95VoAntuUb6gmcgHIDRwS994A2YulAlppziitXHA6RN/a0B4hQi6KiA8cQzmY4oCkdVeqqqpCi8JT5xBZJysXcEX0hqFukqFdZNw3NXDzm8vTSpF6z4TNI4eK9UJUzHsLvj2dvfYzKZcOPWnA985AbLFrRTvBpDHTvYK+DiLrzh5Xu8/GUPMRlPKMoSX3h8AaqOOkZuHS947/hD/MYHnuZwGZEw9FdN8Cg50w0PqpIl4/Pt1tMP9foGtoRMaYUSwfTSUjnH1DmqEkalsr/r2JlYbYnCma/GrZkyO7at2UKnGRCgEth1ypUJHExStcJucc2hM+bQUJSQGGsThGMCR04JztEGcM4zGTsmE0fhhNVqhVNTY5PisF1K9EF6F5+wjjrhuIbnagOVJIZpUQ1K6YSpd+yOxGovKLRBmXll3pi5oo50Wqcuo6QkDZdmLnNn1P982mlEY21NuydL2pueKOA9Fi4YoWlbojoaB957Kt/iRgV1
DJ2fiohpwlZNxBfmGDselzxw/x4X79ulWS25fvUWdZ2icTRyNGtplhHnHSWO1Vw52J9ysOd4z3uf5L2PzXjDy8d84esfoA6BJ58+JoZ16U7TOeql4vQ22S/jLqY1ayiHCHdYJ8Lwap9H/6z57OTbE+aB/u815pEIYGfeGDLINTbRM4NNdfoQHGVNWRayhmPp7znjPQaChOg67eg1BwOEfFrTfE5Pf876ODZpVJ5vGcyXPdfmYTvA6LUBCdxnOiX953bd8P7NsD86jWv/LtizO+Y8BAR3K2ANWoeB7N0253wbjbiTI2bmM5LmyYTRnMzInXu8L0rVwm1tiBNFcsiP4FK4G7rNu9gh4qh8gBiZlsKFqfDA/SP2ditGVUFVlYxHJYUvKVzRrXoIllY1xKwdEDRapECIFRfdgqfaBcvaMkI4tbj10cjq2I8LLO/AVBiXNaPCsSqs6qKdUp+EaqUQGHthhMKq5donnmbqVuxOSpwEEDM7NNHSsK4ax+LwVhKOlSJVGcuhxsN5yIAg28jX5nTLARj+va1tIyQ5jW+WLW2NIIdrOizKwQVBgsfC1OwYtW1yeuxoinaaARFb2noZWAGlJI2PJB+RDAxEcaUQ1ENK5lNMPRf3CibTCd4Lvihx3hG0Zn58bPO4itRJMnVi48MZA/NO8GoOpkPnm6FHsCQdks8M3iLObEghaTacoqnEcqOCOvNSETT5QKSbMlHNGh1JhCaHJXLy0G9T9Z513Vmt07ok08ZkBC+9f49HHrmPRx66jGrLx566RojCzqRiVLTs7kyZjva5fvsWz1x7jvFoyt7ODvPFiqZpmY4Ldsael16+xEvvu0Bsjnn0Ix9j9ZxHdyJ7+wXLWU3pPJMql+UWppXnJZc8q8WSJ67W3Fh5Hr06Y3/f81kP7fPQA7vcngWuXZtz6/aCVWNndKjJWGNRzwdfZXCWb86dd1JG7wzGxqUbnaRrbX6HQGD9mv5P1R7SDL+ImHltDeBvqP2zZk07hkW3vTIwUNbiJPr78pMkf7YJWO6OyZ2mrt9+7ebkDaFUN7Iz7zntWb3mVjd+nncs/ee5L2UzR8AmINh23/mbdvsv++v0ACR3dXbip+FYtKfvQhda7bpy2ecb04uSdGg7cctSdEiEGEytu76sebSaUs6OCqVKDGpUwMjBwY7j0oUK50PKhmcTk/3vwZhs1CTBRZccCU16bAMcz1tms+Q/IFBGoSqF6a5jNBG8RApX4FzAOzsVzx3Do08HZk2W2i0Mb+xhv3KMJFJ6AxEPXBkxKs3+F6ICniZGQmsFX56+HblxpMxqIy4x+y+o+SOoSOfwZnO6vlG3Mf+z/t5cG+vPShHr4HD59J13UAI7I2HkoHLKpLI5SoUZiRFmtXJ1ZmriNvZOj94JlYuMRdmvXKqEN1xoOxvOK5PxiGpSEF1Brcps0eCrEeOyZDIe4yQSQ8vRbM7RYkndBNpWcb4EBI0t4FmsAnVjtSx8KmDlMLNEGx2LxtwyrdaASSoepXJC6ZVKTE3rk2TZRCtdvUrhoE2EIObQ07YxRTb0EpaR+VRjgth93jOes89JfwZOgrpMkLYeZwGcdEDVObj/0pg3vGKP+y/scbA7oq3rLhLH4TheLpiMPIVY0aByVHD58n3mLyBWrDlEy9xZIIQQeM/7H+exp27x3K2Wsiy5uD9mPltx/bhhNCoYjyzJFuqpvIWUPndcswhKKcLuSNgZF7zipRd55P49Wh9472M3+OBHb9FGtwZ2JVUCtT/uPrRwXWKMAwY58FYa1LbvWPRgSTrVfbfI2knuvaS65XmJEXRunBloq33aS7h3HvsJdXL672kAc9h0w6xytk/Kaf4Dp9OSO7fMxPMzzhJWtj9/HZDkxD/rfTy/seX3Gu6N/Pm61upOfayPd7j+g+u6B/TA487z2vdtMkYfuWSF/Xw38JX+J5J06LSX2lSDdU56YiqPHPqVHQfXGB+2QUVhXDm8Bss6h9LGyGxVU3iLze9VPRkpaa8JAkLbgrqupn3UaJEEiek6wZLfOPMUz7ENbQymTUAR9bQ1ENV8CsC817HQx9JB5QTxNoam7hFoTGGRdbBogkW0QjPR8tniEbrCJsmOF9OyZ0Fzm8foepTASa3B6U6GkBGqSAqNUfq/xfI2eBGI2eFJqIMNpW1Ssh6FpnWkvHxZp4AgFpNv00rTwhKh8LFLt4wq5bji4uV9dvd2CBK4efsWbVT293cJGii8+SUs5kuOjo9p1bGsFcUznY6sImEILJeRummoFZbO6oETxSJOQqp0l7IsZgFRnGmnvIA485BvU9nYIKZdMOlBqXLe5FY7uKkCzqdokY6VJGfDzjh7tgRzN+2sSAMRmI4du9MCUuY9T+DDH7vKx564TuU9GhtwHsX8AEJMyaWIiINCPNPR0yzrAJLCB1HapuXSwQ5NiDz68dusVAjqaGctz9yeY74GDlYRLxZK2yWWcoovhN2x4MQTUI6byAefuM6TT19lZ+xZtsLexKcS3haCG6MSQhiE3dnZPi/Rz7bbvplfzmnN+jXtoWnJ1kXioZ/T4C5gU8Wvw6/6ld80JbB9HYf9bJ7dLFD1T7+Luch9pLHkzzevG3zdP6WjC9ts8uvS/Lrg0gsd52sdobvDd2ddN7jjLoDC+nvcrfZkHeCcAIWZmZ3Q6p78bNhPr7XqU4pLcmJGksNvYnL/SeUZ2DYhJwgWmKIsoWOHUph7eqcqAzo1eY4jd5h62TtL/uOTJiC0LjGwHNyVAYQxG41WCFnVvMI1DrwxxVKyigzUbWImAKctsU1znlBjcCDO6sCrM+I/xLwxh8c5ZVooO2Nl5Fu8CCqm+pYoVCNhdhhYLGuWjXnfA5ZCOVfA09S7GltFTx7S5+OE1t+X/9Zk5kho05GyAhogcQitWojgKEtGSd2uKMElz+IoZANGJ2klYioiSGKwq9bAkC8spe14OuGhl11CHdyezVg1LVEa9i7uUWhBuzKAdHR8xHKxZLVqWNVC5UxqlSYS2xWhbSlQqnGFLFukUWLpqBsDYeoFWvM7ydElIuCihSg6VVywEFLvDdBEsagBTardENUcGdM8uE4qTOaQjthGDDlFNg/63UhX53YYzJKr2NlYLFtCMOA7x1lkREoiZS12We8gdLVCRIS6DTTtoUWEpAgd59Muv3oDJZ2jjtM589XIXasQBs66joi37UzbgiPgbCmogeOVIEdWyTIm41QXH59F9ERIcxW88zSVuC6RpZDG3mYd+886/ur687U1SmsdXGTlQp7X7Wad9ZhzBoLR+RjUtmu29HtKi9jezGmTcrrxYf/r+6wb6MbPbdemOyTTpc31WQdI59vP26/ZZNKnOdpttvNrC6T7eTeA7ay2ds8GuOr7vnP/IjlENb97mucEXoVN0Ht2e1F9BiQxtR4lZ2mERJGNmUuS5CU61GXAIF0IWhShSJkIK+8YlTAqTRWdS4pm+0kn3koGJj4936E4rt864sZhTWgFLx5xyv5EePkrH+DBhy+wO3GUZUFZlpRlQVEVFGVFUY24ev0W//bfv4vFM8sEPMxnQAACfOGXfjZ
kvG+ucsVsRJsoDBKmDND4QTYbXu+fCDx6TCCClC4bKYjCsVERgt9WFTwRRMzUq5HDkbtTBF2OvxbIThvdDh7yo2a6rtNBbjqu08y+G0Y8/eRAqpkv3GRPeZHnL+vWz85a4Uu3B6UOvJi76sADktcPL4lV8Gcahow1SBPx3beL3tKm6Wm4L1rxv4+bLtIkTkNttFYzkY1a9gwbw2ngFgIOKpgkw3D0TEGYyO4Ek0wTH32VILq49VTdjiYVkqv/3oBz9m/7NP+ePf+k225h6HpSYWCMF89SUtwZcNX7MO1MSplBDeW3ZsBauJoArkzO/+xr/mD37r98BD45S5s2hwL0pbCGyeZ4f2dj2npSaCCK7UE/jOn/0lP/gezL0w81Zi2Rd4OWNES51CF2FeghlzJdeTEguZwUumj1qK6djj1cU8CGc9WcpIcKXo0LEX7wrvgBZJOFDOTKCBEe6+gkJg8hfFihO9884jvPMEEs47hGBIiHhUhD5HgnpmweN9g/iA94WX0YN6Gcs7q1rtCBKqa0QcQgMExEeMowAOjo745LMXw3ieEX5ZlAAZflfNhuhUga1jeWGqwiXmbrI4kjInlUxJq3jVoc+qAuiQOvryG9R5m/WIECkGY2ziTaoj+lHvZ0Qybu5+zvPhbh4vZQ+4PTF6XtzUlf3vvHkKwHXa5VCly6NQxwXz8fNfpp3r9nwjUYCxbYwLlL3+7F3hOgrNpZWBV5Nuo6f8Nvm2bgBSorRzZrWCLgPqBshXijQKxj7MchnJq54XT3UoV1y5Bhz2XxBnqX6CQdFQSvBaRkBGSKJ4b6iDFNq/qNDXugZF8HnMD53UodlxFIU+RXqVMa1QTWFonOC81UhYIeAF9cZTgJQiR86ol5PLkK3vUKonUssuuzHNskzQRpsg2EyBgpoeiGBVGfNxVeHyL7gWyFjQay2+jU1ivF1272zz7tuP8OJKAGmDkwza4V0ziRGBtmlpZ3NC65jPPC5bnYLgQlEWE6izVD4ykhUfbDU7yaaQOtAML54f8PzFgQV95mxBhMc2+bM2jVpAK1fGQa1KgxvSBnOux9ma3uxvqghMx+h8UqfLjvFFkePqLOXRbiOf+kKO6b6n3+t593CeNXk9y/IMy+mK7o/zzj/ez1UKBF0BMPlctMv6/M8av8+TIB4NqptBPF5lOyvmY/rdZdtr5RmoQYSnvbNTP61XLcF4Qp+VVCypGpgsTvAitA4azGzOWLBdLJ0oZpV75/AC6qSkDY4MguqK5SaFfS4L2ZkgzsXqK7eBR/BYfnglB0pq0dIxWfS/Bf2ZZPDO6I+9V5oif10JLMwVDtYqUGxM7L4MPQlFwrhSuCkDy1gq38EAZ+qoHU02ockAF8u0Wr4v305ue7Yor9a33XLGobz7zgPu7e6USn/eyg5zZDC8WjBhCA1N0+K8p50FgssEsmVt+Iam9XgPMXpiNFQAMhpbRBtc4Yowk93m9bPPHrN/sJxoJpd7BlMApkLIBKd9NiIIORmXtJS+z35Z7b5sOZxSCfAa7eLNuHw/eR9Pg8eluljOeXePn3P899MEtq3hqwvv8/p83e0VobnXbleJa7hun1cJKpx+/rrh+dfVBsVjw9F7vX6mP6/Tbj+bYGKpVufAWfcvgCPjvJEOOcxfnmvFXBEzssQs3uCUWRGWrlzLifnXnYy+WRFTDFxBFLSgDBboZ4GIqmzk/NccdhEhFS1EJxpkLvnTWYVYUgFTrWxXnkdVrbxugWCNYldIiaH6YM7FLZEphZlqxgRDCWbJpZyxVhfLKdr4xpiPu7YUmLdW1bNjp3b55V9ILdavTeaUjGcCYelm78NoTD6s84gaO+AXvvAOofEG+7tAzmtTCnxASHjXEHxLCIHF1pzWC7PQ4hsLLEw5GerjW0QaOjpUc3noYNkdkgfFTsQyFj7+7DGrdazqmVn3Z4zHuPnZQ0wj5odfVYb1k0tRn9HZPulr+G1ToRjSYi7xfr+UFTPRBUwpPe9C9ZTrbd6nogRwbI2ccelzrJ4zrdLT7uFyt3rpdtZGPh3WN6EdH79LI4CqZ5572rh/noT6We/Ny6IBl3kfT74H5ysEx1GAm263qgzYA1QYtQiAyVYwLjIL6BLNRvcrWFW5ItNs0NwgcCptvCvWvpsMlmJpgFXauGQkQk4gpyqxjDku6bgTapZhHx6o4qUEjWEIgmaGdDNRKWx1SkzFnhvOw/otz5vJJFW6KPhStGbwGpd7jZnirgCNFkCXykLJagyEMZ2W+T5ecminLUwtrHdOJ4NYA8SqfD9/wblBranKjZ04+MBFi9++UkZP+qsW5oCaG2nQbNby9tv3iDkSksc7Cwx0IYBkgkvM5zMcgdm8YTYLtF4IQVDvySo1adDWUetpgqPvO4iG+vQSaZ23NEU1lTNlePz4GbFY+ZWZ8nS4/PiHNmKDiwBM+UAhuzLfRXGrPNDk4aXO1DBMGbgcTp/Ii+7jOm0yd0hxadiaOHX+qzKvZ6lJZ7dz0Ykr9nVRGxCyE/DGsWNOu5czLOjTEY0z+rji/b6p7TI+/uN/v6nKwOuA/V+F4L4Iebnu9W43tbD8Nr6RmbHm+ybMIZgS4EqAXrVmc9lUqYqAFr+3s99rDQFRiGCu2VQZDQvHPWz4dafcKWbNylggqfYnFdYv+oIopAlVbRGMSZVOjVXQUYmSShCgCKtU8s7VdAWz0EdBnMtum9UZpTH2fCnb86jIQErUJS2R8cfH+vjgyynfi92U05JLP1r1Q4rhZRbyRGIOxFH1oxOayuTDQWoO/+DFc+/eLo8ePQAg57Wx+uGIWYxhsvG0rcM7YbFomc8CkqMFFqqn79Y0TktMiLcMA5fwbk4MmRwTSihryix3l4UcE598UjIJuPiFUooSdcxCHsespMQW/gJVGfgKpimE9dcptXC9/kV+/vM+v3DeVEAy6sbjJSuoGxfCOU8P4zSfkIdX3Iuus3VdziKdYg7Tq10wt5cY2zo/r5K6+FW2lxHYp633C9+XK6AQn+d2layEzaDAzXf/dbTbdRNMhEMVulKs5dNe7qyKJiGLGFWvqhEIDTimDBuoqrH1pWLcVyu7wu9MlADRSX5+RScm5ZOHc7XKusFuM3dB7QdzXZjSooPmkKge6rEJ0PVK7IwpsfFCEwQ/E4t2F8wFIOBU6DKkKCyj4yiVKoYqJLRUbLRe3QhqX3oRbYz1kOpm6IhTd8J6OjPNpboG6u9nCIUBapTpPWpBFurLYxv3o4f3aJuApgQ+o4Xy2BHx4vAEWhcIbcN8FghBELXI0SBY9UEvVn64sC023lsp7JSJfkwTzZoRFWJO7B8e8fjxPmMFQVusp1mJ5maqCpO5AEZEoK4Nm8uaoVL7mroThhdCFatTcLl23Q1z8xnsn6KSjC6MQVE5p1VF6pr79u2ki417zE23qQD8vAuv67aLXAVX7eu032+6ndb3bQjem0QHXiW6cas8A/ZL3TgLwDy1SIfvLJreWNqmC6UIrEFa6cTvLRz1SkzWh2UN2KY/ZBOI+ZS9swcXV0ojix3rhULxa9BtLkhDikUJgVJqGNrG0zbQ
SCY4aD00zuGcctjBT15E1iYS8M4C/8imSMyDY+5hey5st0IIye6tSBhFWfWwSrDshJkoqygcZThKQl+GpPGOPlXGxPOtpTODh9RUipFyuVg8p8CppwZpFSRgtGynx9a5OQVOHObUFAFB8F549PA+YMF2OYGEQPCexgeCKIt2bjUGxCE4nBgjoXO+UAJngjPaYkOBomVmSAOtR5Ig6nA5D0WKchL2Dw94/Gwf1VqTYLTwj4+rak0NHDdEcyUZx0DxaA3DMyIEtUKh1aUYUaWJIqBa9YNrt8v4cKsbp/r+R3X3onYzwu9VCNL6TFD0crXfTlxnoqSe6OOS93RVhWB6xOcTS9hsZ2WHnDjulFV1UdDhTQm8l4msv8JVmLztg/yaPtOrVhjPy9S5yvVvOYBwVAAGK3zjPscNclAeJnM4CJASWFWtGyYKAcDMCdvBBLQTaLzSlPK1ItmqGEKpZaCFKa7mhWeSGltgVogZeslGXFPm3Qt4Z1kBQ1VEsGj2LHiEAPRq1MpZLECxcUIjgteMU6XvhKUKTap1FazKYcpKn4UuW3ChUwugbBTmpSQS4gmixFOiBk7d/M95MezvWvioRg1crgkjj4GWcr9mBdtcu1rvocYjDG4gE9QAztl8zlrPg4d36PseyDQSUB8RSj0J36DBqhM2DoIqLguNC4CQJOPaFnEe731RUHxBTgyZ8EXZSgpeXEEslKfP99jbPyz3JoN/37ICJsRBpapkrgGppUjR6J+q6YMWLFpXpvVT139BYoaKUSMmpRvwytmIzMtuMOYIC3VWSpwHHHcpbbZy7EsqK6+yDTRYyiWVm3LeDaAtF9/b7SgCLxuZ/qrbVaD0z0eb7pknZ/l1wf5XHdvbJR2qGvmgDJR4gDOgyekYVvh8ItKmnQEl2t8Z+U/r1ax/zC2Qk5JyHorJKOYyFzG4eohdKIpAylYIKE2vJnYfuQiF3Cm+lCXuXSk4hLJKzioRmplfoGRovGU2UAIDYwaN5vv3k3iEVP7rspIiIMZj34iinlIlL5FkhGovu+AuCuIyaPVyx5dhR5yM+fOMkHkVcAU0P1POmKUtbG3P2d6ZE9OaxjeGzKRMCoGUoXFzKxIllBRCDNEo84gkTAg7VBPiQ/ku21dY8qLEDNGKSSXvySnz5PFzunVPneR6q86NlotzoM6yTwRFs1U+1MoBoUJMDPEEUTOoIRHj8q6K5/Dwk88nc3DOZn7Vl/wseDSTQItrqPY9PebEOWfoClfY6zbufKMzvRLocOozIaCF/IlqUEyzlU6/0Ztxu/y0CLabbVfhZhjOuSHheXtCuD7HZvGty7SrrJvzxusm1t4tBhAyFmY5+c3J4wXqpllESflmojhIHgbBieAlE4rVWIsAebCaAM5y3y3K3yxDKSZZdTtIcTSmVJSAXO63BDEW2Y5HkWz9p2wogXe2BeUMy1yKCEmZwKzDczhxODVLMWkuKY5CLLC0fUbhD7BaC+TKNQAEe6asMmQ8HFelTlsc0xSh09Oy4DTmuekxwxhN+oQ8EO6M8zYVcOPxNXFBhq8mZWYks7u9xaxZoEmJcQ3emeAtEfhOIJh0JpOJscd7IaVYBJor3do8pJTJ0WII7BIWQJgT5JRQMl0f6fqejz/+hL7PG89K6QtM+aCsBVdQKU1+sJK1EAo1zqpM5lKs6gT7sD3JOOalg6vH5r9EE1DxQD6RvbCpmpw80YAReSlvgZ7y29nXvGq/NT5FB7In+/R8cq2rQKrH36/TcrxP6+e2RNObjAqc1q4rtE9DGG7fChfGfXOC9r3KK56h3L+sQnCLMQMMwpQSbFah11MfTjcf2lIN68OeMuhibINzDFKv16rWtqYSFuYEyZUSVkpQW019q/aEBSIi9r2vlRZLHeVq16mTkiagQ4XEDPRpYrHX25MiS1CMFEmofoekdm0LahT6IXvCOtRBiSrMeWUzjmlkp7toIRyPALZ7mm5eNkuX9QVunLvxnNNo+VMUOK3pkCNSIIUXYndnC42R7AV1Sh+VWfbEXmicI/aZ6BPeKWggiJCimpbRmPrkQyi+XBOwMUVcEjRZ7Yd17NDeij5p6ukzHByt+OSTz0hphMHLnQ1VLEFKZgtFgRQkjJTYmm3OU0q2nsShyf6u42J6hRv+NhggM6Bbk+9fRZum26LgaqGq6TEX9VF+bpx1HpxwTh8wYiTuivvY6UFZY2psHcvTTY1z7u0SSsF1AsI+X+L51bTj7oubDqq77DGvRmGofU6Ts8+/j/PadZSbl41RuOWYgWLj13vVmvKXJ+JCqtEIQxQADHSDJcBq6HEYLLE6AH70YWcMbjchq0NQUd0skoyIxZCBwOhCsLRFO1gdSLLMgQHRLKkL1TAWrRwA1dKTijNTmQQrwpAx9EETg6wUxnTDMSiwLi9nxY6y+bIT00j1C0b9goCyK/k9jwenUIOoimwrikBVLqaXPv06NjveO7Z3ZvTpAL8OeFmgDXSxB7FaCs5BLzO8bBH8jJgVZhkNkSzeqIYL6tKnRIwdmjNRBe0T667jqLPqh10fiX0kJuHZi30++WTPpkFc+VkUQ3XFhWSxJGRLd3ViqIyryRcyBq4OlmK0NZfLgtMhNqU+tdukXjjlt6u28zI/Nufg5Nq56Kqq9v6+rAVyHMlCi8tOX+bJrY1KxVhf4jrtrIC2adDb8eOmn/+0tuvGI9wW8nWTkftXb6/2umdlNp123HXW4e2SDh3f7gQTklo3xjxCyDAuO9Vqug5Ny2djvm+h/B0N0EmOv2woF9Phk2qRl105F+tYC3ucYEqDKzULDM4fLbkBMS3Wo2YtyoA1VxQDEaNSXmcL1upySSerm0vRBnIROFRlIVOi3iGrI8aEaShWJ6FGvJ8Ym/p8ZyyW0469qE03gvrSVaXHVSXpRHfnvSAy+KGb1spOxxjpUgKMYjplT+qNVthpJqeIqsO7lhR6ggQc3sh9JNNlR+yVdZcQHDGuSRn6mFgtVxyt+oGfIcWERs+T5894cXBQEBh7Hu89FQPKpW50SgpidQ58icRPqSp4NS6inKdSihWdMgdaFYQ8QNtTIXZRO+tlv7wi8Prbxp1ccw89sfHrsbHRTSF0Grx/WbfAefdw4THDLH/+8YGXosw9ZZ+6LcF9ewrCxWvlonYcxb2te389tQlkNPCnaID9VnKYMZlnVpp9N43ZdK7Usp94ORNCV4IFcynrm0zNwDlng5ozvl55igzIJM2xoAJJKC4FgAkCMImQk+GeKdURwTdCW9xIikHSCUhZeLrMpFwQCzGXhi8KkS/wtikZZUyckNWxjmqsg7lCu7kQ7QzchRu+yzNTfV7St3aWV2yYzyv470wRsGOaJuC8crg6ovMNTVJErSpjmq9opEE1sFo3rLvEatUxnzeg22a9x46skb0Xe3zwk4/ZvfeA2WybnKFPkS5GYsx0MZFqhcqUiVF4sb/HatUNPn7vquvKxtUHR06FdKoqQTmhWOCkszxDGFASoe+LQnke6q/j2n5Z98BFAusmN5QT6+uKXU8Pv6KH4UQ7Ky3t0vdyhXG5rutg5Bf969FeVzDl60M
Ebre9que8MWXgcvDRlIanBoJNYMtBox/sTWoFNYExGwGzuKeKgKpw2GkpRTzekxNLIfRZacQE7kBbXISRKIW8drDnSVIEhoeYhS7ZvUY1GuNBkFe6ZGeFi4LAYhHoNdH1o+DvSrR9UqwPKIWLYCZ2/SD1uTAloMQndDkRMdRg1gRiTqiWdDc1DoRhHnRTKdiYoyuiAONoM/x+fIZtGmQydpfjKpcBAlLQzKwNOO/o+oimDI2w6hwxGwFQJhbXiGM+W9P6F+xs79D1HXfv7rC11SIu8P3vf8w/+n/9v/nVv/crfPvnf56cPSlGYuyJWVh3pjS4Ulxq3fUcHh7S9QnEI6ID62VKmak0d27goyRnX4pn1XmwEcgZy1xJtRzU6U1qv5o5HlV7Aka/YCxvs11GsThfMJZjGJG/lxWUl1V2XmbsruqXPt5+GlCB09pl0airxApc1WD5aVMCTuNhmH5+nT4u0243tXDqzyvCoPL5jzX6JhBJ2RaHVCEFKUx5Iypg20kuboE0QNXm10XAeaERtf+cEKSmAZayxkIpUUxxM2hh+jM4tyuWe0HnBy55JxT2QQiiA99AjomoSl8iynO9ZYo1XKogNigt0MqYlGIVFa3PXFwQySl3Hdyded5+OGM+E/aWiR89T3y6H0koWcdxqbERxyHRyyymavlPVYE6yhtqweCeYYhvGLs/XZxVV83gHqhFmLKyNfPkFInrQGiFLh0SdA800LGLZEfvFc2JlB1HwKrrSTHSBE9TlIl1jhylFV2EGIWYIilGtIyRc4KjIWe1zI3cs1qt6WMsPnGKm8gBnlRqWWi2+3VuimgIIQRqkIcCMVkmgQWZnP8i2rUKKU5h9auff57bpTfxYz9fpl1H0F/W1XJhcO41/ejTa75uJe867byxOssgOaudxY1ybir0T5kScF47C/06aw6uMzY3pgxc7mXYfCApDn4945jh2OG3PPjnFcswUK2FiMoirH5DSsVCKcRCWABaRWNr8SOhRIYXp3eNDndFP5Ei4Ad9pdZHEMWLUerUioo1XqFPyiqr8QgY005hObTrhWyFh1rvLDK+oBROwTuHuIKL+KLZqDD38OV7ga9/4Q5feLAAHE86+MPvfcof/OiQp50VMtI6tlzuZZxqkAPAMFjtI/x/6jnDmG8GCh4/Y/CqDD9rpwrq8OKYzWeWw4+w6tbMGqHr1aD53JGS0s5dQUuU4B3rLrNcdcTeFDd1wt3de3zzW9/g7XffK6hMLjwICR884hvL/shKjJG+j/TdyooMFf9VTlasyIkjF+6CrIqkkr1Q1okzaMniTlypTREzOVfV6ZQNfmM865obi2JdJBJexv9/XaToJoTVCX/95rd2P59DKP3zeM9wynxcUQm66PjXG8j317dN0YSrjv/riRkoTU8IEWtnB0jBCC0bilCMqtLhcVDb/Lze1SSjCX2x2H9uEJp2hkNLpT3ZcDOkUvN+JhDLsVL6riyEUuIBYzZ3QizXEcbAxVkQSJCc1TMIHrxSqJHVyHSKxU25jyAwE0dOyosnz8nrQ7bbwPZsxt/94oI2wO/8cI+ny0qSdM2XsJ6mG79y3Gbd3NRlAAJGJGdso6VgnVZf/HTufeMIjSfFNT4UEihtSDmTdV4ol3tLGyyCPbtswZbeIR5UE6jw6OFD/oNf//sstu7Q9R0Sk1WnFI/3rcWS+ExKkZQVxNP10SxzTaVgkw7BfyEERKLVMshYuWm1VMKUJsGk1JdwRMAqE95xhOA4bjKNhbnszE2VgutsuldRBK577sue87LtZWIJLnvey2QrHIeBX0e7Scv7IqXxPLj7rOyNn9Z2XlDpRSjJ9N2/SaXrtSoDBl1X2LXQ1p67ECw2YICoi383Zx1iCoxpTodaA62z/7xMNtxyXRWjlfXFnWCw7bCt4xlpTf1EiXCu8BR4y9T2BfbWUlugljcGGT4DrC6CWG2ErIxUtoWuOA8p5zYoVbDkxrFSeH6YyD08O+zYmTfcm6+Yec+jGXxht2EZI8soGyRA57UTkN7ktOOWm9Zn3zjm2AssnKEImKtmUOZUTOkRq9Pom0BoAk5bNIELQs7RxlmV4LJFDSRH61oEj3OBre1tZrM54hzOe3wbcEvh3p0dFttbHHWwWmW6dU9W8/EHHEogRm/Bf76pyRsFqtdh7EWwgFMH3jtyzDjnrG5CWReCKQl5rHM9QiBljGpe/1TBmmIAV1UENmbmhgLgzjvnOpvNm5DB8LLBhDd978ezcV51uyiQ+Dpr57JI4020nzZ04SQ6drVnu8wcvAyKd7uphVUIV/57pdQFGKTnsQ2zbLgy/SSPEDiTyPXqohXAuaIMFMmeFTxjml6BdCu6kAH11mtSo2fNRRKLiOX3UwSzmuKiBU62lMBN33nMDErLyXmz4ykKSCzPrdkQiCym8CQPEhViJiZlKdDnzGqd2V7Dwf6KF61jZ6tBGmG7ERZBWEXBO4FSiCdfQcs+Xf0af1FqShyn5ITX1DobhCkiYHUFKmd/xUjsd1XYWmwxn81pm4YUO5JqKTAVEYl4Z/EAzs0R75m1nju7d9je3mXWNvjGIy7jvOILEdB8K+Bm2zRNoGvWxD7Rqy2UFDM+CCF7kvYWc+EYSzlDSRmETCI4WzzeOVwAVW/shtmKWFVuhROjKNjq0vria1FTrY0Iwpvdrq9EwEkc5Oz2uhWIa6MHx2IGznri2w4ivCrMf5PXvMj6/+vezlsLr+s9uPUSxtWCnDZRyJX2tX4GQ57/cFz5y2m13ir0PG6sisUCuMIZK84q0yUF8XZwcjLGd2mtPuYQb5XzVErtgRRZLdfEZGmLojZgO/fmPHhwH+89iBKcwztvwWRO2F8e8aMffkTqMy4ziIGs0CXl61/5Mt/8uW+iPiEieAk45/He0QRPGwI5OL77nb/g3/7ev2Uds9UrEEcSYb1KLBzsd4nnXaJtwYXAvTksYyZmGSounjoNp7ywVaM8KxhqQx87tUntfOhPBHwQahC+CAOKM7gInHBndwfvA+ICTQt9jMSshBxw0uDwBC8s5ltsbQd2t7dYLO4Q2oa2EYI3pS6lHu+E2Hc4l9luWxZtw3oWWK8i60I2JAhZPSn1qHNDgCAMpXgKBGfZJ7GukWRohgRHcJ6cIMZUHnmzwJPFAQyTT1WCJmrs4FZ51e11bcLX3cs2IPSXDM677aZcRf15ve0qysKwP5xirV8n+O9v2vXa8T37omOu0l5KGbgKmYZIMS0HiSIDhW/9e7qkKrRq55ZP1MoRu2qGTy5rhrxxpz96eIdvf+tLfOVr7/PVr73PYtYMVLJNCMxnrcG9MXG4XNOvI4v5nO2tBbPGD4rGn3/nu/zf/q//iP0XZq0KcHer4d//u7/Ef/if/EMjpsmKkK2Ajioqic+ePOX/8H/6L/jwoz1LH6yaXjJftOTMg8WcTIemRE4duYvkFFmnxNG6YxUTy88es/BmS0Y1l8MyZaK3Z/YIKYI6ZXsGv/S1e9x7uuIHHy45SEKfK2pxsaZ5uY3hvN29XsfQAXMNWOpknZ8BKamKWwn+/PSTp3z8wWcsZi3vvH
eft96+zyI0+OAJ7YJ5O2M+c+zubnNnZ8Z8tk1oZjhJtAXxIXmaWUvYagneMwsN7awla6ZpHe08MU/QrTv6LrPuMjlmgmtw4kcFpQhuU1ws6HTeWiTp6igTo0IytCIlJRVComEMZByPcQApQalu47PRAjhnWE8b6QsCv85qrydq/eUEwk0pAlWpOMvqermxkTE79KStc7WebmGOLnuN89bZ5zH74TLtuu/WVfu+yb7eiJiBq7yoQxoaE+u/CnkYAvamw1V9zFqUCFGDwJtG6Ps49DQNRlNV5m3Ll99+i68+uMtX7m4TnLJaruhWa2Lfc5QSuY+sV2v2D444OjgEhcY7PMqq7+m6ns+e7LOjkTAT1skCAnPf869/4/f40z/6s6qBIGQ0ZxqUxinSOLq9tQUEqkWdi4IWwfhXP/gRP/nRT2ido/GAZuMYkAq2G1KiKC4rLRZE2Bc3RxKICCRTMBonuJh5dzvwK9/+Jf7iR4/5jT/8MR/sR7pLztKYKnjy6MtuHJYhAt4bImNpeBbwR3XpDDEfxvkfe+VZtySr8GK/48nzQ760t+Rv/a1vsljM2dpasDWbc2dnzt27C7YWM3MXBEfImcaJlSLOiqijnTkWWzOati01DxzNfEabMjFm8nxBSonVOuEUFrMZwRsqo2pzYOmugnOOh1ue/+jvfJkQWv7pb3+XJ6tMxFgIuy6h2Q3uEXEVXanvhmUabGJeuqkTX6FtRGtfwWI+z28M51sYl93Abjro6yaC/zb64/z+XvYadU6rE+xNbpdJqbyu4XBRf2etj7OE2/S8Nz2w8CbQkMsEYZ42tjcxJrfLM1B/VuFetAM9oQbAVAmwvyzIruszKRYB68Z+zL9r0P8HHzzmH336z9nywsyPvAJGOKQDyZB3k9RCtPASWBpZTgYPg5EVVSBCgYNV5HD1wixd1cI34GhL+WT1jr43pkMVq4JIichvgnWmmo1uWKUwJFY+hfK8pcqi9yDJjs3BetHCKaBZUHGQMi4qR589Y/bgIf/wl77K+4/u8o9/68/53mdLVrmiLsrgFL8EWnCVVgmUnC/c/W4aLc0g/MW5km7pBgViuBeEmJSffPCY7a0Zv/7v/RJbO1s8vLPL3Z0t5rOW+dyREFKONkYIKSfWndK2Hh8a5ltzXAiA4sVb4CZCGxpDj3KknVsMwNF6yRffe5vv7v6A589X1Nup3v37c8+7kvnG+3fxf+er/OPf+yser5V1hph8WTfKzlaLD7DuOrqoxFhRkjrWYq6wfPUX99TN+wZFTuXNON6jagW9x6tept2Wz3MjIO81uhOOuyk3vrtiX68zXuIy6/I2xvk2XQxnKRhX5Ue4TnbPaW/WTcz/iXu6pOXxGrIJptg+E/K1szxt5XOppMBTzekY1Ech3BFYl8IESQXvMl6EIGI+fClsf9kUAKfVj50LMaKCyFiCFkG84gshXQ0UVCaFarCUxIixDfZWKt4C+MQO8ljmghTNQiv7nC957NPHL5CyoHhfYGbBCvEw3odzZhWTwCc4evYU3Znzi+/c5c5/+Mv8o9/5Dn/44xcsU7ZNa+KmeRm/7GkVyJzXwR2j5CLsJxUcCxKEGBFTKhkX9V8pylhS4ScfPKZb99zd3eXO9g5bi5bWQXDmLjF9xiHO15OJKdJkT7fuCeospqNwQ+Ac6hxePC47cJE4y+zubLM1ayD2vHVvTpcyB8tIzBBQluue733/Y+5K5u988W1izPzWX3zI9z9bEyUQJHN/N/Do/hY5rTk4TOwdJg5rCeOKhAxL+eUVgZtqdl9n8yTaZjJWbbyor+nPm2xnQvvIsA4vE5D1yppuEnX9NLSzLXE5dac+a44uY82/SS6H82IjLnPuZfqGs9fKZRXpq6B2l93bX2tqITCa2/XPYxDR1AdbxYZONgFVrYUBLSbA2YsZhAJTFyFYqguOKIJu3EAVVFI3bKl4RRl0V194y1KoqolDCo+B3XvSQlmM1bW3IL5s6IO4gkTYMxsb3rTSujEyiow+yMI4DFry70VYZuii4hUj01FLfZvNAovW4fo126njq3cc/+1f+xbr+Cf86UeHdBlE5dSFeFWF4BibwPBDpJDxYK4OS7mr4KlSA+1UtaQmjEJk1GRhuVzz4Qef8Us//22aJhAaIUhGNROT0RI7ESsFrRlNHX1SmhRgvQYcbh4sBbS4X4JaQKemCCkjqadfHvLRT37I17/0Lov5Fq4J7B2u+ezJcw4O9onLnj9/vIT8Ib9C5O995T2++vZdfuvPP+CPf/ic6Boe3V8QXMQ1QotHEqy7TLcxwFOhO7W4LwcsXx0mrdeox57hCjjrc5medzVEAG5vg58qBDfS3zkQ7IUbNJuz+nlvp66zU17724pFeZWK5m210R17ep2XK43jMdn5su3WlYFh8AsEadbg9Psp3lagVvumuBVM4hgvwQhDi5gboPEysAGaH15GX3xhGazphABj9SGrTFhtZ7P+K2mMG6D5+gwCkEeIUAt9cFahT0pWy0owl0C9aslylJEwaaA9plRMFCaKiDU3uEysvy4p62Q0xo1X2hbmDdzZbnh4f5fdnW28ZraAL2wL/61/55usfue7/OUnR/RamPV0GIEb2UidN36ACrFTUIFxMie5+xQXh25alFJyQxWrt/Cn3/kB/+6v/TJvP7yH88leHnU4l43voDyHeGMGjH1iverQGYjrkBBQ52iz4/DJY3743e/x+LMnBKfsbG+xv+55vLfkvbce8tYDARX29p8wE+He1l0ODj37R4lPf/KE7x9Emh99xi/ExBfff5v/3t/9Or/yc5F/8SffZ52zFcfqE7MgbM2EWQNdN67P67ZTMz4utFo2Fd3x98tec9qHK2tcJ99fLZbgJuMIruoyOS/F7bQgwsse+9PWLpV6LOP+OG2XzUo4D5I/DVGYGgi32V71NacKwXXaZVCD036/qN1aNgFMhCijnV9Q4zGYcCIwBkFB3crGhVgFyfHzHEqQ8c4qAZCIFndAiXQvHUtxEovqmP5fg76qoKdSHhdhp1pq0zMQ/CTMDQBCyli8waaeUwLUDB1ImgZ0wmVTWKoQdMVFUYMqphBkzkpwljUxE+HRtufL9x1fftjw9r1dXA7EPqNO6GOiXy95byvw7/7MO7zY/xEfH6aSsnnDVpwTxLsyF3X8ShzEoHiYApBLzmMlmip3MT5lQVg+fbzHb/32H/CVL33BSH/IpFSUqxzBCS4IKfU4NYWgl2hz6ADnUZmR4pr/8v/+X/J7/+rPkQD3dh3zhWPrzl0evPU+X/3qV3nnnff5+PEz/ut//Gc8e/aC3XsL7tyZ47YEee8u/TLxk9U+7tNnRC+084bn+yvmLqKuZdVbkGSfIuIFscpXZrVWV1EVznp5a/v6TRkrXtRNeNNNZPdivw8bslRvhg7TcZnl8aYGdZ3VLgqqHMbhiq/GZTGb6fF64pPr9QuXQ/jOU3AuFOzDfj1ubm96YN9NtLMCGV8m/ucmR+s8Zf2y93hr2QQbNzTcZ9kUJQ87zoZRYiDwQCW8ga5OA8+0DgCDlDer2/gFlDyQEtU1XGH9w
UUwqfzny0ZQP6sWvRQcXIuSUCsMqoArNQtc2exroeGKALjiy+iTklTLfdUMgiLgpNRTmDyiqsUhpAw5ZYIX7i48O9uBhUu8P4evvjvj7fsL7i92kOw5PDwgYwjC4foIWuHrDxv+9jd2+Gd/usdRykVtGa3yqyoEg5VKqdjofRH6dbewB9RC2WsEfa6Ul2aCGoxzXefRlQfPwO//wV/wCz/zTX7t134eJZFx5OwMrSGiKeEkFf98gybL/9ekxK4HdXzywY/oc+ZXfv3vEmYNq9UzPn38KY+Pep798EfsPXnKcvmveLy/5rvff8Zie8GqO2Q+m/Ho7V0Wcsh6SznqHI+PVnDYs3pywIujSPTbiJ+zWMyJcUUX9+lzos9qwZ3i0IFactxMr9tO28hPvuzHlY0NlbS4oCbvW0FjBv/VFFA4pV1lrVwmevy8487r77JZLlcNBoPy+DegIx/vYhMru/pxl7vm+QrBacL7qqmG03G9ztxd5bs3tb2pXApnxW1c1EQveWQr/up3dfxix+z7ihQYDGmWlZzyhtjWNsIqJlylFIoxS9ThEFEah9EIlqu5IpicG18qN/xXixiZ4Vb3wBpzVq+f1ALeFDbyib0vNMdicQSNlCDF4DhKiXUyhaRPxeJ3VofA6uGVNEmB1kNQy3Tw3l7kiKDJrts5x+oo4iWzu+N55+6C999eMHcdW3S899YD7iy2aQXImeW6Z9lH1Ck9K1wAF1o+PPD8N3/wmD/7pKdTxakfVJZrzacYQhG80M4E8aYM1DnMKaOpKjWlJHMqqJBuogLVPTSgQuVFCwLf/ur7/I/+8/8u7767YyWqgZQjDm8uA5cRMm1obVZFCMExX8xIfc+f/uG/Ye/5C8Qt2L57B+cdO3fu40PLD374Pb7/F9/h4w8fc7R2EGY0s4ateaDxkSCJ7aaFEHh6cESMjtWq5+mzfbq1snPnHtvbW/TdiqPlktT3LGYNyy7xwZMVB70zjoiKllRd6VojPhmxa2j+dnA+XjH51H5cveezDj7jvOP3dla7rjJw1escv8Z5aXWX6W8aAXPh/R37edZ5U4T1Yozg+u0sJPe6Pv83URBepV0F0TgNGbjsuef1d1ut07Mo6MZ2y6mFY8Ee2xhLTraMhv50jCZIpcHrE5jfJmEzfQ2EZW9oQitWk6AJVqMgeIPfhRJLgKWbOTLemdCREhuQ1KraGYufJ+ZMFNsIytXKPUHAEcQqF7belIEwEyQKeWUQcUQHNCE4h1cloHiUIBQlQgle8CVrIiBkJ8QyAItt5eGdGe892uKde3fZ2YYQdnG5t2fRBqFj3a1YdUa9m7MSsyOoR3PL/Znn5760zQfPX/BkJS+lCNiYG6+AczZumhLOmzCurH5VEahzNUVZxk3puAZoi0GBqPCXP/yY3/jN3+a/85/+A5pGyLkHAecCTgQ/pHRYr5YamsnrNd/90z/hd3/nd9jducus3WJ/7zl3799le7ZF4x0P723xfZcJM9hpHFEzq26f6BdoFp7vHZB2dllsz8jRETVB29K7wH6/Yr2/z1FcEfue1FmK59/91hf4W996j8/21/zbHz7hd7/zYz7ZS0StdStfZxtUXeCkdTgKxo039VLtNje4m7zWlZCOs/o4Y5zqKp8qBidxHJn8/uraWWjBTUaun9f/m648nMdpcF48yetoxxXYmwjivPUAwuO3a0rBuAFV5cC8Ambtq1omgEHq1aKU4d9cTC4RUDEhHxw03izvIEpQywcHE7ROM16qQK+KQSIVH310hY5Wk3kysMyAKeGcL8hC1XBqEGAXM+teidnS5FSlFEqyewiqBVEYX/7shVS1n/I8CtAIc6e89WCL9x/OeWt3mzt+zWy2oF08YMsvkG4FuScr5NyTNLNcL8tAGs1v0oi4yBfvb/H+wyOefRAZY1qvu4gyNf3MLF7BKjILmtOgoIGQk1X6q0J+IoKGf6t6MmY82Bx0KfO7/+bP+OVf/Cbf+pkvIc6Rc0KJ4KCPGckRKe4XCR7RhniU+fEHH7PfZQ4e7/HgDtx1jsP9Az7NH3D//n3WR7CY3SX7FzjX0KjSpQYLTlSSa4ih4WC9pIuRVb9ivtjh7u6C5WoNZNbrjqNlpO9grsJWEN5uOr70/pyff/9rfP1hy3/xz7/Pj/ejuQ4mwZvTtXzZdlaw1VXa+WJ+LK7009JeduO+THzUpFRVOeekmvu6xeFl72GKZsDNWcRvUrusC+usdhNKznEk6Pi4X9ROCxa8rlLw2lILp/qyTkZhczAKU7xM3AQ60cHrQCiIM1dAg+Xiezf2YP9VNcIi/e23WnHQpLJS0uGQIsQVxKLcnZQ0w2Lhe4pyUmog5PIcWUwJWCVlpaYcpKIvGCJhufJSghxVKumL9Z/EjWNRFIbdO55FawGQT58+59PDNTMf2L235N7WNg92tpDgWKtwmDPrFBEXLJitiTgS2isSAjuN4yuPHN/9RFlFQVRORItfeg6lZm7Ul8J4/s0XoDCxLkt0QVEQtPy/uhRMmcgiJUtgshLEMi4+e3bIb/zG7/HeF96m2WrIWuoCZE+K3lgYBfreqILFOYTMO4/eo2l2+PTDT1gePiXuHbJ/NGd7eZ91hMXWfdptx+79bfafedpG6ILQHS1ZLGYw63l++JythadpGkQWBB94dG+BUyVF5dmLNat1z7qz+332bMmnnz5jEYxr4cvzzK98ZcHH3zlklR1Cmmwk57vfbtyqUnA1JHVDI9Bh2I+78y7s8iUskus+11UtoZe1mqaEYBv9Tr6vgb6ncQ4cTyF7HS2X+T6BTJwxlidqw7xEsNzxv99EZeK2UiSPX6tueVpkUA1qP+/cs9rL3P9rQAYKcDZZmCIOCyI0IVsh37pBVUXAbazkatWOOoFg/nevEJxF3VdhJc4sbwGDtDG3QfAOlUSpMGDR7o5hZipS4byUl0ON4EeME8AEtlID47NCn5W+kuoUeVgFvxcpWQcMbvNsWog9r8Eb1p9a2uHh2lwfy+WS9XLNaql4jew8WfPW9jPee7jF3bsWyNbM56xTguTxJHLKxOhwZJIu6fKa+3PHvS3h4z0LyHMvsYBcqUgIUNPecp7SQ5u7QgdOgXxKhHZFeQzCLjpYmVv7pQd+/zs/5Bf//Lv80t/+OcQFJClOeiI9zi0IoUUloq5B8PzVD/6SDz/4IZpgu20IepenB495evSEebumP1qyvfMc5zIP7uxCPGD/RWR7+y5d64CeJnuOlj2p8Szmga1FQ1RzjXSrxOPHS/ZXiVUHKQsahHWK9BrJ3ZKu61AC33xrxpd+vOR7L/KIgMjERXZGO3WDOmbWX32TLp0oiORjlShfbjO8KsnMywRh3ebmDeejAuMxp/9+3AK81vVf8nndGaeenq45xV5/+ttFMSynreubiHPZWC/VfrpEH9eJ07mo3T7PQH18nXwiNrB58hnFWjZWwbGyYBU4tY96iqrggZkTPLm4FMZshKmHXMt9qIqlqokpCq5Y8Vkh5TyeVa1ZKeiCH7dMV5WGEhuQgbWO7gRVSmpjSV+s1RKx4MCskASkFBVy5GFF5BJR
f9ArT1hR0yW6bOjHos8crIUuHfKw69nazixSg2/meN+i0aMxon1m3a2JAXoCTjyP7nR8tl8qG15zHTnnCgJjwj/n6SZvSk3OOgg7VT3VMqlbj5b5mvZfBZ/i2FtFfvM3f59vfuPLbO/uoikR1eY6a48wY94uOFqv+Ve/+/v8k3/yL7izDe+/fYc7s5ammXP3zl3m7RbLo55PPnnM3eWCBw8f4MjstEr7YIt+nbmzPQNNfLSXiL3SrRJH8Yh79++S+jWfvnjGh58esXdgJFMJS610AdbdEmVhhZbcjJQ897civ/yllo8OjjhKbChLw9hw+gZz4sVntNpET24Ol7fASkquYqRcTDJ36rRcoKicds2/Tu0iaFcvccxlW53nz/tYvy5U4E1FJqZi7yYUsOuukVtVBmz8T74aUr4UqV63MdBQkEJxe8rGI0L1v9ava3bAJKmArIpLRSBXd4DaMVGqcDboPpfKh7mAfc46MMKiYvGO4LcBGrlYeKgJ+GiPMNzeQJYqpp7kopikqgBVAVlJjwpmZAF4ytEakmQLynNCcMIsJ4IIfU6ssrC/XuO3BF05fJeZNcZ6GFzmUNcss5JXmV47MoG5s3LNKcqpTFgXNXWK82JuEkba4Wp1aqmyWAMIVU8XGDpZA64Gtw2CUks2iJBzps/wZ997zB/94ff49V//VcQmldh3ZJasxLNcRf4//+Jf8v/95/+W53s9Dx80tOEIudsR3CEpz7iz+4A7d+YoCZd6cg7MFg2z3BEPM2jH+jDRbs14dP8t9vY/AB/oc8fT5y9YrTqQltlihu86UlJ8tsn2SdhdLNje2iawhvUhQa2U8zffnfO1J5HvfLyeoAEnodQzfZnlp2xsGee/9GdtgKPyUZ1nadJbAvHI1H93Rt+ve1N9ExSSs0bgLDTgPBD4Ms9zGwpBxsxUV4nALrCcr9Ku4/q6aXfZ8bV7XSv/Jte/uQhuBou5zhq5NWVACpxeBcfYFPMZZyP7mUCW6syt4IpftYhKs4w0W0nYUu1whOMdSTPqbcv0IeAbN6QilloxxUVglenEWT64OKEpOYYpK/16zf7egfn8sw5Cc3vR8NZ77+CbBucwFjwBwdPHSNf3fPzpYw6XHc5Lgc1tcvqUkEzJHAAfHCGUwDupftuMpowWV0OHsM6wwvjyQ87MgnBvLjy863hwr+HOTGhQNDtck+nTc0iwXnUc9UqfA1mFmBzqGkLqaFXpq4J2xYXjZEr4VBQXGBQCYVQONpkI64KYzL8IsuHymbwRMsVyhKM+8S9/+9/yM9/+Jg/u7+IkkpMnRmH/4ID/+p/8c377977HMjpwgYODiNyDh63nzlzwvsE1kT6vkNDgF1uknFkd9Wgn9KsDlqvIp58dgTQ8fPgWu9t3OVoemvK06oh9RJyjcZHFDFg6ssIiCF98EPjql+5x587c0khdpusgLXu2/Jq/9b7noyeOpx1UtbOmQ1708lYhYkN0vpA/9fzTlLG6yU4Cc01dzQV9cAPyddr1zuv3TRDUr6udhRIc/+5adUFuYSzdBME9NVbiJQXh61AiXzbAbtrPcVfXRdc6npVwnHxIymc3Rav9hiMDsqkEDEi/+ZjRwuon9iCGzNvxfVaclsh1rUWFgModUIrBxKwcZmVrHviZn/kK77zzkJ/59jd55/13cMHS33LKhTRAB3IhTRknQtIMMbFaLTl4sc9HH37AH/ybP+HF0yP6pENWwJ2tlq9/6V3axQzNidh1kCIpJ0iZpJG42udw2aFA41zpPxGzoQmNF1qnzBthFjKNWEEl4yCAFCEmYRlhifAsZlQ8TQgE7Wm9sh2E+zNh1yV2ZwGVxHKVyFnZ3d0hpgyu5ejFkUXzq5EWKZnWC61zLHMeXCdweY2y5qDnvIkMVMUia2ZapOi0vlVKgGhxBlWFgDr3g6JiKE0WUyy+/5On/NEf/Rl//+//HWLO9Co8eX7IP/1vfpvf/6Pvs+wVdaY4Pdqd8e7dgD/YY6u5w/1tz2wrgASSCq5xdMnRZ8feYeSOOJ7rPs0urHvYf/IpKdu8rI96U0KdoKljpxVaQFvh4e4d3nmwxTu7yt15jyPRtAsW6sh6hOsNE3rnXuBb77b84Y8Sa3sqqhvq4g1ygg1MNpUbsZq0hK2gFlS6IbkupwictW4uVHLeUPj25ZsOaiycVBAuUgRuUngN17xkXzd17bPaVZWJ6xBHnXXeddbY8XMu6uMq6Zqjkn9+n5clbdp4J6/gA741ZeDUHM763WDtF+k8+OpH7dTKE1tevvdVachDL6pKwhj+3Fp4/MkTuhd7HH76CVuLBlGjsiVnYheRnCElcs5IzjjJQCJ3iZQSfZ9Zx4xLie1gFrSVIoaDp4f83m/+Lr6odMFBM/gNIMwtGyE4oyaWUtdAgCCCd4qWKoKkTMyKVDYiUSt85IQiKznI0OeSIZ4VH4SmVba2hHlQuq7j4CDhnLDshL5fc3SQuXd3mwe7O8x9y8GqYxUjKkKfhHcfLfju06cQI1W4XCVn1QibfBn7PFl09dzBhj2zCQxCZ/Sh6wBdD58VdUVUcc5x1Ef+5e/+Kd/6ma/wzruPePzsgP/qH/0z/uQ7H9JFSDgkZ7woC/F0B2s+6yNPnj7j0e4h9+8uuLO1RdsG5vM5d5qAAl99/y2cD/Tdmk57cA3LtWmdq37Fsot0/YqM0LYL2jbgpGMxa5i3ASHy/PGn9Ef7HK0cfd/Sd2vW66UtDnFsN/DLX93h8d4Bf/ksnojYru3UudgYD04g+GdZKKcJ29M249Pj5e1CIpPQwsF7UJW3s51Mf90QgdqmCvZUr5qO+GlFlk4TZtdFWK4SZPamxiJ8XjgKzmsX3fs0WX7EQW/o2ue81cfb7WcTbGgttZ22EAfwpPw9cg6kXBgFJ+j2GDNgzHefffaCPZTHH5Y4ArFCReIUp5WEyKB5DwMLoeXJF/rfXGrQA6JWndACBc3VkFG8c1ZsqMCpTqCPsC5VBVWxNMVcMiJEaMWyHRBLRURKh65UO3J5uE4S6LIJOEVJKRF9RrwjzB1+y4PMeLbsWa8zRyvlYB1R13H/aMX7yyPeun+Hu1uee76hz4JrFtwj8IO9I578YJ8aTJkrrH/JeTTWyInQIpe6A47jpDUn5lfrcXXuKt/BdM49mmspZKjFqcQpH3z0lN/9/e/wcz//df6f//U/58/+4iNiLlz8ZZ2sM3z0YoVLQkOGpDxd9mw97diZHbA9FxbzQOMEyYndnU8IwbM1nzNvHU2Y0agyn8+566G9OyMzA+eYzbYJ3iN+B/F2n30fme8+ZE9gdXjIUg+JSYk5EWMuKaaZh9sNv/TVGR8fdBxEmSzkk9vAhiDXKn5r2d6z20X+0Lr5T79zp2wcVbETMYW21uSolsx5DIXXES4vgxK8amv2Km0Yn8n2ftrTXGQNXgeOv4ko98tc97igfpXjftlxuE2l4fhafVmES4HzwnSuqxh9bkoYi2w+/eag1lQ72/xc8c1ThZZSFAQ72onB+K4IWhWz6O2z8nMwqhxaBPFQMEgZBP1gyhfY27gISvrjMCngXRWECk7
JovQRlhFitm1b1eINPHYfAQuMU1c6kcJFUDiQc2+XjQqIo8lWzAhREo6UMoerzKf7wmoNrc+s14muszTEPhiT3rNVRH1ktrXi7lbD9myBd3Pa+YJV7vm5d+Y8eXLA1sJxZz7ns73Mj5+vWEYdfYZnzpshAlWO2by4Ua5dZu6r4JbKAjGWN65kRuN62AxQXOXEb/z2d/hXv/sXfPr4hdUsoPYlBVQSni4TqsKdxsZ/P0ZaEWZeaQM0oWPmhVmA+fOOWVBa3xgrpRfa1tN6mM+aolB6RJTFYs69u/fYmjeAw7vGlM3U4TWyPjqii9kCRhVSyiAOJ57crfjqfc/Pvt3yxx9GVlLHL1HzTY9vsGOWauG7OEfAVM34pEU5VbjOgE/L16cp5wMjqMBtZ85fdiN8ExSBabvMRnzZWgKXut5NBrS9YWP5prZpPMBV5+oEEnQJY+xVBe6+FmVg80EmL/kUDRVA68a+aQllsb9CsdSdG89rgj2UV0UoqYiulAQunocEkC2fHQEvzgxyKkxTN1uHUd4ALhszntMR2saq67lqZZULpGQc/CMREcMNDvUJymexxCuIc6Sk9BgK4UTJzrFORmJU4XNFwDesyTw5UvaPOjQ5+qiIekuNDMrCCVutsO08so6staNfdaw7RWl5cGfBl3ZW/PLbjtQ7vv62I3xtl9//qOc3vv+U9eqiJVmVpaoQWOqniEX+X+adqItaKrsSfpgBVd0IvDSFYxKDII4nL44gWwaIFKXKVlT9aRklB30muEAbhFWKSFZ8QYSCNzfPVoDtuWPmBUikPuICtI0rpbFXtN6ySdrGsWgP2Xrygrs7W2wvttjZ3sF5YX/vGfsHezx+fkivMJs1tE0YlKe+71AcISR++esNT5eJHz7vC/IzqmDHLfSRvnlUnodldcyVUIXL9DOpQTZV9Z24Y6bzwWl9Uue3vINVoX5FsmKc87NT6T5PgurC2IArgsJnjclFAuJVWfBT33emoLA3dIk31X0BN6d4DVvasZ/XbZ+T1MKTEKilDYL5VAukVg6rL4nHKuOJOFJK1f1qRvXgWBacZGYeGrVzpZAN1WtltGxq5WdRDqTcB9jvNYBRSUOGwwjPMtmSLSGrpgCKAlmJasGMplSMm2YFDBOUWAKGjRkZ1Q/UIc4QiqNocHe157Jm1rEAkKr04lhHc/17Im0DO43nnR3P/QW0vmfdJZI4nj494mgJMQq788D9u8IXtz1HR4714ZK0SrwT5ry7M+NHq9W5GqpU+V3mSHOe8ETU+b1oRZy2MdVsExuNeg1TMgwJsvNkQNYHHGkDZRcs0yTTRTj02bJGisIWs7mNfHZozBz2mRd9Gl4Ih9K2ntBVQZyYN9A0nnadaaRjK0QO15kHu7COQt937O3t82x/xdP9hGsc24vM1iwSGlNWYg9OLNZgt838+rcXbP/I8cMnPUe9BY2Odr8pWiMqYIpxfeJaDnt0xRQloixQHXw+E2FUoDWtyrZezcLfUDJeQbtMpPbnrb1shPhpysTr9qHXVXgcYxK4MUXg89BOQ6yuil5tbFvDZ5ef86vEep3XbjW18HjNr0EJKG0Kf4xCt/r7q4Av5D1DpPrk/CKgrIpxgVezDn6Y2h+TfqsrQmphnVxFvo7S2ksJYDRXg51frKzKEaRFQGF+/uSEaGUNBhSggh+xpEMOFi2gedzEK2Icc+ags+BBpSZBZEMPUKIoR+W0IMLOwvP2Xcc79zwPtoRFUFzqUQ18ugfP9hyHR0qvjk8Peu4fNcxb6HKCI2V/dcTztObo8FIzWgTx1GId85MvRaV1rG0u5KrElWtsrHH7zhUXTUppVLgmb1ZVJDKw7DLBCQuHuXOw8tYxCV1X3Di+XketrkUy6mgv4PA0HSCKJ9M6YSvA8+Wax3uRebsPGVbryMGqZx2VhThCcmin+GjZHCjMZxlNVpzqy7stD35+m+89XvGdDw/45EWiT1rGsawfbL0ZMlQt+0LoPQ0uLMqxUOp05JHFQQbov7BmlshFIaNFIbhMGtSrFs6nRULfVLrViWuVn69TrJ63gQ9xHZM7vC0l4KKgxeNKwPj5yXu9Dopx3WOPt9vMVLnWuyGnK1CnrflXHUx5i8jAqEdOSrszFeZTRcBksQ4WjuYRBt4wt6nWYgFZRUrVQfs9a01T1KFceyW2k0qRKyb4Q7Xkq96rRX1JOtx+V9GI4sKY0reqjJuv1SCYoAKCRfJH4zBAwItlR1R0xEm5t2SukB44yhYnYMiBBXHlLEQcuVwvOGV75nnvTsP7d4SHW8LWHJbLzPODwOP9zNNlJibos6OPmcZl1tqx3du9xLXwrBee9PCiyxX6uGBGRz77MQCufuDqTF7qJTktinrMOplmO0y3IUfTBFTXxJwn68YOsdARI0dShGWXaGZCI0qQTBLHqu/pFPriikCdnd8LTpxRWWNzbUqp0nhH62DmMjtLYREyXjrjhlChaR2ztiEjHK16umDVMYVMM/M0ZGYh4ElITtxzgV/78l2+eL/l8WGia7Z42jf8yZ/9iO4oDZ5+LWVIq2DXItjrKAGQx2BYwTTjQWcux1aFR4d1OSoE01iRU32aMs5wvepZ0/uylsp5QvCsPO7pZ+e1qWX7utpFvubz3CT1+9P6u8infFOBmSeC5m7oesfbbQr0q7bT7udKPn29+ho8Sym4aL1c1G6VZ2AQDhccqeXwQkODUILtxwiyjQ1I1QRqAg570CQ41VHAig4Wk3dCwB7cCUOgmIgx9kkGdZBLoaKSTUiKlbrXEIlq8VdB7p1YoSQxDoHeO54tIylDzEKXjcolidVMaLBqhI0X2sasRO+sgqIq9OqgUz5bp8lmbwIrq45xCJhbpBFBktAtHZ+uMn3K7K17nq8zR70pF6iSc2GaE7vvJsG6Fw76hucp8WwdCxJxMXxcZzRXwaRTHcK22uOW3lWsvItye1UtrTJXpXECrXi1uIB5G3j09jZ9XHO4tyImRxM8TnqCM56AzsOyg16hr1Y1NlZT9MlhxaKyZnIhtTpMmcO+KCzeoymzpZm73uG9I/YJn02BCCKE7Igpo7khC6Quk3VJ0DVf2d3h4Vz56Nkef+/v/QO8tPz2v/lzQ0dydVSV5s5P4TRizk2NzonbjOcYhktQzZMZnwrh4yJzVJdl2skp83MT7XX5wW+jnYSLT74fp0X5X7q/Y0LjZS3Ly87FTbWbmNPXFXNwnkJw2Xu66Lgh5uqKivBZ7RZ5BmCjyKdWK8NNNt3yrdTjC5KAwZoq1doRs2AUENvcarDZMhoz4cI7Zl5pxAStw9IAQxHAwQlBrJJg64s1NQnlTIVQJ5b6Ab1Yil99ClUtVQsFLzJkCTSiSIn6L+K7CHCz1rxAk5RmqhJN/d/VoM4FmcjT8SvR+pLNR18WS5eUF0RSjHy2b4tjlTLqPUfRlKEGJVQ/BXYtsucgKoc9fBYzh30anvFSTRiRFyZip6Ihxy2JG7bDVNWuL2bRem8Kzrzx7G4F3n64w4N7WywWgU8++4wfH66t6JN4Zs7TMvJWhFkJFRVH46FtrK8ggveFKdJbMGFwFt
PRd5GjZc/hOnOwVvo+EQyQoumUPva0jaMFkiphwrIYXAcp07pA2zjiUY/GzGw+5+2F48mf/QG/8jPf5vf+7XdZL+OgIA/PvhGgMcEGtNBjc1JNsHCJQhMt46kieQgOPJkqKCdQog317gJ30MtsxucJl4v6vA6z3+tumwjYzbXjgukm4eY3JcDvvGd6HWjCZRSnyyoEcD109art9WQTUGFPE5ybEPDU6h+RhLrRqRhCIKVeQU3dqwqEw+B3LyZ4baOblD9OtllmsTx+LRB3kEqxWwMIzcVQd+GKMtSAQxlvz0oQq3ES9CXm4Cga22CfLUZAxGDr4JyRDon5fjMFosaQA1eEcSITs6s6wcboZTXBJVoWixMOY2aFWcSmpDiLUlQGTgZxxTpMmZSEg5g5ysqLBIe9Wj2GCZHTRa0CNcr4czIsV2pXhy5tVdSzZq3j7UfbvP/2Xb7w7kMe3FtwZ9Gwv/ecDz/+mLhekRJ0Scnamz7qlACI83jvCBgL5tzDTiss5o5FC1szYTELtI1n3rS0bct63fN8/4hn+4nHh9BnW1htKWK13yU8sKWCzj0ulYJKCegVRyI4W1Ox6+ljj1/2uMOO+bxlvX7GWzPh0b0dPlw+p2SdTkdg44PRpeKKQqClQJcdZ+8KpXrk6N7RqoBujGtRtDbG2w2jPQQp1vXH+RvbbVjvJyHrz5ciUNubIFiv2l6VsL2MZfwqr/8mtdtQum5FGdh8EOG0RxoDnCpiAFPxImgJHKT4OSs0PZ4jGGwfBJwv1o/DauFNjkuquGxWdvZWHle1oAfqCpWuVaIbakyXcsi1PkEuikGhGxrJgxT6qKwTrBP0xT2AYL5aZ8pJTWPM5fxeBadS3Bqm5KwUjhvq9ryOVOIk6uaNQkjleRVjWGQMeEwOcELjvN1jhhd95gBY5yl5zCUVARmVgWpmCmPq36vxIxcFwEr1WUloD7s7Ld/66lv88i98ky+8dZe5V2Jcc3h4yGdHh+w9P2S1LEWoCsLTJAsMzIyuE1eC6jRJ9RXhVJmLgPQ4jQg9wWea4Jnd2SKQyXlNjI5VNBdAQktWQEa7SFZl0ZrbYJ3BRYtrCU45ij2ieRCz3dGa5qDHN4Hdg0PeefSQDz58jkxItyrxzxirIfZ9UVq1fKduqHxQCI9qs5oew3umOlQtHONexjd1QH6Gd8iNH14QY/AyG9hVoqQ/j0L0NtvndXwuI+iv5KO/pXade7ponb/s9xe1W1EGporAsW82/pqO3RQJOCGmtFQilPEcVSMAmjkpyEAh9ilnmuU9Kg2pbKI5WRxBpTv25XPFctQrIRE62FPmGy+le8v/yckg2KxCl4wWOakbUAtXYFiqhVb6jXnk96/pkM4ZM98yjvkXp4PCI42zq1BFFdAOE0Zq5EVRgZTZah0Oz1qVVcqsB8vwJDXqea0GsI34dcnB0DqRF1uKZ/TMBAs69ndBglCaIIRGePutLX7xW1/k57/+Rd57tEuQjuXRCzQmNHUc7O+xt7/k8EgtnROb+z55goNAptdslSazxW10qix7OFonFmvHUeu4txDu3zEehCDJKKPFsWhaZr6nlcQ6JSQbYuWCINnQpS5aHmmbC3FS4TkIwcbJqcWRiMC6s+JG0nUsnj3l4YPdMpwyCH8Z4PmNcL8yL3WM63snA9+FMQzmoljIeByUCoV5TLcdVN/Tp+g02PlVCZzLboCfR9fA37TrtzctXuSqwngjSPqC+Kiz2nlBpFdtr4FnQCf/Tb+DDStDy0YlI+QnjEFqtg3mjX6lKAShWNvVdZCp5DagybY4IxnSodhRofNBCnwvWqz/ej/1nyKXBp+tMEC4mrXwDDAWhJ0qOHU/HyBb6yCpmjVfLqLR8r+ry+H0aRVUyygUhSOVZ8iZImDABbuBlC2qPibQlDlKFP4CI/2p9uX1WhUGZx9x+SCmzc9ViyU6nJOZzRxvPdzi6199my+9e48HW45780yra54//5Rnzz7FhZb9w56Dw31SsmwAL5Y+mNXSNmN2RUkbg0WzVUWmS5lVLxx0wp5Xnh8mnh4k7u847m07guuJMbOOysFS6ftcgkUV8SZKvYwZLl2vpKR0CQsWFcX5gmQ5R+sESIhz9AAZnh8cML93B+eEGAsSVuZLhrGasgpOgz7LcVKF5KgAT+fhzOEftOxNoqGz5va89Lh6vSmR0rkkPNf0k16ZvOdKR//1bZvq+JvX3iRU4FXey2UF/OcgmwBGtrrNmy2M8xuCd0QDZDimQsR5gEenbWJJirkGdAi+q5a3HeXFYFOHVO7hYePc2NjkWPeFUCCrDuWQNyv32TEZQV1RNsrn5oOt/v5CVVzw3LqdO2VyPzrSIg+jN273ozVWGA6Lu6BmQTrTS0i5xE5g1t4qZmJUVllIxWosOtGJdjlLS05MxXmpUJd7WTah56rsBQd3d2f8ws9+ib/9i9/kvXd2ictD9p5+zPLgKa27x9FyzaePn7G7e5++SxbM1zja1vL+tVekM+HdJZvS4MSyMZzy8P6Cw+Wa/UP7XrKyjJllL+yv4ckysbMXmVuhClZ9Jipk5wvCkgtjpOC8N1cUdS4EyUoIDi8ZD0i2ANacMt7ZvabiWlpHpWlai//YGJUpcnICNxsOkcHsH5GoaT7zphCfCFW153BDPMB4/JkzdgkBftFaOs06Om/NTBGBy/S/eS9vRnvVXArTdl1BNQH//qa94nbZOToRI3MDSsgtKwMTaFLHzakGN9U2PqMML0meuhqK0D7u07S6g1YpsAbeiVCEdhGIAh4LMBRnxYSqTaWF4c5pteCnboZCkav1Fuwb53TIqPdYn4gpHFr8Ekb+Yht8TJhrQkqdAgetE4K3+5Jy0znDi1gtP8MtptkO01Yz4QeonpFUxyUjafIwUDCnpIMicJ7Vd9EGpWUuFD0Z3DDtZwrlTjf3jZJ9Oj5rib9wud6Fsph7fu6bX+Tv/NLX+PJ791g0SusieRboQuDJ408Q51h1keUqsViYota2DfN5IuZMM/e0UVmuleUq0a+t/LFmo7aeN553H23zk0/WHBwVqMZl+qJhxQSrHg4zeDJOLSYkqSJeEWfEQCLQRUMBmuJed8GhOUFWQrbP2yA4Z2RE2QkeIUWrbdGnRFhHfNdtugJ0WIDjZ8fRFHRTyKgplyMyZ2Nf3V5jCm8eHA8D06E6KifiVTaf6/hMj+dJH0+bOqvdlCC9rGC+aUv5VSkCxwXGdQPuLkKGLtMuQo7+urbruAOmSvJp43rd2IFbVwaqNVzFqYgJwj5qCfSz74cwpgE+Z4AsRRTElZ+2UWuxhA+7TM7WZ+PNQvYihVPABHfrrAKh85ZuFwCPA5dpfLX4HVEdfY6l0IwJA9uLDctwhdfe+O1NoHjJeC/E0PKTJ2tyLn3L+CJ6YCZC44Q2KPMgVjTH1WBEISV40cOzHlMoMKF+WtOiustkd9KKWqgpVEmNEEc39InrbUNVqEz7Gab1Eufa+W7DKtWS/imljoQItF6Yzxse3N/iy198m1/9xW/x7oM5Xpfkfs1qbc+5Xh/x9PkeGhoA+hQ5PDrgzs597uzcR
cWzvSP0GuhiYtVlXuytefp0yWqtJZsDggrdakWKNoxbc7HqkoVuOiboelMIatZ/0hp8qQjRFDxvCqCn8lxA0wqhMBzmmEsApo2/D0KUbJTbSelTZtUpzSqziGljXK8iHI8jYogrXeWC+ZgCNnm9ALX3xY/I0lDrYSLgL9pszrLyL2qvM1CwKj3nXn849pJ9XhT09QoUgZsWsDfV25vm438d7Szuh+lnZ51z/O/jAbbT86+jENyqMrChpcIg0L03bv2polCbDIrBlPTWNrl6eO1WKdX/BGbBeAZaLFe8kg8FVwS3t0AtL9CKEBDEOZzkArGbRdiL+YEj5pPXDFkyWQx+NwVAy+8lk4FMij1eDKkw4uKy8YtZhY2q8R9IfY6CIHjBFUKZRePwAv05isB0bGsVwXGhlQESQaUUbWICH19nDieuA9Xye00tOKUNi1IKziNMlJZsSoCDxTyws9XgxNF1Pd7BV7/0Ht/+1teYz+DFk0/oDz4lLu7hGkATMUbWfeRotaTrEy9eHDCbt7z19tscHqxwfsb2tkcDbOUGZM6qX3O0WuEd5L5nTxIxKa33pD7x4adHRAngFB8cc1EWcwhe6aKyf6QcrK26ZSzPXkNCUYP9k0LjHc5LcdFY0SXnHaFx5pLJmV7V6i2o0jiH847Ym3tg3SnbSdg/PCqo2MVb8mAtYIpGVVphJN0K3vgS2gCLmaETs+CYtwHxMHOBtgms+jU//qzns71EyhXRezmxcNUN6ryN81W1y6ACV0EEXvU9v+oI85tsLxv1/9cplfB1tNdUwrgGJVmkgBMj7TG6v01FYHRWVXh0tGiOCzRhDCAMkvGMFMTIlOO9fFis5Fh28VBenIp4O2eBeE7MWsKVO6quAZGSu2++CCPgMch+FROpCD8tbousigQTFF5LWmDpN4vxDQxP6wwt8GLR7WeFEV440scttDoe12yjIoAJmzo1ZzStz6QFvhYrKOWc4p0ym3u++pX3+Zmvf5lFa9H8bduyPZvzxffeZmvh+fGPf8AH3/uQuPVFhLsInnVnUfp9zPimRYKwvb3LbLbAeaEJPfPFFtKv2Z0FUhJWKyWp0obM1txx/06DqnK0NE4AaRz7R4k+J6JWIiNLAdxZBO5tC9vzzJODxLMVaBrZKIefVG3HXDsqkHIJTuiFhTR0OVLqbaFqSEKXhfUq0UWlTxb8l0T57MlzSubjpVrdcMUZTfWiEe4sHHd3Ztzd8tzfCdzZ9my1nsYrrTdFlmzcHblPrPvMB0/gkw1A/GrtNEE+hf3P29hPE15vikB7E9tlLco3QYheBV06ft5PQ7tO1sBpCt1F/XwO3AQVrmSwqGpUQPBGCKPTjae4AIYgwknFtppaOLpPDd43H7xZ30NqYdkcEQNIp9dQMY7/WHZ0zwilJ80lIM8u5ASyMyXBHMFQLW/BFVIiyyboaxqZWDpXHhQZGfnuxQ3PoFhk+ZCZgLBoPVvBYO3hQc+d4yKcBtP7Amv9ms0sfRlQGTmnTzn2Rwhwd3fBWw93eHh3waMH93j37Qc8uHuX2B1wdPAZj+4/4gtvP6BbHbLeX6L9Id1qxeFyRR8jznsSHiXTtB5ZHnHnzl2c9zTtnJR7cJl1f0hWj/ctMS7puiWr5R6r5QrJgfnccV9ahDXLI0NVtmaeLhVFo88cZUvLFBIP77RszwOJTJKILjN9mgRQliG3rARlHcfSlDVqxC3Xg0KmYnPuHWiyQERT+zxtUJax4+mzpcW/6MRdxkT5kKqCmFBvPews4N2HLe89XPD23Tm7ixmzxtNIj2hHkITGDtQRgqePR+Zmyx7VjnUf2dtP7K9SoeO+cOFtro0LPrsKf8BZfZ563CV9/jfRruoueBPabebj6/G/5OS8vAnKyeehvUyg4FX2+ttNLdzYVEzE1ih2V4h4LGZsEO8lfz5Pfi+ugCIcxx5toTdeacVoiJ1FKprwplSmK9cb6ieqgugguHO5q6x5IGEZ9sOaKkZlPgRxuWzFMkSNx2yFklQzqagfVCRdijJQIHxVIWUd3A4kU2KyWkngRXD4LhEL/OsmQ3g8VUvrz0k1xBNzMA3me5nNM+tYjvGsJvWebG4f3N/h2z/zRb7xtffYmSv0RwQPW4vETA+5f3eLfbfgz//0D0n7L1jMF+AS+/t7HC0TP/jxx6hkvvjFLzCfz1kuO1AhhAXvvnOH1XqFDw6yI2SH9x5xC/o+mQroM20j5BiIUVDnCduCdy37IXK4H9Gc2J4FFq3ncBlZp+IiSsqqW9MGq2GwjpZFkoYaADbfNq0jVJ/rrEhZL1oUS0xPSAm6bJpByaBEVWnnnmd7Rxzsd0P/VHSFyi5oaYqzBu5tCe/c3+K9R9u8e7/l3hbMXI+knuCFqBnBaL9b7xHXoszAB/ABzZGUHEl64hEcdLCMir0xei5AcJngsMsiAG96O/4U18NNbr69eSiAFl6XET2zj6+nkLz+57nZ9ipdHseDRi97iVtGBspCkHHj1PK7MAq6IjORAtXWYkCVZ6BujFaG1dACYeKPry6BKjSzDnwCRv+bkRLh79GhkFGvZq2jmaxa8rIr1F1NMx2EcdZazkeZ5mzlbOyDuWz8liFQaxRkiyMQSzPsoiIlt71ajJkaNJYJOLwzAXS8TYX48cCn06yvG194evZGqOWeBNiaB779jS/x7//dv8UX37vP4cFj1gdPyXSIZkJWSB390Zp5O2fvUPnNf/0dfuFbX2D7zhZ7e0sOjiJxX1nGT3n8okNzolt3ODzOeba25zQednYWLLbniJ/Tx0jXHZBSJqbOXBAh4BcBVWHdZNRlFovM/XueZ8+O+PjjI7ouIs4KGkU1AbrOStcb62HMhjCljWyIuizKnGOKZEVpKoV1VWgdYqmkTnGujFaZQt845o1wcNCRUlUc8xAI2wTYmSsPdgPvPNjh3XtzHmx7tlthqxU8Pa4omuobYo5FEegRHDGv8dJY3QvJFqugCeca+uw57CP7KyXmosEeqz9w2fV0clN6tRv6raTnTX5/E1SZNzZeYNguR/T3svjSTafM/TS1K+/loswaf/FxvBbSIRj1aavOngeoGWrCW108Y0CcZeOLDCY2RcYOigMoXtxAIKSUWOmSyG91EMrGpMW+L+f7Ac4thY+qoNuwbuq916sDuZDWVP9qgXRjPaloZhacaOd0WUlZh33WyZjxEIIn54imwhFAZu4dXRwDJo+L4OMW/mU3gBObZ80SqIGBx+ZuY3OX8YjTIrClfH9vp+U//ge/ys9+44ssGnjx+Eesjp7Tr44ImCA6WB8SQoO4lueHR3z2dM2LFx0iH5NRnuytePIi02f4aO+Qv/jhIZoz3oNmi1vwTpi1wvYicO/+Fm+9dY82KG0Ls5lHs5AjxD7Td5nFYovtew0+BJarFatujZcFy1XkyfOe5TIbplPGO07iPowgyg20wOP8D6Y9lKBU75R5YzEKTbAMl3nb0LrArPWExtwEUv6XUsQ1DRp7DlbCah0RPLPWs7No2Vk03Fk47mwJ23Nh4aEh47TE0PRGh5wqM4czhSVHy37x3hdl9RAnPagn
WEUPnHOk3HCw7thbq6VdDnf2ckLlNJTgdQQIQlVSry9k3nQXwVnjeduCtdpH08u+qnv4vAUXXjnt9hpr1mJ0lO3ZxRVo4TUFEOpEmCeFPo1We2ZEALQcmyfFVep3zpUzyvioVKvfLHLRQTwPm9kQY1AMugFy17HSmyv9ZDHkwTK7xr7A0hVdQRq8qHEEOB3SC8UJnTpYFUKaUllRpNLTKq0Ti+RuhMXcsz2fc+/hLltbWzx/9owXe/usu8w6KblrSHtL+jTWr68lljf36KJkTZ3Y00h0GV+a4YhySFEDyncy9lX+PZHnWn73zpgNc1ZinGQaoGzNAr/+az/HL//cF9hqhX61ZL084OjoAO/gaL1GM+wfHvBib80yOz59fMSnTyIpwtO9vWEuUpmjnHXgnJBokyVqDqaDCE8POz58tmbxwR7bc2Mq/NpXHrGz2MbJmvW6I6aeWdtw7+4ObVhw1B7y2dPHrHJkZzsUGurIwWEy4qGSklcyCAfviAx/W0ZM64VZo2zNYHvuLWhvO3B3q2ExBy+JHDuCdzg1TgLE0mh8QThqLEaKHnUeELwEvMuWtioJ7yPkHp8djgbEk0t5YiGDOASPc4mce3AB33jEeUQdjpm5z4LDSUA00vhtNJvSs7/KLLuJe2yCDGwgTpP1UNdUdWGcFSdwWrtN6/UmKIsnb9Sp/cPtoBTH2+dFEF7U3qQMiFfZroLeXnc9Bee4d+8NVAY2BZH9m1LNdNaNzWWTnAZALAJ/2t/k9yrU9tcZzcLMC1thDCT0Ingn4IwTvvGW768FkfdA8B5vNIDEqEQ1Gtk+G5VrLILH+9qnceTPW08TlFlQ5s6iso/U0eWMKxZ+Tyms4xyteGZe8JpoW+Hegzs8fPCAsJixWvf47W1yjPgFbPmGhd/hQRfZubPL9u4d7ty5Q3KeDz74kE8+ecLR0YquX7NarYl9JJkGYu4XNVAaxkA3e8LJpjXVH3Tczuq/p21+DscX3rvPr/7Kt2kC/OBHH/Gdv/ghy2UiqzJrPf/ev/sLfPtb75D7A7oEKa7wTsmaefHiBU+f7JGZ8eOPjvjkSbQKj8lqM4ivSooU6mcLw6zKnJZIS5NVJS5DbSEkhX6V2V8lnuzt8cknR7z/9g5f+9JbPHrwiG69x9bcWeVALzQ+ELvEat0xbzzrRnnn4YJZ0/P8YE1Sm9vgbT0F75i3Le3M4PzFbM68FWYh0/rEosnMG2i80JCQ3IFkcjJFRtSTNdGG7bFmgCqao/Ff0NOLuQVyVohrRJTQOFISMpnQGKWyE3A+GuulOhovoC3iFmh+XvgCUnmnAqH4z1xwiHhDVmhxZCtslBNHK6WP43hu0BYee/8GivCJC+2iXP3X2W5CSL8JIveyUeivq91EGuptXu+222lkUPX3szIFLo34ymgo7u62fOMrDy513u0qA5z+ItkjjtpLhWanzy5YZUBPdR9svtaD5Q0G0Xph5pXGZUsRtB5xjoHgx0sxenL5To2BTVzGB3DJmONcsojyMPH/u8otADiNxijIBFGIJvylEZw6GvEkgdmsZd4uaEMg+Ibt7QVbjx6iWws6LyzeusO9xYz7KeNDAyqEJrCYt9y7c497d7e5s7VgnTp+8OEH/PgnH/PZk30ODw442H/B3t4LVss12iv9uqfvI10XiTGTs8UzjHZcUbqoRWwK/K+VIvpsSHRrLnzti3eR7jnf/8sP+fjJIV6UduZIKfPuWzt840sPOXrxlBDnoD1Pnn5MTGuODpbs7y95utfzww9XPD9SVErAnw2oVYFUcyOhjMyMopBHeuqheuVEeaEgSVGNAviTvcyzg+d88tkBP/OVu3zhnS0k9ywPluSYWK86JK/ZnglN41g0nnnToO8u0NgZP4BXvERm3hOcx4tDNRa3ls1/Tl1BnRxkwasMbidRIeaMF49GwdEgfm0Ij2RUM06MZ8F7xXuHc56cICdDD5rWEXJDSopqtGfMvbkjwgycQzWV+IADGrcG54nJ4REcCY0RdYI4c0fFtMRzh9A6YrYsisOVKcIwYeOcvKnTd3IzOOzNEJSvup33jMOuVLXom772GyD0Pm+Q/JvUrpo9c70xLkGDosS+49Mff3aps27dTXCZ9+NU61/kJH/+sXFSNZ4Bp5nGGevfUAdeRnhXMyZEBNtAy4Zt2pRHspJdJdQpmQkl71tyyUqQEiHuAC9EMYVBIyQ8ywS5nREWC9rFNjs7d9m9d4fdu3e5u3uP7Z0ddu7cZbFYsLW9xaJtmW23tG2LiCNn5WjVEbtI369JfSSuOg5eHLL/5BlHqyP2V0sEYattkEWDZ5s2eNarNSlG+y8l1l3ParXi6GBNt05DUNrI+GiDWWMopuO+ORcjxNqEzA9/+GP+ZL9nHXMRalJIhJT10Yp//Zv/hsXMs70zo2kE1QT0dOvMah344JOOvaWiXozHQbLNRbYr1UjYihDUG7Og0ylWzxipP7ABlsVdFLemgT72fPzJMxaseO+tGY1XZNmz0MT7Oxnngl1zp8WpgiTI0az64pP3hSExD4iLI8dUqv0FqzgpGXGB4AIpdjTekXJExBG8kJMgNBa86sBJQunwISESCc7RhDk5Z7IIhBl9XIOA98UdIB40llwVK0lsfBfe7lcSuBkxqSEAukYj9BLQaMpWzsV14BJ96uj6wIujxME6k3H2/GRU3aDIb7xyx5C6v2ljqy6WM0CVK7c3SfBWy/M0Stw36T7ftHbaOL0K5cCOzjTeMQuGll+mvSbSofNbRSeh+vsp264MAYBnFfdtxNE6mBUUwVchJ6M3uwYkDNBmjfvKkEjGGleyHpwvWQ954icv/tI+Z3ISlh2kHtQLrmloFzs0ix0ebt9h9/4DHr79Ng8f3Gd3d5ftrS0WjadtWxrvSbEn9mvW+4cs9xIx9iyP1qzXaw6Plqy7nth3dLHn8OiQ5XJFXCf6vueoW3K0OqJbrYmxJ2fQ7EgpDXEZ1swd4XZb1qtoFfZiJiUlJ4s6j6YNbVg2x5GZKRZztM4crlY2LrWc7iCvlacv1jx/vjYXTLAxa50rQZI2a88OMsl5ZLDuFbIRE1XU3+ZcSlwAA5JRFN8hNbR1xuI3a4St1rOzaFgsAouFY6d13N3ybDWJWaPM1eO0K4GmHaqd1bXoE847chacC0XQhlJ0qhaBakiahriTnO18CCAzqwmRegKJrD2NEyRHgm/RpHiUFDpEMpIDQQPOB5rWkWJDzitwiZzVEIZSQMs5jzMaJKO7TmtjqoxWx6LTZEqErwpqSxfBEUAz67zEe/tdNIAEcGuQOVkTJE8fM0frzKobM2GmTeo/enKD+uvg471M0yFu54b7fYlgwFclrP9m3l+uXYZv47jCddZcjuyjtv5EPI92Gr72nrDu+0vdzy3zDBz/bSptjvtJoNQetDQsFO+cWWXZLDNPdQ8wxMm1TlkUyuFKE2uCQ4c8bru0UMrM4VymUuYqEFVLgJdFYKdsqkdUYwmMSelUiAjZqZHdNDNmizlu3jC7s8t8toUToVsd8OKzxNHzT2l9wKtASVeLfc96vSZ2PX3fE7vEOva
s+kTfr+j6aH7mnA3u7/vytwnxlO1ejGZYJxH+oyIgAs47fONwQUwJiBkRwQXBl+A/n62mQ86mRWYttSQLAnJcAMQ4DmOdUfvajulsZM0y6k2ROEJQzNp0WusZGLUzDiQXAZStM5uvUBQ3s6DnIdD4zGzuuDNv2J55theB3a3A9gx2ZsrMRbYbT+NAyGiOxH5F4y2gJuqamJW+V8DjvUdFiCIs/IK26UlGyk8ODs3g8JDLuKSelDPO27OIBBuI3KN4JAmaItlD9iUaP3aINmSxxdQEh0pHkIC6QM4RJ4L3DZojOaei+HhEkv3nApqN4Lpp5qgmvHd4Z8pLStE4OHKDSM+6P2TW7oB6nAZzn6HFFeNxtEhQAgERT6cdqy6zSvbeuKGe6CSC5BSh9KYJhNcaxFdjcOxGfmrblaPh/wY9GNp1eRYues+qDMsoc++5f0fxsYP15a5x+1ULp8Z8Tc2SaunXVrSbYSPSwvNuooS8qXlXhaCm5wlWQ6Cy5SaKFacMQigXHy6VqbAUjYmFREbNNc06C30U1gm6XFIGnStMhB4XTGBkMv1yCYdHvHi6h6qSeoOPnbiBvr8KTPPfm6CvxQhzKWvcV6i8fC5FENdYCntmRyX0qPdrae8yWHAVO3FRYd2XmgebL2atg1BTOH0wS19zLlaplWvWrEjNYKhlepQBZakO4+pbPq6U1ELNlg3hUClWrytBddmhKqXQT2LhlfnMs7PVcnd7xs62cGe74c7Ms906Zi3MXcbTDfEamiOOZDOe15CKlqFC8A7FseoykYSIVW50ydLq1K3wweNcoE89Kh1N61HdIqVEjtncAQ6L7Heh+O4V7zyIIh5ijuZqIiBEYsr4Rgmux+NJlpeISiY4c32lnBGJiERUI6oeJw7nlaw9MRUXRk40YVGqckasKmMg57XFuLhMzopqT+4Ts9AStMHLnOTmOHeIsiJF60t1SXAzsq7xTYPmnq7L9l6oG6CAU912w7v3Zkq816EIQHGl6eu6+untVQrfEynHpwTAHW+3yYT4JrXrPvOVYjTUspu2fUYOlcN96C5JZ367yoBMfxar/dTnMx/9AMwXoRdVS0qd+ab9ya2JrI41pVqcYkyAk0u64serikLtH0ObSRRSGbVSyJ1mUok9EG+ag8esae0TeS1ojlYqIY+PZ8K9iMZqPReBXVkO6+SWwni4EslfWevMXVLIaU5pU+iobkKDXaTjcA/9DcJ8HPRc73mgehbM2pXRJ5wTTvwo0Aelpl6nsuK5QTPLSIH7c0FuDN+hzI0IeJeYeWHWerZmDVuLhge7LffveB5uO+5uN9yZz2klQT5CNZNTj6gpS2Ql576UZHaIE5ITYqeE0KDZYhAQaJqG2Ce62BFaZ9Z8ygQv+OCM3jhnlJXNjQb6LuHdCk3Rqlu25jvvkyPrihBsXToxLU6d0jhFgis8EYqTTHABUaWPHeqtOmNOioRtG5t0gDhLrHViREfiXSEFUlzj8LTknEgxgiuuF7XYg6ZpEJdIfUTEMhWCzMjJg1uTXQc4UkwkDcSc0Cy0rqXPc9oQ6fuO9apnQBQ/x3t1FcWXpjs+cf7LtZtIX7zp9jdBf5+PdnztXMZFsPGZKq3APYF2PyIihEvq6689ZkDVHf9gAPkG8uBqNg9/FARApjEE5tM96DMpyQBC5CLfzPoePHqDoqGT30GK5SqloA7FsDQhm1OGWCoI1onJRjkrQ1BbUQXEfPEizqxqkUJio8MxA9/CIFDLtZkSJ01Tt6YLwr4dYys22/ECMSfyvsu/ld7Jvnc452gaz2wxs1iFdUfOFu0+avSjkiO44bntPs2ibEvuvfeOtjGCnXnjDc7fatmZNezMHTsLZXvuWMw8wWfmweFx+GyjkHOHJiH1niSRrImcepwEyF2JiLc5EsHcO96Rc7VsE6pC30WaJtBFhw8NwTV0ssZpDxogZ6T3pLDE+QVaXRYcgbTmFkgRfALpaIIJa+9mVogqRVAh5kjWNUjAuUDjAyTBhwYJCaMO6Ax5kpIN4G0dJe3wbk5KgEQEh/cO8S0aTalIucwD5iZCOzwByUIfldksoBFUZ6h6ej0wxEF6HM4oh7MSgpK0wfseFxasUmSdIp0WZsVX1I5vdq9ScF5OEdjEsi5zJ+cpGdd9ltsiXnpdSsHfKCEn26kC/dj6udR60Iqj276w5YU7mmkotXSay439aythbG3qMygvWfk9c9qxx8+UYpVRrFEjHYpDVPnmWTZceSMtcfRYyDEtLJodHO2o8dY38/DtOzfELNSCSIY+GOKgk2evPkU2+nOTDzYjtIfo/lMWhUHJtWyx3UNKeXA3DFc4pllWb4MMm76AF3Z2tvnWz36bL33ra3T9Id/74z/ix9//CO2gOPUHv5Q4YasNzBtPcFaad3trxs5WYGvm2G492wvHYq7cmS9oXCIIzPw+rVswYxehJ7K2MYhrUszkfo0yJ6L0MYB4VA8hOVIWvJ8Ro8MHT87JlDG1stNoJjuzjmOfwZWxydmQoKQE50h9RF3CaUKkpevWhRgqk6Lgpcdli6fQ3OCYk/2SrA6HY+YbU/a89dWEGRGlyxHnMg5PThYcmlJrQtfNSGlJnyJNA5oc0SViLvTUPpXKlQEnnmW/pG06hBk+Nog6knbFKyN0sbdARwHN++TkCH6LlCI5Km27IIuS3IKcI+CRkCxmxQVoEqrJ4lKIRo2dPOvYmyJ1rN2UkDqPQvta/R0LorpKq1jYVa/3qgX2pf3DnBQor8M3fyvw99+0jbbpahn39UaUu0FoUJgL7Z2GfElo4LUjA9N24QswtSLUoFYGY7yUQmZkLNBJACIwMMjVnPUqeA2aN6FRugYKqnAMuTiuREzVDXHCbG5+d1dcCatlNKa8iRd9qgiUK516jfOaBQYqiy0P3qx6TbA6Kn5jlFOSMcf+J0PtnPD2e+/x7/2D/4Bv/+LP8eLZY/74t/8lzz98TIoWsJZdpglGEpUy7M49v/qLd/nCw12Cb3BOaYMnkPFEJGcarzjxNK4lxiPEdYianz4SgRWZFV4aHFJ8Wx4tRFTOJyQLiWTR7doYKuB6YtorMxsQ71FNOAI5ZTLR5jpZEZ7slNx3xpKYHU1QtO8RH8gkJAjBN2g05SFrAgnUDIesFpgXvMO5Dieebg2hiZbqpwkpJFaoL5CJol7p0opQYglidKh4svSkaK4XL54siiQhtFum/DiHZ2FZISg+r3Ei9Hk5KJIqgZQSjozzgZiAmeA1EIIQ4x64lhQTyhonLSk7vAQELcrDDhoiiUOUGV0vrDrTXm8rFuBlLenzvrvoGabmwmWf9nUpAmcJ/YuYHet5r0Pg/o2wv3w7b4wuyjqorLQCbM88OzswaxvCjrK9I+ztf45TC89qmxtHyXUdNkcQLZkHYpUE/Xgkg8+8DqhO6hSIgSylhEGBvidO8VM2rE1Mo+gkatZnwrIOtLo8ZGqpa/VZTHqWM3q1I0Ymxk0rKCXl6LAIvkKlOAQDQvGXu+O6xuZzCDx4uMuv/Movsjtr+L1/+k/43p9+l6fPX7BUBfHgavBgNh5mtTiGto
88mlmgm9V7iIhmvPck6VDWwIyUA64QKDm2Chd/pF/3ZJdQ9WhKWGT/jJh7YlbmugJN5ATBK1GXFviXI41r6WImqeKdMTt6B9obLZWTVB470ghoECAz8y2CBQKKhz4m2rYxdMd39oxUAmspkHksvAMOZUafOsQLKXsrOEUEHOSME2fryCkEIWiDpJ4UzeJXiaDZAmK9EmOPbwMaA1kVzQ4XOgumxKFRSUSSabA0oUFzS8wR53pDh1yL5Mo02IPMkeBBzW2i3iGaQBoyDucjgUyK+4j3xoiYlqy6jj7LKajcq4Xzr9NuSiirWMxOrXn25jzh+e2m6w+cJrhvSpi/yr5/Gtpl5/DsNW9mX+OUR/cbdu9EtuaerD3rw8zB88vdx2tRBgbZpvXvaywII3mzsr9FGxABcVosNIa4AhCSWuS+BWUVaSlWNrZpHbN5SzObkVQ43F/SLftCg1sth6mvfrPVSATNynrN4CYwjc2b0iGmlNijbroKXIHdxQFaYwgYtt+6EbsSm5BVSTGjKhYfUe7CFAdwXsxFUcalEPINAY3jPAjOC20j/OC7f8G/+c1/xf7eIVlh5+EWX/jCO3Rd4tOPPmV1cMR84YnZsTyKrHrhs+c9X313zf2tttS9L3C8OjS3oEJEUV3RtAHJloGx7PfwknFhRe57YkylOmVH7DNOAkHWxLQkhK1CraxGwRvNx6850ASxgLpsKXaqSs7Rih558CialSZ4wNH3EY0dCWEWGsgrZjMha0KTUf1635i1n6Smn5BSBaASfRyDKAXBeWeBh6IWZyDevgtpYMxsgsUcoCtSp7jgCS6gMRbmRzvfO0+MS7p1HUfF+2DIVYQQArH3WKrM/7+9P/uRJUvOPMGfyDmqauZ+l1hyYZJJsqpYjaqu6gYG6GkMGugB5rH/6sHMwwy6B+ileiFYxSo2mWSSucR2F3cz1bPIPMhRNTVzc7/uN+4WkS6IuO5upstR1aNHtk8+EWp1TgRHMRZKbgZtlxGNIDsgo0Ri7EgZSsnELiBV2I0joXMmw0JHRskU75p5hLB9ew/+LvmYWAE4ROkWW1kOl/yuyILeJB+atvdt5EOh/3+MBsJdaZyHHOP2/ZruAS63wvOnFQmV6zQxXQlcFfRTLC08eMGerRM5H8Y+2kcqjej3+AuThVHw1KMOcxMhBBVb+gpUaGBAbYZCJQa4uIj85Gef88d/9i+4+Pxz/vEff8d//D/+ktffvG4VAndPzhsd/ma8goGIESOEZqEI0urajdy0TIiCSkADznBYzQlurLU9zgewYXb6xEXBt5uBOQMPZhCDsL1wchrB99/vC+NUGx3x6r4bfPXVC37/229aOabw2RdP+G/+2/+G/8v/9b/m1bTjv/9//T/5y//5L5n2CdFCF4ScjN9/m/j2xcjzTcQsM00TuylzPRmvroqzHWY3gJ5depXAk8se1cpUElKVXIyOQImVZIJQsEkgJCCQsuezoUNyTy2g0SMQAhRNbPqtRxmYSziNnJ3xr9OB/bh3umOUqoWSO5JdINJB8XbVxA7CM6YwEHRCioHtkDJ5ugMllQmjECSg0hR2ZeFmAEOCOQ2xBbYIpSYqU+PIiHRDADKxr5Ss9Dr4M9VKtkroBkJNmDYlXyvFBNHeSzCZiEFIyamzC7kxSXpLbDOoaSRowtseF7DAftyDQrFKnSLZKmPOdBLQrrBPyj4b02SYhfbutav6iOWD5zgD1rG62030+8kSk1sf6ANe7n0V7btUyOc80dvQ6WfD0vdIY6w/uy8a/lOWhxgqD/Xs34QReWO6yypdCPzsi4Evn7ree3Gd2e8q/WjEMxigc/IRehOsb+pJiIDb3kO7MbEakuqmt9s8dFFPIRQgVedar+v631aEXxCurzLf/O47BvkVgxZ++nzg6hfP+fV+72F4e4AXY6sFbAYqViMEIQ6xoe2l5YmVWgytRkrJUeAYUlqJPLLkgxCnCrYqFNa1zHNYWxua33h60fGLP/6cvvNuP7lWXrx8ye9+t2fcy9EYa/V0QwWiKJ3Cv/jzP+f/8X//b/nX/+W/5PffvuB3f/u3/Kd//zdcX08MXU+vxm4qfPOq8Le/uSLniXHMvN4Zr3aF12NyD7o0HgMx+rDnJ08D/+wXT/jiaQcYQQJdfOKZeQHLGTrQLtLJM6a6J5lRcyFYwWpFVQim5OrEQIoyjROpWKsiKBSJZHpScmWdciREZbPp0FAhbAjDc0IMWDBQRVQpdaC2vhTROnZXX1HTFTZNaJlQAtoaLSGKlzE4IDXEDWVKlJyIfecVGBS6HsScC7OagnTtmSVUFdVKrAJasRqgOAdCzZ6CMBuxEuh6pdquRRFAQwCit+wOeGOtasQQCdJ5OWSLiJgpKY/O7aBuRKeihM7LDPOusMsdVyPk3HgvVgvaxyQWuu29KwvQ9u3jC+f2O7Sj/jhyHwzAXduc+/5dPb/7Kqy3UfQ/pqjA27Bzfp97pgLPniq/+Lly2e3JGeok6ASaHeN1H/lgxoCc/OYK1hbtb8cbLWI2RwVOogPNmhUpK1vCuQHKHDptx5+LCzws2PABc36weF+CV3XHNP0Dv/3t79AYyG2hV5Ub6Pz7ipmz6H350+f82T/7Bc+/fE7sIrkY+ymTx8S4u+LVt1/z+99/y/7KyMVR+8s1QiszVKyRLumyYAkHLmX/U0V48mTLT3/y3MvSamU/7nj1EkpelSna6aQ1qhSePnnCv/43/5p//hd/xPZCSP/wiun1K0fgVxh3RuiM0MFYK3/7+4lvriYPwVchhsB22/H8M2HbCUMXuB4zX3878avfTRTb0/154OmleN59Gj3KHb0+XloYPgFmwUmJghDFKKOTRqXkZXmIoqHDQiRTid0FRQTpFI0B2xdUldDC7aGPxNCz3W68h4CB6eAEUK1vwiY2/oca6bbPSLEny0Rlh9hIyJVaE7kUhEiMFSje0joWr25x0ABhULwBUaWU2cKraAyUWjyyYSMhCEEjEi88+lAUjYrVQrWABKXkHSEKxTqup4mgmSgboghme0SgFKFY8A6ENZHN0zIAYx3Y7fdsNj0XG6EXqDZhtoXa401NihMaCUsL8E9RjLVSvBk5eFv5GIro+1RDfN9znnrsd3n3j/J28i6e6e2gUj9+F4W/+NML/vSLRGcdv/86M14VdPIy+PumvD48A+FKbO1Fc17hymrbOQzupXTzt8dxPatOTFRXPLlzlcERUxYzdEAIMfDZl8948vwpL198y4vvXpETLM1wbgER3nml7SUKAj/58nP+9E/+mBDg6tV3TFc7dtcj11c79td7rl5dM+4ansFsga/NT9HLwYCghCjkmUvh7N0yYhD6LhJUmaaR7759xe9+uycnObtoemGFe4zPP3/OL/7kT4DAb//ut/z7//2v+A9/9e+5ernzELRlNCldgOFJYJ8LMSk//2LDF886nm0Hnmwim8F7RAQxplL49ZNr/o//eM1vvp74/FnHk+2AqHu/Oe0J8RkSZjNHKDYi2mO1oLIhdIB4zX0fM6qCxg1VIqVCl5Vh+xwNAdU9ya4Zng0oHVOeqFWIV
rnot8QolOJ9ILLg+ADLbDaViFHrHis9EaVoT+kiQ/cZlGtS2lHzDqRQa3IMR5nQLkHImCoi2qiV/VkWy402eEOlkOreGSbF+zREuWCcnHMgl4qQ0Oi4kmrV6ZCtUOmo2kEwqmaqVXJRcq1UFUZgv7/ycEgpDP2GzaCkydinTNXCvhiaEps+ItqznxJmA/v9jik74dbCb/6gGf/hRFbJ/TeN9CFe8UMX7hvRyg+UYrhPdOBt5H1hAz5FyuqHyqdiFB2Xv/va/fxp4I+/FAbNXO+M714W8nWlp4Ho7zn0D2YM2Orfcy/MXfCI5S2zQ1RhracXFIJ5uLsuyntuUnQ4yhq9aIAE4YvPn/Nf/df/FX/+n/1L/u5Xf8f/+P/77/n6n74lpyWjuFhha7mrhGl+sUqp/MPf/YaX37yglMT11c4bBZnT0Fqdr+fAsWB47n7x4KWdH/OoQWieWzXWrZ9pAMXYKX2/AZQXX33Hr379gutxXjZLu57TPJK3uA1SefXN1/zV//ZX/P2v/pZ/9+/+V/7+V/9Ezq2zI0Jq3PUX0oEmfvLFU/78zz/jIiQuAnQU+gBze99BhD/+4pIXP8v8+7/b83e/2fGTJ4EvPy+oChI6poS34Q2FEDqEzyjiufrNcEHsIlaVzWbwALH0SPQUTsmZJyp02lFSImch8IQqAaIbQCW5hx6AacxcT5ndOFFth0pH7Cc09ZRSiJoAxeqAlEwsSrYdlEQQB5qmuGOc9uxqJXaRSsLM0KSE1vdXqhGsOlo/BES9FXHNAYlGLSP7XeBlnahEqiWiOvVwrqU1dPJoQjd8xlQyQm5VKxtycerRjFfJxAhbDCUi9ITQe1kkFSSSUyFKJsRKKqDF6IdKzk6PnYu0vhyfxsJ3m8jJ78tbekbpvE8ldPNcbV06+lzm/3ycJ8O5M/S+quC4rzJ6n3TAt4Hh7ptHP1ci96ko2fclDyGTujcWo82mCgyx8hd/suXnn1d6ItevjPSqMBShM4+A53vmCT7J0sLbG42cC6lZe9kc/SwzjL+lFWYK3hg9deBgrzm0LnSh46c//YL/8t/+G/7lv/nP+MmXT/jum99x/ep/Y/p2bGmF28Nn5x/ybHD4Yv7i9cjL12MD/R0IjKwZGS1mf3QHpAEXpWEjKOt1ZPaMTjgQWtS0mpMrTVPmn37zLa+urLWknQ2C1jK4jd8Z7TwZ8c3XL/gf/of/L//rv+v4+ptv+d1X33F1ndo5PSRTgbFUvn05su2UzZMnPPn8F1ga2Y2vmdI1SGaIU4twRPog/OKnl3x37Q2CXteOz8KW7bChIxD6nhiEIkrsNuRySZWJGCLDsCWoeNmcBVQ2lJrIdUfOE33s2PYBs8qYoJaA9oYKlBLd4OgSVGWqwutdYixGLq3qQryrYAxb9uMV+zpADZgkRPbMrYy7IES9QGPPZGChJ0fDRJmma7r4FKlw+fQ5z3/6c5599jn/9Dd/xYtf/yes7um6gKBUgU3oWm7e6EJFpSeVV8R6SZUtJlfkMqHagRpVRkRn7IEh0pFsYugCwRSTRMAYuogQGdMEGH2MgNJ38IQLAhmmkVQqJSe02zKmiXEa2U9O+f3QKNjHktvey3dpANznePM286s8L71rA2DuTXI/KNe8//t7Du8yfH3f0rgfu+I/J297n9eG4FFE2w66MSB8dhH46TOl7ndcJfjtP2Xy60rfDHzKynF+g3ySxsANdP5teTVpN+zgwKOzQUBdtKcGYdgG9lfO2nY4hnvIw2ZwYyGN5DQ6fe6mI+ieUh62uMze/CG/IUvE4oBZWIY75z9uRBjn3P7a5lntdbuYkcaMoXz38gXffHu99ExglVpZW+Z+noIBL16O/O9/+TeoCMUqpQq1BK/qkNY6qhkixYHq1GSgPfHyCTY8Q/KOfXmFych26FECnQb+/MsLvvylNwj64ukFnz25YLvpyZPn9kUKuUC2zJSB2tOHQIyRVIpXIGB0XcJabX/fDUSJaDBySdQOokYv+ZOA5IyZlw1mSZSaoIvUPEEQzDpnKtRL9iNU6UgNE1CZ6NVTLnNHwlQcAIoKIpVtwwOG7RNASBnC8CX98CXPn/0U+zM3sPLuawQlqrIdFLVKVejFG3BVu0bM2xRXGSkUL2usqcFfhNr6HYgU+m6PUBinEczoovcnMBOqKbHvMEvk5AavhozVTKDD6pYQCjkYpoHQZ4YxOO7DQluG7ok6+ohyg9r4HRkBty3Ed517FndD5MB/0j5bmpGxMhJukYcg1u97zW/CJrytor4vEv70+G/6+w9J7mtsrj6gUhkUfvrZwKCFl6+Nl99VXn5XkSKtiZogxpkePuflI/EMnH/wJscLkJz2LWAGzzmoTjFicMS6gLdmRdiKYBq4ep2w4qV24z41xX5M/Vtq4rtvv+Vv/sNfc339in/4x1/z21//I7tXuwWv/PALPPl9TmXYza/Xf904m5399ChysnglLc1geA586C8o9QU5q/dEWK7GOCQk/P6GIMQuoFGZxso+zUuYYtXPoSiiRhQjiBA1MPTCF886fvHzz/gX/+zP2Fw89Ta8UtGasZLoQ3AQC4W+G6hSG6+A8+MLgd1uj5UK1bv21dx74565fLAUxikzpoJzF/m4iqnzTIiRCpQcSKU6y17o/ZpVkBqwEFHpMKlo3KF9JE3eaLmWiZevrujCBVYLBUfep1zYDBekMiL0TEkd+JfdENiEC/ou0XeG1Z79GJlK4uuvfs/L777iH/9TIMRAf/HEu1rmzH53TS5jK39VuhghJje0woaJjJq3L07ZS0i7TpgyWB0pORO7vgWTKgSllEqZKjEqKsXhJRIxFKL6e2XZm3fpBlOjhsJQNs6M2Iid/P3goLk+cXlfnvObjnsKrTyrXE8yog+JtzwkjP+24f73ncP/oWMEPracc4gPUQFfuZ8/EZ4/Dez2lZzh918XpiJo0+pdFsIDolEf3Bi4k0Z0pgGb/1Y3DgRar3eIwXkEQiPL6WOHhjb5VOi7ns1mwy5Xdtdfo53SD5HXrxxEth6H4Qjq3/zm9/zP/9P/wvP/8ykvX33Hb/7pW/ZXBavadPmbJ7bIuvzx/DZrM+T7SMsiLudbfrbjv3y14+//7lfkNKLi4XLHJro3YzIbJ9ZSF8ImRj77cosJfPP1a9JV8VBUJ8QOuqBcbCOfPd/y/PKS58+3PHu65cvnz/nnf/Zn/OznP8NShjIBpSH0B7w7I9CMN6oQxEsA81TIJbEvQpTBke99pqJ02mOaqZaYcmJXRooUSnasRZQBk0qWSi3ectlMoOspYhRLS1pIAO9XkKkWSNnJeoJ0VJsQi1SByXbkkqlV0SjEbsOYrqEoXb8lq5DNp+gQe2IfCRRq2lMMgnT0caCSyPkVuzG12HDATNiVyj4puxqIQSAJm6FDJLuxFTO1FnrX4SQqgjDucRZH9f4JWKLUDcYF03hN7JsRaApW0FBbZUBGRYnRqydSnig1oWLUkhENCBWtW1K+JmVPLxk0UCnL/Hpc3A/yJmNhCQre+Gxtkh/k3GL9vol+3ke54X2rEX7IfANvKw+933Pk6eY+
Pvu2F8q/+je/5LNBKFeZ765fcV1fUqMi0ZBOqNlxQdT73eMPbgwch41mX3aOvIr3w8EJiUJk0ax9J/Sd/4wxEBqoTtVbu5pUQhy4vHjCxcWW6999iwH9xQAc8vTScncLIhPlepf5+3/8HV+/+IZxHLm+qveuzTxc1yr/z/rnKjx3S2jxoeJNiWy1YNQliCCiXO0y/+k//poY8PC/CFhDuZsHKwU81E2lV0NyRUf44vMNP//TZ+QxMwwdT59c0nWRIQaePr3g8+dPudxsuNxE+iHSxy2dFL77p3+AnBm6QOx7shVMha7fIhIxE6ZcGwhSSKVQc2bMGQsbz41LwqoS+x7BsNpRJu+ip2qU6pTSpRZUJ8yC1/wXz6B59XlLZ1i74+JlgzVnTBsQc+pAJmLfkVMg5R0mE1aNoB1RIkLBilBrj6pSDSoF7XpKalUEVFINpBQoVqjlNSH0mHmJYewLVON6X0hmjLmQtGOXKqEoljLbWunCgGhmgzdSAqiW6KK44SKFoNCFjhpArLZOksUbFRXoIphN3qbMAmZGiANKdKCpBEKI/uxFIUdUMyl5l4hsRqoHTMn63fzxGwIH9W3Lv4fP3xTWf9ORD0c/fvvnXK7ZKtP5AQ2vT8HI+1Dshh9TTp22+/BA3KYnBHeG/+1//kf8d//d/43f/f2v+Mv/5S9JJTvexwoBcV4SqV4t9SmSDs3ipYHNRm5KXQREnfoeZo3thsHMOy9izTMSZC7hann/ruu4uHzKZnOBdoHrcXKPd7Ph6urqqBxjLWYeVt+PlVL3lFqdSwaYAYhH28tsRKyMGjs2bo7nti+w3/edO85j+j06nEeXe0q7f/t9ISoEETr1I6hCHyBED68PnbIdhKcXHRdD4Omm4+IycnFxwaaLDMOGzeaCLig0Qpuu69gMSlCFYogmMG/iIwRC7Jgy7KeC9orJRN9BQcnihFBlcgpiRJHYE9WfH3ZJkEtKzdQykmol9BeIOVNeHt1grGnPWJ1xwRYcQ4XGKGnNGA7q+e9KIZlzAajA0AdIF01JJmgvjEcQErlODcPQUXNEFKZS0CDUOqJBGVMmpfnZRoolch2JtXI5DGRvFEgp3jsim5ewdm0cgmBByHUiWUfNyr4WBnH6ZBWhixEV43KIYI4tyAxOZVwTtU7UImgYEEmYVmI3EGWgWMZEEYVSpxY1UIyCkTENFMuIbajsmZIxlQY0xZrR+Icpbl5/PyNgLTdgP/N5ztziD6Gc3zXA8iGfz9+ZGe/KOfqhydsYYcs+LW34i88v2U6vieNLLrrEVTCeX/aoeN+Ty8sNL15ek15VdrtPoJrgeEIcIgD+ktmMkmtAG1vy6u7xC32clVwgRiOIIVJbXtfjtSoTWEX7S2IMxOiUtDklr0VXJSVX6sejmev53WLPeU7ySaO/lRtvr4inMY7SBifGgqp6aFoO11/ruoLgfi/AGi4wK/9Dpr+NuqUAhIMhFaOx7ZRtF+iiEFRRhRChi4HN0NEPHUECg8JmMC43zmTXiSJRUY2oGX0VQknEEEHUSYdSQfuOIfT0Q4eqk+LM+IuUEvuxeBjehFxHppS8zA2PENTqoXPUSwfFjFImwJ9XTjtSqewnI8jEVCol76k1U2skZ8NqRiVQxfP3tUA3bBhrJZdMN/QNPBMZ055SjWwZ79hXKKZIdQRKFwe0VErJTLV4H4FaEfW+A1I7QgiIZVIencsAPbh0ViiWMLx74TR5A6KpuqE55Uyy4umFIPTR01xTgf00MebIWGBMgT1G1ytRBzQbF51ikxA1IXEiSIeY0EVrWI4NQZVcMiJbf3Zq5EwzlCpGbg2OPG0w5gJ4g6gYB8aUmFIhl0MXzx+LzPGNc6H79VZyyzY3Q/0PP/85Wd/n9XGP68h/3F7zH4oh8C6e43wMEcfJ/ce/+k9c/+PfU63w8mri5SujG5Tt854v/+TnTPtrnsTCFXvi5n5q/p0YA3Ooehk4pxO8/cRWncGs1ZhzMMFFlm1UBQ3QRS//cmVVvQFNiK1UpzH0iYPbZi993O1J++ReZK3extX8JAsd8iqLN0spcKAqnAlw6jK2edE41tQsx1F1A8XTG61sTQ1LTh98tJ8YDoM0vJuf5++lRUUWjkHPnHj+Xj1EFNWIwRg2QgzQByVGT6sMXeD5JvBk8O9EnFFPQqGLHRCd7U4gSqFrERfRiLVKi5wTYoOj0Wsml8F5ESwTNj2Se6Q3yBWLgULFSibn4j0XaqVmgRAoqdDIHzCt5JqgOmcBAVKeqNlQMtUKKsaUdqQq5Dx5HX2q5DwhBCpKyjD0zkI4l0nGmXWwNgCjAiVjBTCf5iEE50ugUMT7LBhGrR3CCJqppUOCQM3U6qH1Wo0p76m283KdFMAStRoq7rXnMrpH3ydKazlcklItkqxitbIJgU2ndBIIwcACWTpMe7K+poghckGykVLcKN0XpaPSB6MLmSebQDDvahjUAYqildjaORfzKghRJTQDDgJRlaoTVKGTSGwEXsZIyokxtVIk1gDTT1PWCv4+S+xsvp8zdNarwnoteBdRgfvsf3oeT6MdBvo+DIKPwXp4er4fs5FzTk6v+3ZWQTm73+xFdxG+eNqjlvntN/sWWVe6HrbbyJ/+8R/xr/7Vf8H+6jUvXn/N/8nf85vfvL7XGN/CGFh13ltnw06e7axq138LruSb6z1XaHmYdt7c5rxIayaEI9hVXRlKnPf3vyHQxUjXRbAMFNI0klIhxI6gwuH9Ol8fIE0hGzMy30AroQETnd64PZi1Ql9dvyrEziMHi/Gj/nBVhTTiTYKsAb0Ak7pESYI4aCyo0QdxhH/w645BGIKy3QS2A2z64N5l57GNKAIoMXYoic4yg4JRQJ1wppq0FrsBqZWu8/OmXPE2xc5h7URGRqeBnEcomViEKVUseDRBQqSYEbVn6Hs0CNY8a7929WutjendHCjoJTGTN98pNDIc9eZDmKP2xemfRSFIIpW5hltcqWsi9p4TFzW6boBSPB+ukU4M7bx9Z8QIQSnaIVUX6ulcPZef69jCLj3e3iOgBKcOFryRUi2UOlKqMaURs4paj5jjE2reE8QrF/KU2I+ZbvA+D169AJVAF1r5qiZMN7zev2SaClUGutBxGS4o9oLOMkhhP2WqdEzZgYcqEAi8Gp0yeW7FvOl2bKKy6SNYdfrmWpt6K4j4/KmleHgIcWBhVHKuIA5aTMX7eBiGftq2wFvLuctav8kHI0OOVrfvI+eOcWTMnLNQPuD9f6hSPkccdNtx7lL8P/aox21y23XflW6ZDVXD6KPQUcgpU6qTjnlkWEmp8O1vv+L/8+v/N6LCk8+2/PGf/CkXz9O9xnZvY2DR/35JrBt6iBxPinOexRwRUAEJDgSzWr3rgOriqTddAmaoKEGEIDgQbk4Gt+hBmN9Y8zxriIqEypQSuRghGuM4UcvaCDgzMZdP3bsPEfqLiADjLnlvgnp+f++HbgxD5OLZxhUJ1gwBJWqgC4FxP3F1vYNGcyzBGHpl0wnbTWDTdXR9oO+Uy6Gn75xjXqgMXXSPMkSCTs0/rkw5O2q/ZOcEyHs
ClVoKuxpcoYVC7Dpy1QYqy/QxUqrT2yYzqiklZ6/KyOJgvgqlZkxGQhno+41nm5ORuopIZqqVacyOo7CC1ULUjqpC7DpIEyaFLgz0nRt4UQeMhBaH/AlKUgfcaQ3uadMMmeGCafLnXnNxlL9ufM607WIckK6iohQ6qlWCFoIGpBhBIxYLKSfylKmpYHgHIylQqxtMTth04XMNyGSwsTUXKpRcoQ6IZMaUG91wJWgl5X2jwDbIQsrmDYS0ECKeygiREsCKsU8TuQhFAqUYgZEQIp1e0ItheORLRSgCicb5YIGxONvgLgtikU2p9FoZxkofi4Ns+wtS2dNp9ve0ejVDLQrVezCUIsRw4cYiSqkHv/g0svepybsc2/pal+Xkex5vPtatIof1cB1ZXNMsw6ftPT8IGf8HjD95d+KR3outorGSE4slWfEKq5IFe3lNvwlstz1f/PQL/uIv/jlXL/f3OsP9jYEGSDzg5Obwxfz52rY+fvgO8vOPZ052a5t5etaWKIGnAUIjXCmEtiAK4iCwVps9v0xBjL4LLR++ARXG0cmF6lQo9XqJJNwrpCheyZCnTE5GzYdLPVzw4QNrN+XicuDnf/Qzv8bScAw6k8wMdFGY9lek6x2o0oXAsycDz550zePfetQEb00rBiUnrGSGLhK0UpKQJ8D2buDk4uzEElDNXlKWC1kiJW6oVhEKU2MztBJRIFshhkBKBYKD20quoHvvnEehUyOEihGJ1UAnVCJpnHhdMkPXE71hQANzVgSFLoAIhgMV+r5HzYmDYhcWa69Yglyo1iBruaBW6DohxA7HcVS0U0J0AOM0JrDBa+frHsMxEQvJVM3EEBhi755/q6HsoxBDZJSRnK9RyfRdT917q+OpjtSasNo1YFMmqlJiRos4/iGAWGygvAkJlTpBzt7HWIODI82A4spbqJCKd9CMlWlfGPoBiX7dKResRkQzVkbMehJ71IQQNphkgnnJopCB5I2EzCgWScW4zpk+BoZO0KnQNeNRRLx/RIit8sYQK4TgdMe1VAKKWSClxJTae2hrY/7TVUb3l7sVUcvOL7GAh0YEjnL8s2nxBoviHHBw5gw5GtuPNKT+Y7ueh8hb8UK0aXU5BH762ZY//tkzvvv2Ba9f7Rg2HdTM9vKCzcWGIXT8/GdfMHTCZ08/4+r3X/Hbf/jmXud5QJpg1UBn/lv0OGo+D37ZauVr2KFgaU2IN5sOQaR50yBqS1VBNSNUCFG81WwIBPH2q6Fr5DebjVPSDhsHpuGekOXWa94Or/i5nKF/bs1YcQCYf3j64OZ8z3JVS6hv0we+eP6EoQuIFdQqatVLwrpAH8S94jwSROhDx3aIXGwjwyYStEeAlCbMIilVJr1uPPPF699FkOCMgBbq0npXRVy5VPP8c1XPU2MIkTJVas2ulEukysw/ELGUqFaYklLVFY5KxTpdFFEuRqrXhBioJmw3W3couwqhQ1FX9i06o9EVuCjOqtdAjLmMgAP4anUyqGogJsQwkEsmxkoXfF4F+lYGWjApHi2Rzr3YCMXASgEgxI5uaCV1QR0pH6CUiZoThroBKQWx7NEZNawWxCav4RejZmMzGGaBfY3N0FA2Q0/WiZKFoe8pNqG1gyqYJdCMkw0PLYWRPbox42Bad81UBEpCNKPVjbM+VnLOlKLk6hTUMXq0Q8yNQ6oDNFsHCXItTNlnYEK4Tomozr7ImLFSPB0kictN4NkTYxMNWkRI1FklUxb2E0zJblTOvAtZv2/3Pfrb+JG3vddwPlJ5bv+3ufqjUPm9znT/470LeRcGxdse4z6phB+b3Cd18uDUDNBhbGth//tXfLOfMBX21wXRyKYTOo2ECvvrHb/Z/5Zxd8W4z0wp8/r6fnPqQZgBwRfugz19+godkYP6J3YIvwkr/XrQwoiujmg0YBYQ3YPug6FRQUFD556zBggBCT3aBbq+W/YJXaSKzdVmJyO6O5QnJsyAoxPmf9wAMo8e2NxfrzXCmSbGF18zPLlgGCLbIS4QRJFMFKXXQBx6NiEwhIhqIVaj7KbGo+wgrv20x6oCBVSYUmYcM6Vaa6SjmG5g2FLzRCqJWgJXk3c+nNLU7B+hVCVnwUrBuCJooOtCK6uL1GyI7CEMpBqI6p3iqwXUplai11NoKZvYU0yppUI0r92Pkdg7uCVGa2Q2gorTHJec2ZcEUhArRBX6fmCz6cnJSwCpiVA7us2GYEJKlVq9zbBi5JpBhGQjeRzRMFFsYDNcIijdEHA640LOxYGjNZPzjpJ9/5KMWr1HgdgOrENsQOoe1A2UTi9RuaYgDBrI9corNcTIUghRseJGlkQIWihWsepsin3wahJVRbQ6J4I4DwEaQEZqHhuoMhLDxEaFPcKYx5ZGgTQlxwkImOyQalhLEOUijmOpSjHYj6W17nbcRs5GnpxV0Kqx7eFf/qLnTz6PSDPSqJWsiQljN8J1cvYJ5/16t4v22yjau5av27z3c+/1wd//MLIO/T9ovwfkkd9G3koJPYb331re1bMzPA3/2aD8bKNoSaSrkclcR+3HRE6Vl1ffoDgpn2hzfAqg3qflPvJAAOGh/niZJ0tb4RPPu6HnWxU47Q9EHBgI3ozHxEPzzfkhaAu3qjT9WD1nTDjgDkJH1/duHIgSQkfXb/HlLNH10kLVBnbweNZ2yH0WqNN8IjR6n9lS9oAHFXj1cuK39hX5sy2ff3ZB9/SCGNXxEdXvRwjCph/QmhDxPPh+zEgXUHXefRPP8ea8p5Q9qURSzkyj34OgHaWr7PfFw8UFpn0AU5I5GGx+PrUauVRsVqqho5CQKnRhoCTPKVciIWQ6lCFcIAxOACWGRDAtiHT0wwW1Gl0/0EdzxRh7uqGn67x1sqFocISrFTDJ3mjHjD4GYtjQhYD00e8PnhsXC2zDM7TfUMue0CnTOHmFQgEpHblksocMkLBh0z1ZQuAheGMqMs7v35Sw0CFUN+JsQg1i3HiX3xoQqyAbD8WrIMVbE5sEpDgng0qlFlfwMNETUe3RzkAypQRKjgSpRBmZKxysFqSvjFOC4rk9EWcchA5sotOELQyKMOUdIgEw72opEaR6gWA2rqfCbm+MOZMLTNkjBCmbtyAu3oY4ZzdVa4VNNJ5uhZ9/tqEjO29D6Mhm7HPi5d7ckJzN10ZH/C6Ws4eG3U/3Pf371KBfb/emc93eAO3dyUN7Btz3OB/Tq77PNT0UGPdjlXd/vcYmwrML2F4YMTqe6CIo+6m0dzZQrDqGzgwrBhPYVBGrdPc07h9kDCxgeluFw5rGPEWZHkoI15EEwKp7H8qCtp+rmwUHDog6u5uIYCpUDKnmeVkTZ5SrE8GUaRqxmohhw3YTKfs9tPB8zceLxn1fT1ktF+trMGav6XBzFScUuk6V/N3E6+vMi1c7Pnv6iieXkc1Fz8Xmgm7YELRSc6IKpH1y9jyrdCL0fSaGyhAvyARGlFQrySpjrZQgdDFQSqZUpZiSrZByJYtSi5Bz8FryII1a1pDQwEnqXAFBgyPObUeIwb3kridIRunROoAqxp4QnOFRgxDCgJnSD71TBPQQY0
fXDwz9QN83j7OBQ6xUgnaIKrH30HfQQN9fEhsxRkojUbw1cYgdoevREMAi1fZ0w4DpyLTLDfZxQZXsvAlhIIaO3pP5hNg5TXEUSq30XSAXg64n6ITWQJCePCb3rG2L2Y6uEzIJLa4ItdthNVFKh8UdMboBWzE6bbgL7dGhx/JrSs4EUYoaIgHLGW1ES6rCPlekVmcFU8GkNgIgQ3sn1DITTLxywXAwZ86uyEudSMUYk7GfjOsRrkdIFXK1VoLorUqrgZlioj63xOemlcIuQS4ZuozhDZWKwdWu8s1r2Lf5onYg3PnYMhvj659rA339Oauft439Q0QIDvXgb8fu9ykqz7tK4G5t5PQJGDE/VFl6zZihYjy7CDy5VG+glsRLyy2gu+IKv0LAf1rxuigwLLtzXe/JpntvY8AaG1Bd5f5nIMyiKkV8kZs/dCvBt2lUsDgR2pJ3D41+1eZyMgVr0YBcHGEQg6K1YhkmSXRdJCUjSvADVuPVy29AnhJDYLvpGQYhTat2xfN1LGChheaoXcPxpPWqCP/6tP74xvRvIZupQtkZ12Piq+8SlxvhyUXg+dM9X3y2oT7bsuk7RKPX12Ns+60DuXDCHUe1OwNe320wItSJJBNWR1QDaZoWRZErHuIGqsWWl/F7HaIgWrHgy2ZsNfHUmROhQpQWjhaEQpRKFgMNxD4CHSG41xwj9L0SoveF2HQDXd/TBVd0sY/tATZe7Aai63SDzFqmVkQhjcnJe0LELKKt/DFPToxjRak1U7JblbFXqEqlUEvGrKNmT2loFEq+8gkderLtURFiqJ4eabl3tGBRPORuCmSKGZ1OaN8h5iRWYhtgQ2HyFFZVSjYqiRic9SHnRM0jASMEf4JWDKMjSkXIFAvU6s8kBEXwNE6wjmLGVKoTKlXj1a6wm4xUnABrP0JKkKoxVRizMRWnDc5VDnQYyJK6m5XlIWzn966xZTjOpIueOlOh5sLVrvJiZ2SbK3rqkbJdXuV7yqnHfi6X/9BtTo+73m793X1TB+9bPgTD34fY/xwfwbpt8fcxev6Q5bZ7d+4+Dio8UyVcZerkTrY7xJWuCl31qGKZo/MtPbz8inG/wsIHRwZmIpzmL5+ZA3MHQQd+tdSAr0jtJjR91fLp1eoCHkRA1UsK3dupvvAZdMFDpR49MBQhq9fNV4yaE7urKy4vLnn65IKffPmE/fULcm172aEYch52Cyw3AtY5ndEs3hYqXfdOOmsILPfDr7Hg9frjCFcTfHdd+Pbla7757oovnw88f7phux0IQRj6So2RQmSXKlMSqA6yCxo9F65gUYjVCZRSMUrsSLtrb/NLh+qervM0QJ6cNrdTj290XbdQ/0ZVLBsSApDR0FNqbYba4GBBCqLOoBh0C6KIZIbQEzslqhP8Bg3EPqDiILycCxqh76Oj7kuFOlJKJU+BEJRq4spWO2hYD5Uelezh9qqknBBVL5fMRpo6SilIyBgJzU6kUyy7QiOjxZkp/TmZGxV1Ry07SvY0iNJD8gqMIBHtnUa55kAIF4SYoVY6eULKI8a33vnPAtOU0QqdCFGM0a7IpbTKk0DLAyGSiJ2nKVLt2E2p9WNwlsVcMtO+kpIwFmOajJTdw98lV/ZTC/eX0rx/oMia0ktWmk3be7b6/DRpbXjEQjzEGFWprf1zKpWXV8arfaXgC0c9meRvo0pOFXBt75au1Pch+rBKL660+9zUa/b+T8exdnbW/CGnqKW6eq/laJ/z8rH82B9i3f1dRsAP7Vo+htxuCHhqOwp8FoSLXaUrLSxvzkkT8O/nCqCMG2jWwOQi4ulajHJP4pAHkw4dWYu0vLlbCP5Z9baxy6tsqxevfSzNMDBp6ygOzBMV5wcI0hxMceIaFUzcSMCAYhQRgrhCElU0dIh05Gx0G+WXv/wZRuB3X71inDwmKo2kYQY+KuYI7OjlcaVU0tSuq1U01GrObVAPS8p9Wpjiw2SfYMzwel/55uWOi2HP5VZ4ehl5/rTj2bPMxSY3dt5IreI9BGIlkPE4f/G8t7bFfyz0ArHvQDqqZe/oVyZC8P4NMUbPRZfglQQKORWERFBn6IshoOrbqwRUnGhJI4TQ08ceU4jq3qKqd7nvNaBBmKbkEYFoxKBYjeR9oGsgNQ0XWJ5QCrUIoY8QoVgilNYfry9OU1wSKbW6/twRFIpMjZ5YGdMVlUzQDWkqRAay7RCZ0OiofM/Fj+SUMXvtJFWl98oOFajJxyGQ0x6psI0wDAUrhVyFKq6mxFpTH7zDYIwe6i+ptg6Qze62SpoS45R9rhQYp8wu1ebpG7UqOXnOP02u7Kem7KuxNAoq1QtLPR3nKTGkdSo8XVtbba6gi+F6m7RXBrUOwYGWRmSfjNc7YcxNGTGHGw6U2fdZ0m+gxk+/X8axLuFb04GvBnpmv7PnPNl8nT54kxjraJ+sxvfm8/7Y5VwTnfnnaej/MSLwnqS59ZdB+FyEbSneiljD4lQX8+Z0QdR5bqJgQclWvUIreJSPIoR33bXwtPOSiyv09VtZDcheDeCWuVv5HjFoGzrvri96rbmOzjkS895z1gB3otVD2nhYRdRR6m75RK/l7zpUo1cYxIgR+ez5M7ZPnvPzV1e8enVFmRKWq5cqBvdUNQjb7QXPPn9GHyN/93e/5q//6h+opmjjgM4GeQ69nLov97lnbikxVmEajdeTEV8b2xcTTy8yz55MPH+y42K7YbPZELqOvlPInmMv2alss1WyFV/Qy55BExqMaoVUeopA0Vfk6p3/0pQYug6sMnQbSm70uXqBSkWI5JxAEl6Kmah4DpvGQlg1EeMTRJ2XoNZKFwqBzkv6tND3XVvYe4L0UCu1el+JThRVIVuCmiipoNpRVd0gwQ2RIBdY6IhFsZIwbX5qTqRyzZQVq0ouRpYroCfbq8Y82FOztXx7xryeFBEh4POjlkpK/l3QQik+RmTHoJHYQva5VO8hIOpebPE0REAZp+RVEbvEbsrsc6aUQk2V19eZXcIBfNmYcnUv33GDVGa0v3mjpqboZ7O00kC4rfFSFRyoK8aMqFk84SPLevXy3fm+2ypqtUPEoxH7SXm5p/FQzAr14c1j7jIE/LObhsWNtNwtx36TQXC8zc2tZ4ry+xz7D9kIuE3WEYtHb//dyV3pgQpcYPxUI09qJsw8MbW2daGBrjGSVSR2aHNitLWHtzQ7CILW9xAZuDEZ2lo0gxWl/SPzBcrhMxVvOlStrioRloBCA0uAdyKEWrw0UDtvbBNUMDUCTlxTzdMH1YqX4VkBq85BoBGs8GToeH75R/BLJ6fpQkeIwrDp6bqAitL3lzx5+pQYI/3wP/I3f/2P3jZXDa265F7cDpvxB3fnedbijtah10AxpSCkfeV6Knz7qrLtM0+2I88urnj6tOfpZxfkrnPim2KMdXJufhG2qkTJqGQCkRgyVgt9DICDDdFAtY6h27QSuwnR3CIu5p7zjBg3J72pUoh0WO4wqRCKGwi2Q3GLdIi9p3SsEGRLFyJiocEQjFq9Xj/olmqQgnNRxBoo0sobc8XUmMQQOi+V050j/VOhlgkTx
aw6ZiRvqMnLBENURCIUcVxA6J31r86UxUpOlaBCjANWQuutMIK6WnfDxsGPVrbEWkAKY50Ys5HLHiuRnDPjtMMq5LHyejcyJeN6J1yPlTFXpuINgfbZGM2jSLVCaYA+0YPvO1Ndn7rBi7c1m8o296JoIbeWglp3EfT3rW21Ahudf2nbD/V7KlIoouRSeb0vvNi1FtHMxsD9F/z1GR+izN+FEj52SW6PYhyAzKf73z/y8VC5LcJwzqNef/ap1uTfBga8lUf/zHePcoesbHrD6IGfxMDnFAZzd9q8BA9F6FzBks2pyHa5NtRZIy1e6VVRo9yvg/HDMQMwP+i6XMiaRvO09angL2Rt3tq6DNG718zH9lbs8wVJwxHYHD61ilrAQmntXRVsohRlMqCAmfd9LzlxfZ0ZYs+Ti0u2l47mf3a55XI7sLm4JHberEelI8RItUop4wKUNBNKnjEOrdHQUabyWG6EGk9flLbFjFooBrUKUxWuErzYZb5+Wbh8MfH85cizi46LbST2Ee9br/QdEDO5bjBTanGvV6gEIlsCl1slZSNbIddrzDpyrpQ6k/kUKl7TaaKYZMQSQxQQb3uLzNsNBMtYroTeLcxKRqIz59WiSDS62GjvUWIYQGqjZR4Q8SZGYp5CciIeI49GMiNMAe1e+XZTAjNUveGOK62I2RW1CF0csDIhllAZPB+WRiAiGBphiJWokSCBWneYZHId6bsO1UTaew8Bsz0l7dmNiSlNvHh9zdU+MU6VnIxxquxGIxUn5NmPxj7hBkCFZO7hV5MV5G6V4zY7hMXmfN7KEGhR/vbSSgv4+zsxb2ZyOqtWE61l3Q/Mn7dI8w5EvCon1w25wNW45+urzOvJmxrPcYb7inG70vuQcpcpPt+Z03G+//Ge7016zmD7sYbaf4gYiI8hc7p87Uh/2cEfaaFPTfdJxaos3DYKhDarVQSClwMnAwtKCIYOQuyVNTrnTfKgaoLzjEowqzptVriI5/hl+f7Y8j0AEOUQWRCwKpR2XGk17tWkpc3dUwrRMQVGpRRDk0Cd2InQD5FxGimtrn/oI7mMmFaedT0pTdQhksdr8iSYBJDqNLtp4rtvvmvUyJ63rdiCiZjX3Lte3dPvjjpOHd+w5dc5ijkVYapwNVVeXU1s+8TlhXBxEbjYDjy7CM4oUdxYytZTS6WaEFRQmxg6UClIyU7MJILQkyZBzfPdsRNnyotQCFjdYOY8BQ6GM2rNkD0io+Z4jGxgNTsjXlGKCfDaqz7GnloiEjK1ejheMKKOdF1PrQkVR7Fr9CgCQbCxMpVXaCtDdPEWxCDksqNOHVJ3XA4DMQSyCmkCqkciuuBGXTHvT6E4f4OTD1XG/TW5XJF3mZQmrq8zu93EOCZ2V5ndVNmN8Gpf2Gdjn4yUWr2+NS8fSB54WhR0bU98VqM6h/ZtnZFuNb9y/LxPtZOcUxwnfx/MgvUBTlXdLS+9eNTiepeZ9k49/NXrxK+/qeym2SB/YGrgQVu/H1mQ7dw9+vn72+7p+5E3jerTlreizf1IlQU/1DLGAzfP3OsHnqjyHJBUsIbmrdVLl1FIOKA4KlgA6YXQCTX4GlSB0ilE/15V7/2yPhgzAPPNP3691uG4uiyGHBa8G5rySC/6gtUaFEkrjciTkRUk+0KsYm4AqFBU0OBeqYqHUXKFVAqCe/vddkM/XBLE+fXzVLnaJVJKXrUQwuKH7XZXXL946UOrnq5g8ZmMN8/xc9DCO7a+ETnwIxSEq+R14S/3yua1cbkpXD8NfPa04+mmZ9jsETbklfGVLRNMsQTFhGIRlQG1Sh+9KY+GjhCMSu9WJJGiiRiD58gtITp4bkp7qBnT10h8RraIWCKwpaQO6RRLxl72xFCco8DAcGxCF3qkq9RUCdpTyQwb50ZAlCoB7QJikWAgxZjEfeRaHNgpMtCFQq+DczTUHZbcLu5CJReQWik2kcvEdJ2pZce4v2a/z7y+2jM1Ss79vrBPhd3OuJ68Zj8lKFUYs7E3Y2osf2VWsiKNWXB5aOsZ35z+6k01TsK8y6TmkDYTlsMeHjpyVLGybH8ys5a0whmVdheNsFfswItXmV/9Y0Yxxmx8vav8/iVLiuChMic/HppaeJdySLG8OU2xNplOK4fevQhvCti8ST5WyP2u7oN3bXOf7x7lWOb76k60sRX4Iii9ZbIZFlr5oRpFgCh0g1JChU6cZTeCeMc3CKAtmq1Bna/HnNb8PvIWLYzhsFwdfpo4WOqs+yxHPxZr/oaGrfMPW9j9pC1WKnMuxI0ACTMQ0JnvRIUgBqUSe+HyySWb7UDfD0Rvx0eQiTFDLkqvhmjCqtdep1JJtbYFbvaWlEM2de2NrQOOp5nTu1eAOSx88/M5/+veZzal1MK0g90kvN7Dd68Kn19MPHsibPpC6CuxU2fEQ0gpM02VXJXQKdEqnRqbjbUuewnRQDEl2IapFvZlJNB7aGkushSlSqaWAKknSCWUSnfpgM4YC7W+IAQh10LsiofssxBDoAsQYiFIIOeERtAopFyoVeiCYCWj3UQ0JRQllyvUAiEI2nVYmpynIL5CSiCNlbHu2e2chCfnHdP1SEqJNGV2Y+Z6lxjHzH4sXO8r+9Fz+mNpNfrVQ/zF/PdaG/UB3lnQn3Jdavi1AVpleXBLNn/14Oa5cWhdPT9Pny22ON5LHm+OMrWpZNC6Xx7PsLWyXc5yw+E8DjOci+CZwXc7T6+FAvsCr5KxK3Ok4eHyKaQIHnL+hxjq31/mtOI7OtofQC7+h+rdv0luGFLrGdu8g17h6RB4FownxUvtbQMM6tFxMSToSvkLk1kjL2tl5+ZOVNBAmNvsRk8Z13Q/1qEHpAkO1oWD5O1IsR0ijccPc8EJsAqezqkBVla9eZjVc5sgasS2/koLi4h4ZjUI9AFCB10IBAmgDhzrgrcFHjpjMyh99P72KXTOxFSuMVVEOqQ4l7xVp3VMJYE1q57KjPeS5rEeIiLz/zcX3vXDvn1il7b/SfeDxSDwe9tApFgV0t492hdXlSevhMuhcrkVnjwJ5K14gyRRSggU9RITUW+xGzV4DX305kZTThTLdAqdCrS2vyqChM5LIqdM7C7oQibKni5soSoxJIIHowh0hKpoMWLwhxzUKZdrnSipQM0kuyZY33oQtOZTqnQVvJ1upmQQMuO4I4pAEXZpz7i/Jo17rq6uefn6iusxM07Gbl+4viqMY2XMjaEv2xLez3N3TMDb9x5SNjOl9OxrMzcx4riJFrDK29Nm8GGL2aM/VJkc5sNCLMI6dNos/WpH26qdKPTlbPMY7IwRsJ43XpUxH/90TlWMqwLpdSGI5xFTZQ7FnT/oLXJM6ftpL9wHE96O7qm/de9QWR/dkw8TKfmQyvNDlhK+Ddbgh2dAHN73IcAvngZ+ujGnPRcwxSuOOq8sUplp22eQvUci52atZl5NVQ130EpllwQLlXEUdlf3G9XDGAhp9Lvtt7sAdev92jLJstidsPicPkqvGRevvY+K4qx1qhBVCQH6TtFW3x5kIPYdnTrJTt8H+q6nUy8t08478gUiQ9zSRSEA
qHMUtD7KpPGg8OfSTA/DtwZFxoJ9cGVdW9Oimb3tvnnCRhh5ZnNZPFBvCSyLgijkqpTqFLcvrioXvXD5qvLkonB5Edg0wGEMBRWj0lGLMuaK1cxUnGBIgxAlM4SOPg6UUuliJFdhzAURo9cLgsI2BvqwJ4TKtlM0FLJNKE+REj1PVTK7q0yvPf3WefxLVfb7PSEEJ3yKfh+RjJVMzpl92pOmPVfXe8/zZ2N//RpLlavridfjxNUuM42Fq+vM1VTZF+dtyDMdr8ni5c/GmzUEvuEEHYei1npjQXMq4PNe4yH+MyP9bcF4iLRugmee+aLkVY+wMmvmsWNPfhkNc73/4rHLqbo+jVStP+f8ePBW38PGozZTduPytCz4IfJDWX4Xo271933f0oed5/5HPK3Zf4iSfZcK+TbegLuqBOa/37UCXt+HNx3/U4si3DWeG5+t04EmDKpciNJZQXohD+LdWFP1tvICm623Sa/mqkqqESJYqQhOLf56J4ypVdhVZTdWZys1yPUdpwkOL9Ucv5+j/HNp03zxJzvOoc+jz5tLtVqN1rWsIvNy6MhJDZ6WjY0foAuKSKWWSKkZ0z2iQtd3iEGnPYHBQWsErLqBELXRHwtIcGBFqdnL2PK4jH3OdswRC8NxCiKNhIjqhEjztlJb9ONQoLVc/o0Q37H3d35l8u2OP/ZGTZ4DFkYRprHyeip8d1W42BQut5ntNrDZCBfbAEXJ1Y2vaoKYErvApXpR30b71r0wIQZRtmQBq4XYBTRmhgBBJoJFR7Mmw2oldpkoPSWPjRcCuqCUUti/viabMpVEwIgG0+tItkxJI9OU2O337K9GrneJq11imoxxcirnaS7Xm5yKt5oT9KRilNpY8uYoCtYwHhzmIGvSaQebLlGsI1S/HN37+enMZbm67D8/IwHx2oGZ7dCf6/ECZmaLIXA6t5FD3cC5OXI0C5bUxcpiPkdAtL6COQp3ss3QwefPnDHy1VXlenTvQx9oDXwsfMDbyGHNOr7j79q//T68DN9Xub9v1P6HAgSeMzjOyadoADzIoFscYC81qrWyS5VejKETYkMBTnh6YOgD3ojNO7mWAmkS0l55vauUxmXyat9a2MvsurRZKb5m3UfuHxk4up4Wn7hxlazATL6MHV5CW/3LslTb2qhoIVVrJYaYIcUX/6h+48Dr3EVbmVrw5Uyrt4ut1XvGdxap9Gj0bnKBzhdoKYBTyJoX2SHSUgRSCDjaXVYepZk2JPih0kCMWSMc8roPANqsbtmDROYF3KBaYBJjrMZVqbwcK8PryuVGebYtbOPIdhuInUdIaJ39vJ99x1iMWhRUKHkkdB05CRqVXvbAK8gdJgPI7E0KRodIodYRk2uoiZwCeRxbt8XElBMpFSgwpeIAvp2Rxsr1WLhKiTEZU4Kxddqr1UjVw9rVjLl6dn6B5hpa8ND6bKwd5eJncfd9SQmE5UtZlPf6ns5kHusnaKfPT+CQUjjnnc/jkDNGIN6fYZ5HcKgmkTni1EoAF0PjJjveqa97iDZYu0dywxBQgScXgZ993lNKZZpaTnFtFL1zFfnx5cjbXH/+cYZzJOeLD+8fKfgx9AR4V+P/kJiK+3Iu3CX+3CNgiMLQA+oevhZnJy2lNTArSs6VV5PweifsJk+JTjlT5rLm2Rtq0XNpAD7nJnCq9PvI2wEIl1C2X9rRM13qpw6h2lsHs1hJB39ZGnXhnDPPgGRDw+zteFOeqE7FGMUbwdSayGaoFAJKrhO1bhAak2HwXu9BAmaVfarewjeopwtMiOoITKnNQpFDTrVWI8b1QrwiVm0ESGs76HRqvItJOgdSyuGTdi6lFmNfjGmCq33mxRVc9HC5NS42xsWgbAdFZSDlSlXnAih1JnUKRDLRm917B0A2ZKmIFhJCycWfj4DtvStWyplpmhinQi7GlCppqoxTJhdjzDCl2rz9mVvBSNnrOAqH2TGH7Odc+3wXpTpwRAluALYJflv9huFW9RyAcsV7bg7aEgZaIgtzsOCuMKWwAogdpwF81xaT0BlX0J5Tm9cqM6vgDKSlRTr8iDq/NcbqXZqVWoUGHDoO79bVuW++cZ0E+gL7nBgnaxiK5c7feq0/ZDE7VFnfeB8/wniOz7+OXz1gv9W8/L5rykMV2vtUtDer1T6dKMC7lqp1SS3HCENsqdYU+e47IRXh5XViKk6Jr+I0/amwkA8hzakwnFBMWp4UbVwxFalKkbki7s3yPXoTyO1ryCoGPntXcrrxHNJdj3O5ztlKEK9NV2WmFVVRRDOqcel4iIT2eSD0A9uLZwz94N0OsWYtORUu4iQ+XRwQjUgtVBO2m4HnX3xG9/evyGOlNtDGmiCpNgNsBvjNpEgHpN9d9+v+cpelu4CVjrzgQ6lUNchVmCZhn43XY2UTjcteudwGLrcjl1shRBCrWOsWuA2xtY8u1GykNLZ+DM62NyZbIiJTKqQqpGSkVEnJt8nVWyfn4mQZ2Zycx42lOfEzV50sKu/ImLSjX5rC8gJ/3Ks/hvjd5iHNYXxZlOZp1Ibj887z0fdajn/z4bA8amvGhJ5s6uU8/vsMDpwNuTWnwGHsqwjAvBDeGPGJQbO8V/O/Dgy0hQJJjr5N+8zLrwq7qfD6uh74kH7E0h7VR1P8b1L29wMb86Bt3rW8r3Peh4Dp7Hv9ge/B2VTf9z7oSqG3c2RTfnddeD1lqgkFN/hVfK0eNtCpefo2+Cqi3mOekgvTuKcUYZwMK+XAg2JuENxH3opnYPmMkwVLVhe6XPixm3xYntqezfs5Vry26AGksSzhYVuTQh9i60/gJYYxBobhgovLJ1xebnn25CkqnsMOoWV+K4QYUK/VQEzpu4DVQiqVIWz45Z/+KV999Zp/+NtvSNXcA1vc/GMwWIjCZhPItbK/LkfXfQ6A864m8Tnld9Pr8U9KFXbF2E9wNVbidWHbCU8uhM1GGQLegCcEqo5oUu+DkJwxMmcjtbByKUouUAveI8DE8/dmS8nerIZsDulzsJPWRXnKzMV/IIwxWJi4DrS9czOb1WRY7sPqats9WUpWV1seDIFz6lVWRsE9QrNrR7oB/c617pQ2Po8EtPMeWTltdEfWyM30xcE4ZnlPlnsjq6taBeNs/V5xSOilqfI6G69akySHp/44IwKznL4Xdsd395H1DDyejbed//5nObdGnAINPwW5zVH5GNUNt/39qcht6/6cUpamL8XEO90mX0fdSPDIYuwgBG/h3nVC7AJx6FBRQsv0pWgE7UgJKom0rwveSKzeexa+Jc9Au9gjJd98pdUifWoYyBLWnl+kQ/nX/KlPqvlG+Srn6W6jEyOaYwfcoQp0Whh6peuUqEKgUvKObEbVQDcMFCtI33tPe6t0MQBGzYCFpZHSl19+wX/+X/xbXu//Hb/7p+/csp8XfQSzipnvW7Ix7lug197sYz3EIHjw5L6h52blo943QLwd9Jg8VP8qQf9aueyViwGiZsQKpXXYmxn4plpbLr81v2gd9Wal5JONVQnfiWU/K7NqR1PhOF9/w2fnYIA1o2KeM8IhP95eJLOTe3vGIFjuyfo
vO9xnkbnY8zigtj7ues76BwdK4eV8Kx1fa8vfrdD/Htyaw8MH1M1s6hwBy+Z/5MRIltMYm0dLvCLCrSmzY0VkBsmEEdhVI//oYwK3y/cyCOQALn0f6ue42qSd5xNVdO9DbrvWj20Qve15byNwOoAO/e8ZIJ2rMAP+5ojAZiMMm0g3dAxDz3YzsLnYUqsxvnxN6BTtnjCNEzkVrq6vecGeNBq1gFW7t5J/KzpiEXHFCqvVfEULKvPftvztIK35JjUlgv9UAAXDuwr6LofCLVcIlWI4CVE1t3jMGdQmRqwYViemqaPfDfRdx7C5RKPQd8PSf0ClQJhD/p5qELxcTqRyeRG4fDL4wn7S+nHmWhARahVybqNruZrbJs37KMNZr2qnHsvK1DoYX02JlCqU5B319pPx+jrTRyhVPSdl5hUTdiDiqe3ZG9XD7oCYrazbW8a5/nw2HuTgyR9tu5of86Q6KbJb7bNuM2PLd+40H9D/ttyn9Tltyd0fSIVbyawcODHc87fFez81a5d7vDZyVr/P0Q9rfNaO1xNvpYwjX6xFV+C4wmHpTtDejWNaay9zPVz3HBK4m/WuGFwXGFta4sduDhzuEMtvssRl26dtXeLICJtXrXZ/RY4jQrwfQ2A5/z1BdQ9ZU24rY7ytpPBtlO/bcgPcFyz5seRdnvs2QrApVXbZp1dQkCDOKYAhVbFc0ViwcWTMCdvvECqUyjTBVI08VnItTKlQiy3r211rwqk8oLTwZp+vuSnRrNRh5Q3eGMXZV/PwubWVb1mlHMhX8Rz07Bnm6rXjAcWqo+GnfaaWSs4Tw7BFqlcxbC8haGXTb52asYDlgsTWwhh1tkNR1ISr13v+9q//lm9+8x3YbIycUhH7Ytr3wrPPLyil8vLFNSXVZWH+UJN30XUr7/TsSmUrpQFLf6hUneZyX5y2cj7A/KwXb3VVh3+gzr3nDDs3nLNpjpMVt/26vpXH4fw5UnOycJ3e+yXCwNENs9WxD/bsyqQyljKdo2hB28Hm8tpFQ6zeDlvQEG7c2nzsZgA0FLDZqmOgeEXDmsp7Zcvc73af2abZ5aQqFDMyx8bjj1naHV9+n+/FwbhsH8x/MxuT/pnCuSzQ+4kK8HAw4b2OuzZ07pGTv2v79xGx+LFHPk5LEE+lVkgE9qU0Lpamdcz78oy7wn6f2V9PqDip0PyT6gihVColW2uT7n1jpAGNvRLuHWMG2urIYjEf2c8316GzE639XKIHRyhsb6ZzAEseGMRrhaxgRZr6BmmsetUygiPiuyoELWwHZ8ITy4BbTMGEUrxvvUmg6yPgbHiokqvxu99/zV//h3/g1cuEHXEGrK+leXalMo75RInczxA4tdTf6oVYGQLzEBb76/TWz6sgcvi76ahqp9z2dmDUW1sAa49aaJ7o/PxWivUN3sXDPILzf8/Ri2NDwQ5jvuV2zgRYp3EUW1mzvru0lES7R4dND5NXjq9B7Ph9mKe3LZU9LTUw2wZ2KPoxDlGLk+X6cDQ5PMbDG7i+lvOex7x/rh5RKM0TPp7RH39BXk/bldl66zZ3bTd/d26GzZ7/wQA9mZ9H+8qN49hqr3d51z6JZ/COvfSPHd7/FORN154MXk8wptaHVFsKHCGId6U1gXEGr1sj4OscXBg7QYYO6aqX3hXQCktxgVVv3HcPeSDPwOrlMTtZJFeK5tz+dkwDeliY5+Pb0ttARKlWMVFHY5tRsyzH9pBu62Ewr7rF89KalFI3XjIojuzej4m+jyxhkwrjLhEDDQruHPpfffM1r68z1ngWjflhruuVfTXO2Xj53f6gPNAbyuve93bOI80Lvdz87uZOx3/Kmc9u7jBjyN3bv224BxyHi64XiZN00dnBcDB0bgMcrUl5Doc+/fumQTB/fvheDp+tV+ij0PrNsd0Yc9v/qPvAYiS4ZTCD924A/dq2SwBAWKUbjg0gH3ODV4qfyysevKRTzTgiOjrRkp4iMhZzuRknp/PkRugX9yLm86odPv90ZJVqPDM7jbuNmCN77ezR1+vY+ozzOzx/Jjdu/VrehWo7BzJ+H/IQPoLbnJN3MbY3GQYP5mj5kUg2Y1cM7YWNSsMZOYqwFMgzw+qsaaoD47cXHZ99/pTPv/iSz37yE0wqKRvjNLLb7Ziurrl69ZLXL15zfZXvNZYHAQhdOR7/zfLZLSv3en/hKC/sSrccvaQye+vmN0VgwefNOT4RQdS9f1cAQggBVSEXGPeZvkv0Q0/dOHgu54KqEWJE4kBJyc9nASlCKcI0Wat7V0yyn7hd1vKicBg/tloYHjB3bwu3nV147ooezCvWsVV1/P25P96waq4PqUuz7dW128Erv22huM/LvcYPzNd40yDwi5x39+qE9bHmObC69LXSvjEeWZZ/V/5y9L0r4TliINAwEnOTrCVdfzJWN6BmZSbUtu2scud5OxsEq9NwdG9hYbecx3NqNK/LFOHY4LJm6M37HeJrB84MWe7Bp2QIrG258xPz2Bu/RamcfHt6zGPS5jNGIedfq9Pvv6+cGskPiZi9zXnuc/zK3e/1u/D073t9H5JI6GOKKWwuIz/94hLF2I+ZV1d7dlNhmjK5zI7BoQLBK9gy+zhShhG92NN3kQGhY153AvuqWOv4ex/5HtUEh8XlUM50R8h79mBkLsk6jg7MXrjBctGqjZ1fDI2OroyLESBOEBRavEGdsakKTCmx24/YKxAJXG4jdBGJnmTItTrHsdTWfdaAgErrGbBkGg9McR9Ebrt1Jx6fiByvdreNb1nRTj34Y+15auTdPM783Fi82WWJXSnFN+UUz6VH1obAvM36fsuN8csyWNfLh2s6tX3m2XVarrdaZg78XO2ezgRBawSFV5UcPP9FyZ8+l9MHKIf8s6zuiYfv1mM6vgeLMlpHG5brPVzHQeyw0SrqcFB4h80+1YX1LiPgsM2bx366xc19js902zFvs6+/r5w3eN8vpfB8jtNxVKsn1y83trlLeb+NYn+UYzGEKVW++fo105jZJZjqzCx4qBDydamtVxVSLuyvr/jqt1dE/ZW/9229qS2sWKpRzLgfYuAtjIHDxD1VUIeF5lYr9EhxtaW8La46KyjhWOEJrvTFKxhKy4U0ZteFzc2Lw5QghsTojMMYKRX2YSL0HV3f4eRBCVW/dNV280qh7yIxeHimSlMEJ+mMu+7L+trfl9z7+Hdtdmob3Pi6PcejePzBtlh7TW985vOpVviQ26x+p5tuHv3RYVbHXnkuN0tbj5f405GcNVDa4c0MtdOjrI2NwzfHIFmWwZ6e78josNX9XAyp4/u1/L6OONGMo1Vq4+Y9Pu/Lisxg0BOW0GWvT9MweFtZT+vbPPvTyMGHlocoxvuG+G9be96kyN/0/N+HEr9vBOSua/lUDdqHi1GL8Oq68Np8GakrR2dJrNvBsZlXNkEophQzRlvXRRkyJztNGgX6/Z7jwwCEy0p8IPk85C1hyRS0cx89NLNDvML8n+VrldZa9VDGNi9wtSGwbUWf6tGCSi5GhxCDL3YaGlthUBAlxJ44RELXAYFShC52aDQ0CFYF0Qi1EvuO58
+eEoIzuf2gFsr1/W9/38wF28nP269vAYfOYZG1Zn2L1fQuA+G0ZHX+/DB1ZgU4n3e17co4OB3QErNanfo2D2wO79uy14olcPWzTdsjRX20xQp6fsM8OFkE5yjYnGI7XiT9uMdlbjNF8S1qblEccynccUrLTmKFP6DZfW85ipyc/A7ceKb3lU/JcDiHC7nvvnfKyXtxF/7koWN5m3F9iGjJxxdpBgCuwBdHeM24emoQrl2MxtMzOzPM5dUsk1bq/efB/QGEcrIItg9mQh6d10I7bL9sbQ6JOnQuPtg5xozWrkcL/KyHDPeoVKHMiEqnql+YCQVBtfOOhjESNLaOhY3yVo1SK0zFjYVGnhNjj1lEgmIloV1PiAqtFMNsTe16z/t0JhR++vl99p/ltpfyNq9hVhUHpP8psOye1zH/e6zjjsb0EHDS2XPYfAyOlNW5F+A4XdC2Y5kuK0V+Zv/5xTh9jrb6fMmzr4CcrI7tE/HGGA6lhevtz5xmjnacWBfLPWgbGnOjodmQOXy3errLgZaIy0LEdHO21rUhcOb7H4q8ez/105FPIeQ++6TznLzt/LfxFax/P7eOvYuxvw8D4X2DF2eukba6nNlCljXG79Ph/b71mLML0zr9ybJ6zRs8/F4/oJpgVv6zIXBwR+9aqOFwC2b602VRnD3AerBuoHn+i3fkis0MojRiFTWCCkEhSIsGmOMLtEKZMokRxKu2S6p0XWLoN4RgqPZoDL5IqiASCFKJ/UCM3s+gACJ67xXo9CV4H5btGy3xRdHY2W1m5TLrlEOR1W0XeTjfmrr2cNjT4993Ah405xo1ovMcu2PhkDZ55qiFGz40+/T8QrRc7xmDbL56WbiQ1yO0Q2/O+bYJCzHW/MWpMXJOTo2bdcbgYHDO3v9t+8nZOTAjjQ/jFHQJMBw/3x862dBdM8yDo3+Ycs5rf0gVwSyHzOAZR+OO490VNbjrszfJXVHFd7W+vitD5XaZnarz450/d06bw7t81/XdxHocg1AXPNYD3ogHYQbcYWknnRfNtcezPq/Zwhng3ttNxTSvU80R8uO1N3quGEAayUL0NsZ+od7fWZpCsFIpkkEUKwVEWgtcIwRnGYwxgBRSnrzBUPU+0aKZoB21Qqcw9HOJ4NslCo6BcMee/H33P80jP3TSz/d2tjLPRpkWD9GO9jkzImYldeoTnyqnu4yVw3X5RDGzQwcugNbIiBWR0I0x2MrwNGjoz6Uz3foiVnGCk+td3dPDND461XKL1kr5ZLNDr5E5xXBeFgPs5jcnv62f+e3P+2zo+NzxRZeI3I9JDk/3x6v2H0JAdJ/I433xPG3RaH7a7fPmQwAgTyOst23zQ0glyD3p6uH89d4ViTl7vnviuE7lAQyEzStrrrziddEHZb64+67A57/spm0yK6rjwdvKoBBQp/3RIITgzRoEKMUOk9WEqh7WValUE1SVEDpC7LzcMCiIYbVg1dvqSjJqdbCg1EKyPbUYVkY2fWjphxU17QPl+1qZ5wyCw316yDhOLbQbZzp4xqscj9e8s2jDg+9+au8dcv63VRWsIyVHR5kNyiWaIXAy4f3489iOrmzZbWaMlnaM9bo2n8eWRNodYsd/3OXhH7FSyuEe33zs56xkbtn27WU2pg/33YDqFN/GA1TLpy/nnsybZvkPTb7P0zqnIO+rNA9+5O1pyA8h9znvuzQC3mQkfWiD411EVd7m+d3bGFA/w2rClGPFsFYs9RA0nW2Ew5p/2Et1ZTFJY7RrRoB7e/Nrbs1z9EldrWLFxxRQaistaMESvFFhxWqmlomSjWRAbRGCvmKdUUSIsadY8Zx1rfR9pAvOV3DOH75NTi23d20QPFRu3XV+SL7VSRjp4IEfPplD5bKiYrpp4JnZEZHQ3WM/TIrjF+4wuMPn565nNlJkeeYHQ6Ztu7rGc5r95gK5MjBWV7++0HOXZHdY/efSKfd9rg+x6t2WduNK1dhEuNx2TKXy6rqS7MdhDhye9U2x1TaP8jCRU4NZTj6/Y793IXd5veeiEPfd9py87Zjv45nfZXS9yfO/Lap63/G+C/1z/zRBAygpdgSwWvs+s2qR1WRao8+WkrVzgzXncRezRkOE/yzOsqiCAzHASVlUGgVsaWkEaSH/TKeeUtCoxKiEqIgGutDRxUDfdwT1LLiEuFxILoU0TR6JKK07vMpaO9xL3pVF/fYv2x2+0omCOoQk11bCwQharw+13X81ZQb8ycnCcS5ScKtYmxMe6jlrer3pEGLORzHPnXm0xipcRps7txzL2oUcAW/nyAIrToR7qJw3XfObEOJ3fbcsKOvUjRxMoaDw2Vb4ky+2fPF8w29fXvE3+723RX2UH4XcGTY/iQGdS1neti2sIoBnlBW3fPc+5K734V2f/zbA95vG8dBxve/owrs4/gOMAZ8sOodh7UTpc+6GHNCTc3h1vV0p5UxI/OapZ7yiK6TDeOY4vgCSWOKlAoSZUEM8xTDjw4LMHq4voId2Cx7FyMVIeR1paCUMd+Rxb9yqTzmPtY4ks9wyXxrOoOXnbVa36fi71fM7fZnmSMHN57tiOkNuGCjHx1/vdyyHeXQwTA4mzIH8eg7rry/9oE59K2ulPctxT42meQB2ahLIuVu2Gv+7C7fK4aatzu9vWafGT54q/+wnG375k2cMTy7YA+H3o5fhPGD+/hDlh3B17zv0fi5qcisQd7Xtx0oHnMpdnv5pHvyu/d8mvH9fZb52eD6UnIsqvE3E4E1yf8xAy/0rtNXUF8GjCd7ywEsp1jq3eo/Biigi1pSSOtLfqlsgnqdwVkIVaO10NXiUQEUPKQlpZYzmhEIVKNkcF1Cd3KbaQcHMCgoa015rB2vmIXJdvNYfwpJzuxxNpDlnD7eupMuzbQGeo2e5OPQnypKbE/AshmDxTR6Cdz13TWd+n/XzPA3Fo0jWvmtfnTnGzWs4m7+7YzxvWqzu8zLfb7GZTRGjD/CzJ8o//9mWX37xhM+fP6VsLhi+G4ny7dnOez9GOW/K3l/et7K+VTF/75TgD/sBn0YVz8lDlODHcsbW5YH3kQ+Be3iIPJiBsLJ2MppCOVIS0nre16OF+uDhrW/AwUMU8SYNs4HhUhZ8lgSZ7QNC10LR1dMDXRSChnbEprUMSq0Uc9DhhOMGzHz8ZZl44kZHnakfzVsSzGEDW1pELIH1hxoFpxP5bXJC70UWd/9mlAdYwvjrYMxBjbNS4/PdOROOvO0FN1dk68DAcrSjxbGdaT2sZdU/55UfKDyPAw0z3uV04dUjr/kGQO3EADo+8MOf3TtZzOZnhTEE4Y+fd/zFzwZ+8XzDJnZo6NGLLRdPtmy3gde72pqAvTu5cVveUu4T9r5PmPpdXN6nDJh7X3IfbMrHGt9D0fDn5E1GxPoc3/fdPJ2rb3OMN+1zGol9l/IAY+AQWp8Hcqw3XLF6pPmQ/9UTc/22treHi50/a6HfuRmRmbMQqlASaKMAKJORJgEmBCgFqN4qtjQgo9VDzlfEiFHoWpdBaXwFUQMi3tvArDCOlTR5f2izOSpwt+9xzsI95Hl5NyvW95Czk
3Bt2DmHM97/sn3aLvnQZOoUQtgiRItWf3NU4HBe7xUhs7K30+1k9X/7DpamQDcV/uqCDA/9+wGX78zOPcOVcXZ6uLObydGPN8m7NvgqFRFjo8IfPVP+4mc9v/xsoBcoOVPM2Gye8OzzwpPLX/PNd8nfi/M37K3k0HPh+13bnQA1PEJ3V6roxyr3AcedUxAfQt6HA3PXNXwIh+n7nuOhYfvvE+a/b/TxoXJvY6DWAyjM8+jrEbCA7BYnSxoPAJ6jr/XQLmFtCBw+m7XO+mINqwfjAWBa/HNaKN9PumACcIPDq+qEPoozE3ZK10dCUDQGuuCfiyoxCDFGYoiYFaYx8c13r/j66z37nbf9FcpxUPsNN/28pXmSbPiUsAXWvGbxJ8ZSQ3DwxNelR6djv8tMWt+Lddp71RDxqJul/2iNsFa3uJ7Omflf00U1HQ/i3PO53SCaOcDfqDI/8mMToEP4YhP45bPITzYQ60QqlSqRrVYuthd8KR1ffP6c3/xux/SOQwPv8xbMI1VxwHInQkbIVo+ey+kS9EOWhyD3H1J6dl/w38cC7d13DO/yPJ+CLKDkdzTGDwog9EV78f/Pb3DidRneytWAJW97x7wy85z+2hsAFpT34j02RT+DwiQYqkYfA12nhOAphxCVLgZiFxg2PV23IahXEnSdu7reBEkJ2iESyWWHhCuGqSO+nGAsWLXFy5T1vyex69vKYNY4+fW/3zvJ+Y5lGc4Rj70tDr/jReZITftaTrZcGXrHBtGBMOjgnSsslM8GaGvU0VIOJyp5/dc5sOrbymlO/VN3QBVlE43PLoTLXiAbpUXjTA0V79dxedHzxWdPuLxQrvf5oUUxd8r7xc8c8B1DJ6gZOTecDy2S+Kk/pB+hfIpK9cco3xdz8LbP6QF0xOuzzYRDK0a4xga3HsZRqdnJQc5GB5gjAEaYdc7aMDg45e6jqxBVCWrEThn60PADQtd1hBhQUfo+EGKk6ztEAjEG+i60cLMgqKcdTLEa6LrIMHQMG2W3c0Y8E4OqzahZRcSlKT1uqqPTYPfhU+G2Pz+4nFHqa1l740tb39U9mIdfzZZ200eHbxbbov/XeImV0WGmCxphYSI8o6WPPlmaedwWibmHzMc82Cz32udDP7O1YSTAEKBXqKUwJjcQtOFd9vs9grEZNjx//ozLbeRbzUz1Iw3+rUSIAS62gWmsWFp4Jn+08rah4/vs9328+3dtBPwhGxVvU+Hwoe7XgwGEnssPiARcPXhHeLFTv+NY88uJ8l/jBMAjBs2eaH2Z1+2JwWZw4bxvAxNqrIQoRDW6KPSxbwaCEEJEgxJjR9dHZzNsQEVRUA3etdAEVWc3EFNMOy4unvBku+fqdaGU2TDxlIEdabfzcvK6vjPA1fuSVfLlZHwno5Wb+nJWprNRdFJc4kaEgM49KJZw/kG7e4Oe9cFllUdwqSuWRDU9mlDHc+vQ5OeNcmS9vnnzW7d9jw+1rv717mazFSooEdNKFi/TLXlCx5H9mOl65fLigifPNnRf7UljPeAozsgHvKQ3iBt4fRSCVAqVsmLFlDe8e4/yKI/ycLl/mmAJ3962mNzumR0c++MciTMQGt4y+LBNlYP6PD2kqrb/PX+oLToQg3g0QSt0EVFFAmgUTHGjILhRUQVydbWuVES83FCjEkJPRNl0xnbb0fc7xhFqPQ71W1uw5vEeAQXvuo9v+P6TkLsGeS6SMEeH7sJQAGX+ZXW/jubT0bFvDkKPDBFpKYl1c4812dFbKIx1+uMDyl0ewGKkLamXw+0RNbpO6LtIoPibZJX9bs/169c8vdiwGSLPn12yGV5xPZYbxz9/vo87T2dkTmxMoCm11qy23ubHIw/1/N4GJHZunx+7h/5DuL5PaYxiP/Qi1Ud5lEd5lEd5lEf5XvJD72j6KI/yKI/yKI/yKN9THo2BR3mUR3mUR3mUP3B5NAYe5VEe5VEe5VH+wOXRGHiUR3mUR3mUR/kDl0dj4FEe5VEe5VEe5Q9cHo2BR3mUR3mUR3mUP3B5NAYe5VEe5VEe5VH+wOXRGHiUR3mUR3mUR/kDl0dj4FEe5VEe5VEe5Q9c/v+K9CqfwdHu0QAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Let's choose another image and ask the same question.\n", + "image_idx = 95\n", + "image = vid_frames[image_idx]\n", + "question = \"Can you describe what this man holding the cat is doing and how he feels?\"\n", + "\n", + "show_img(image)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The man holding the cat appears to be in a formal setting, possibly a restaurant or a club, as he is wearing a tuxedo. He is sitting in a chair and holding a cat in his lap. His expression suggests that he is feeling somewhat displeased or annoyed. It is possible that he is dealing with an unexpected situation or someone who has upset him.<|im_end|>\n" + ] + } + ], + "source": [ + "result = model.predict_forward(\n", + " image=image,\n", + " text=question,\n", + " tokenizer=tokenizer,\n", + ")\n", + "print(result['prediction'])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "vlm_demo", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.16" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demo.py b/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..c2d48f86155675e594cd8ea535c4b623825ddd53 --- /dev/null +++ b/demo.py @@ -0,0 +1,98 @@ +import argparse +import os + +from PIL import Image +from transformers import AutoModelForCausalLM, AutoTokenizer + +import cv2 +try: + from mmengine.visualization import Visualizer +except ImportError: + Visualizer = None + print("Warning: mmengine is not installed, visualization is disabled.") + + +def parse_args(): + parser = argparse.ArgumentParser(description='Video Reasoning Segmentation') + parser.add_argument('image_folder', help='Path to image file') + parser.add_argument('--model_path', default="ByteDance/Sa2VA-8B") + parser.add_argument('--work-dir', default=None, help='The dir to save results.') + parser.add_argument('--text', type=str, default="Please describe the video content.") + parser.add_argument('--select', type=int, default=-1) + args = parser.parse_args() + return args + + +def visualize(pred_mask, image_path, work_dir): + visualizer = Visualizer() + img = cv2.imread(image_path) + visualizer.set_image(img) + visualizer.draw_binary_masks(pred_mask, colors='g', alphas=0.4) + visual_result = visualizer.get_image() + + output_path = os.path.join(work_dir, os.path.basename(image_path)) + cv2.imwrite(output_path, visual_result) + +if __name__ == "__main__": + cfg = parse_args() + model_path = cfg.model_path + model = AutoModelForCausalLM.from_pretrained( + model_path, + torch_dtype="auto", + device_map="auto", + trust_remote_code=True + ) + + tokenizer = AutoTokenizer.from_pretrained( + model_path, + trust_remote_code=True + ) + + image_files = [] + image_paths = [] + image_extensions = {".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"} + for filename in sorted(list(os.listdir(cfg.image_folder))): + if os.path.splitext(filename)[1].lower() in image_extensions: + image_files.append(filename) + image_paths.append(os.path.join(cfg.image_folder, filename)) + + vid_frames = [] + for img_path in image_paths: + img = Image.open(img_path).convert('RGB') + 
vid_frames.append(img) + + + if cfg.select > 0: + img_frame = vid_frames[cfg.select - 1] + + print(f"Selected frame {cfg.select}") + print(f"The input is:\n{cfg.text}") + result = model.predict_forward( + image=img_frame, + text=cfg.text, + tokenizer=tokenizer, + ) + else: + print(f"The input is:\n{cfg.text}") + result = model.predict_forward( + video=vid_frames, + text=cfg.text, + tokenizer=tokenizer, + ) + + prediction = result['prediction'] + print(f"The output is:\n{prediction}") + + if '[SEG]' in prediction and Visualizer is not None: + _seg_idx = 0 + pred_masks = result['prediction_masks'][_seg_idx] + for frame_idx in range(len(vid_frames)): + pred_mask = pred_masks[frame_idx] + if cfg.work_dir: + os.makedirs(cfg.work_dir, exist_ok=True) + visualize(pred_mask, image_paths[frame_idx], cfg.work_dir) + else: + os.makedirs('./temp_visualize_results', exist_ok=True) + visualize(pred_mask, image_paths[frame_idx], './temp_visualize_results') + else: + pass diff --git a/demo/demo.py b/demo/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..c2d48f86155675e594cd8ea535c4b623825ddd53 --- /dev/null +++ b/demo/demo.py @@ -0,0 +1,98 @@ +import argparse +import os + +from PIL import Image +from transformers import AutoModelForCausalLM, AutoTokenizer + +import cv2 +try: + from mmengine.visualization import Visualizer +except ImportError: + Visualizer = None + print("Warning: mmengine is not installed, visualization is disabled.") + + +def parse_args(): + parser = argparse.ArgumentParser(description='Video Reasoning Segmentation') + parser.add_argument('image_folder', help='Path to image file') + parser.add_argument('--model_path', default="ByteDance/Sa2VA-8B") + parser.add_argument('--work-dir', default=None, help='The dir to save results.') + parser.add_argument('--text', type=str, default="Please describe the video content.") + parser.add_argument('--select', type=int, default=-1) + args = parser.parse_args() + return args + + +def visualize(pred_mask, image_path, work_dir): + visualizer = Visualizer() + img = cv2.imread(image_path) + visualizer.set_image(img) + visualizer.draw_binary_masks(pred_mask, colors='g', alphas=0.4) + visual_result = visualizer.get_image() + + output_path = os.path.join(work_dir, os.path.basename(image_path)) + cv2.imwrite(output_path, visual_result) + +if __name__ == "__main__": + cfg = parse_args() + model_path = cfg.model_path + model = AutoModelForCausalLM.from_pretrained( + model_path, + torch_dtype="auto", + device_map="auto", + trust_remote_code=True + ) + + tokenizer = AutoTokenizer.from_pretrained( + model_path, + trust_remote_code=True + ) + + image_files = [] + image_paths = [] + image_extensions = {".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"} + for filename in sorted(list(os.listdir(cfg.image_folder))): + if os.path.splitext(filename)[1].lower() in image_extensions: + image_files.append(filename) + image_paths.append(os.path.join(cfg.image_folder, filename)) + + vid_frames = [] + for img_path in image_paths: + img = Image.open(img_path).convert('RGB') + vid_frames.append(img) + + + if cfg.select > 0: + img_frame = vid_frames[cfg.select - 1] + + print(f"Selected frame {cfg.select}") + print(f"The input is:\n{cfg.text}") + result = model.predict_forward( + image=img_frame, + text=cfg.text, + tokenizer=tokenizer, + ) + else: + print(f"The input is:\n{cfg.text}") + result = model.predict_forward( + video=vid_frames, + text=cfg.text, + tokenizer=tokenizer, + ) + + prediction = result['prediction'] + print(f"The output 
is:\n{prediction}") + + if '[SEG]' in prediction and Visualizer is not None: + _seg_idx = 0 + pred_masks = result['prediction_masks'][_seg_idx] + for frame_idx in range(len(vid_frames)): + pred_mask = pred_masks[frame_idx] + if cfg.work_dir: + os.makedirs(cfg.work_dir, exist_ok=True) + visualize(pred_mask, image_paths[frame_idx], cfg.work_dir) + else: + os.makedirs('./temp_visualize_results', exist_ok=True) + visualize(pred_mask, image_paths[frame_idx], './temp_visualize_results') + else: + pass diff --git a/demo/requirements.txt b/demo/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..41966347b17320d1bf4ec644054006564c50c922 --- /dev/null +++ b/demo/requirements.txt @@ -0,0 +1,10 @@ +torch==2.3.1 +torchvision==0.18.1 +transformers==4.42.3 +opencv-python-headless<4.10 +peft<0.14.0 +timm==1.0.9 +einops==0.8.0 +flash_attn +sentencepiece==0.2.0 +mmengine<1 diff --git a/projects/glamm/datasets/__init__.py b/projects/glamm/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d2467502ed37bf7a9d1a6e28d620129abf5b0577 --- /dev/null +++ b/projects/glamm/datasets/__init__.py @@ -0,0 +1,7 @@ +from .semantic_seg_dataset import SemanticSegDataset, ADE20kSemanticSegDataset, \ + COCOStuffSemanticSegDataset, PascalPartSemanticSegDataset, PacoSemanticSegDataset +from .gcg_dataset import GCGDataset, GranDfGCGDataset, RefCOCOgGCGDataset, OpenPsgGCGDataset, Flickr30kGCGDataset +from .region_level_dataset import RefCocoGRegionDataset, VisualGenomeRegionDataset +from .refcoco_segm_dataset import ReferSegmDataset +from .utils.utils import * +from .collate_fns.glamm_collate_fn import glamm_collate_fn diff --git a/projects/glamm/datasets/collate_fns/glamm_collate_fn.py b/projects/glamm/datasets/collate_fns/glamm_collate_fn.py new file mode 100644 index 0000000000000000000000000000000000000000..ef28868df06f62099304a1cba034af77a6274149 --- /dev/null +++ b/projects/glamm/datasets/collate_fns/glamm_collate_fn.py @@ -0,0 +1,136 @@ +from typing import Dict, Sequence + +import torch +from torch.nn.utils.rnn import pad_sequence + +from xtuner.parallel.sequence import (get_sequence_parallel_world_size, + pad_for_sequence_parallel) +from xtuner.utils import DEFAULT_PAD_TOKEN_INDEX, IGNORE_INDEX + + +def glamm_collate_fn(instances: Sequence[Dict], + pad_index: int = DEFAULT_PAD_TOKEN_INDEX, + return_hf_format: bool = False, + use_varlen_attn: bool = False): + seq_parallel_world_size = get_sequence_parallel_world_size() + + input_ids, labels = [], [] + has_image = any(inst.get('pixel_values') is not None for inst in instances) + has_grounding_image = any(inst.get('g_pixel_values') is not None for inst in instances) + has_mask = any(inst.get('masks') is not None for inst in instances) + has_bboxes = any(inst.get('bboxes') is not None for inst in instances) + has_points = any(inst.get('points') is not None for inst in instances) + + if use_varlen_attn: + position_ids, cumulative_len = [], [] + assert len(instances) == 1, ( + f'If utilizing varlen attention, the batch size should be' + f' set to 1, but got {len(instances)}') + assert not has_image, 'Currently, it is not configured to ' + 'accommodate the use of varlen Attention in multimodal training' + + if has_image: + pixel_values = [] + if has_grounding_image: + grounding_pixel_values = [] + if has_mask: + object_masks = [] + if has_bboxes: + object_bboxes = [] + if has_points: + prompt_points = [] + + for example in instances: + input_ids.append(torch.LongTensor(example['input_ids'])) + 
labels.append(torch.LongTensor(example['labels'])) + if use_varlen_attn: + cumulative_len.append(torch.IntTensor(example['cumulative_len'])) + position_ids.append(torch.LongTensor(example['position_ids'])) + + if has_image: + pixel_values.append(example['pixel_values']) + if has_grounding_image: + grounding_pixel_values.append(example['g_pixel_values']) + if has_mask: + if 'masks' in example.keys() and example['masks'] is not None: + object_masks.append(example['masks']) + if has_bboxes: + if 'bboxes' in example.keys() and example['bboxes'] is not None: + object_bboxes.append(example['bboxes']) + if has_points: + if 'points' in example.keys() and example['points'] is not None: + prompt_points.append(example['points']) + + ori_length = [len(ids) for ids in input_ids] + if len(instances) > 1: + input_ids = pad_sequence( + input_ids, batch_first=True, padding_value=pad_index) + labels = pad_sequence( + labels, batch_first=True, padding_value=IGNORE_INDEX) + else: + input_ids = torch.stack(input_ids) + labels = torch.stack(labels) + + if use_varlen_attn: + assert input_ids.size(1) % seq_parallel_world_size == 0 + attention_mask = None + position_ids = torch.stack(position_ids, dim=0) + else: + # Some tokenizers have the same eos token and pad token, so input_ids + # cannot be masked directly based on the pad token id. + attention_mask = torch.zeros_like(input_ids).bool() + for i, length in enumerate(ori_length): + attention_mask[i, :length] = True + + bs, seq_len = input_ids.shape + position_ids = torch.arange(seq_len).unsqueeze(0).long().repeat(bs, 1) + + if seq_parallel_world_size > 1: + input_ids = pad_for_sequence_parallel(input_ids, pad_index) + labels = pad_for_sequence_parallel(labels, IGNORE_INDEX) + position_ids = pad_for_sequence_parallel(position_ids, 0) + if attention_mask is not None: + attention_mask = pad_for_sequence_parallel(attention_mask, 0) + + if use_varlen_attn: + max_seqlen = ( + cumulative_len[0][1:] - # noqa: W504 + cumulative_len[0][:-1]).max().item() + data_dict = { + 'input_ids': input_ids, + 'cumulative_len': cumulative_len, + 'position_ids': position_ids, + 'labels': labels, + 'max_seqlen': max_seqlen + } + else: + data_dict = { + 'input_ids': input_ids, + 'attention_mask': attention_mask, + 'position_ids': position_ids, + 'labels': labels + } + + if has_image: + if all(x.shape == pixel_values[0].shape for x in pixel_values): + pixel_values = torch.stack(pixel_values, dim=0) + data_dict['pixel_values'] = pixel_values + + if has_grounding_image: + # if all(x.shape == grounding_pixel_values[0].shape for x in grounding_pixel_values): + # grounding_pixel_values = torch.stack(grounding_pixel_values, dim=0) + data_dict['g_pixel_values'] = grounding_pixel_values + + if has_mask: + data_dict['masks'] = object_masks + + if has_bboxes: + data_dict['bboxes'] = object_bboxes + + if has_points: + data_dict['points'] = prompt_points + + if return_hf_format: + return data_dict + else: + return {'data': data_dict, 'data_samples': None} diff --git a/projects/glamm/datasets/gcg_dataset.py b/projects/glamm/datasets/gcg_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..2b89ec9f314af103563b80ee13cd79589addb1ae --- /dev/null +++ b/projects/glamm/datasets/gcg_dataset.py @@ -0,0 +1,349 @@ +import copy +import random +import glob +import json +import logging +import os +import torch + +from mmengine import print_log +from mmengine.config import Config, ConfigDict +from PIL import Image +from torch.utils.data import Dataset +import numpy as np +import 
torch.nn.functional as F +from pycocotools.coco import COCO +from pycocotools import mask as mask_utils + +from xtuner.registry import BUILDER + +from xtuner.dataset.utils import encode_fn +from xtuner.dataset.map_fns import llava_map_fn + +from projects.glamm.datasets.utils.utils import expand2square + +from projects.glamm.datasets.utils.utils import GCG_QUESTIONS, ANSWER_LIST +from projects.glamm.utils import DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN +class GCGDataset(Dataset): + def __init__(self, + image_folder, + image_processor, + data_path=None, + tokenizer=None, + template_map_fn=None, + max_length=2048, + pad_image_to_square=False, + repeats=1, + num_classes_per_sample=3, + extra_image_processor=None): + super().__init__() + self.question_templates = GCG_QUESTIONS + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + self.num_classes_per_sample = num_classes_per_sample + self.tokenizer = BUILDER.build(tokenizer) + + self.tokenizer.add_tokens( + [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True + ) + reg_tokens = ['', ''] + segmentation_tokens = ['[SEG]'] + phrase_tokens = ['

<p>', '</p>
'] + special_tokens = reg_tokens + segmentation_tokens + phrase_tokens + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.max_length = max_length + self.template_map_fn = BUILDER.build(template_map_fn) + + self.text_data = self.json_file_preprocess(data_path, image_folder) + self.image_folder = image_folder + + self.image_processor = BUILDER.build(image_processor) + size = self.image_processor.crop_size + + if isinstance(size, dict): + self.image_w, self.image_h = size['width'], size['height'] + elif isinstance(size, int): + self.image_h, self.image_w = size, size + else: + self.image_w, self.image_h = size + + self.pad_image_to_square = pad_image_to_square + self.repeats = repeats + + def json_file_preprocess(self, data_path, image_folder=None): + with open(data_path, 'r') as f: + json_data = json.load(f) + return json_data + + @property + def modality_length(self): + length_list = [] + for data_dict in self.text_data: + cur_len = 100 + length_list.append(cur_len) + return length_list * self.repeats + + def __len__(self): + return len(self.text_data) * self.repeats + + def real_len(self): + return len(self.text_data) + + def _parse_annotations(self, ann_info): + image_path = os.path.join(self.image_folder, ann_info['file_name']) + image = Image.open(image_path).convert('RGB') + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous() + ann_info['g_pixel_values'] = g_pixel_values + + width, height = image.size + if self.pad_image_to_square: + image = expand2square( + image, tuple(int(x * 255) for x in self.image_processor.image_mean)) + image = self.image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] + ann_info['pixel_values'] = image + + caption = ann_info['caption'].strip('"').strip() + masks, phrases, tokens_positive = [], [], [] + for word, grounding in ann_info["groundings"].items(): + phrases.append(word) + tokens_positive.append(grounding["token_positives"]) + + # Convert segmentation to binary mask + binary_mask = np.zeros((height, width), dtype=np.uint8) + for rle in grounding["rle_masks"]: + m = mask_utils.decode(rle).astype(np.uint8) + binary_mask += m.squeeze() + masks.append(binary_mask) + + def sort_by_start_index(items, order): + return [items[i] for i in order] + + phrase_order = sorted(range(len(tokens_positive)), key=lambda x: tokens_positive[x][0]) + masks = sort_by_start_index(masks, phrase_order) + phrases = sort_by_start_index(phrases, phrase_order) + tokens_positive = sort_by_start_index(tokens_positive, phrase_order) + + ann_info.update({ + 'image_path': image_path, + 'caption': caption, + 'masks': masks, + 'phrases': phrases, + 'tokens_positive': tokens_positive, + }) + return ann_info + + def create_conversation(self, caption, tokens_positive): + question = random.choice(self.question_templates).strip() + + # Prepare caption with tags + def tag_caption(caption, tokens): + for start, end in sorted(tokens, key=lambda x: x[0], reverse=True): + caption = f"{caption[:start]}

<p> {caption[start:end]} </p>
[SEG]{caption[end:]}" + return caption + + detailed_answer = tag_caption(caption, tokens_positive) + + question = 'The provides an overview of the picture.\n' + question + conversation = [{'input': question, 'output': detailed_answer}] + return conversation + + def __getitem__(self, index): + index = index % self.real_len() + data_dict = {} + ann_info = copy.deepcopy(self.text_data[index]) + ann_info = self._parse_annotations(ann_info) + + data_dict['g_pixel_values'] = ann_info.pop('g_pixel_values') + data_dict['pixel_values'] = ann_info.pop('pixel_values') + if len(ann_info['masks']) == 0: + return self.__getitem__(0) + data_dict['masks'] = torch.from_numpy(np.stack(ann_info['masks'], axis=0)) + + conversation = self.create_conversation(ann_info['caption'], ann_info['tokens_positive']) + data_dict['conversation'] = conversation + + result = self.template_map_fn(data_dict) + data_dict.update(result) + + result = encode_fn(data_dict, tokenizer=self.tokenizer, max_length=self.max_length, with_image_token=True) + data_dict.update(result) + + return data_dict + +class GranDfGCGDataset(GCGDataset): + pass +class RefCOCOgGCGDataset(GCGDataset): + def json_file_preprocess(self, data_path, image_folder=None): + with open(data_path, 'r') as f: + json_data = json.load(f) + return [list(line.values())[0] for line in json_data] + + def _parse_annotations(self, ann_info): + image_path = os.path.join(self.image_folder, ann_info['img_file_name']) + image = Image.open(image_path).convert('RGB') + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous() + ann_info['g_pixel_values'] = g_pixel_values + + width, height = image.size + if self.pad_image_to_square: + image = expand2square( + image, tuple(int(x * 255) for x in self.image_processor.image_mean)) + image = self.image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] + ann_info['pixel_values'] = image + + caption = ann_info['caption'].strip('"').strip().lower() + masks, phrases, tokens_positive = [], [], [] + for detail in ann_info['refs']: + phrase = detail['sentence'] + if phrase.lower() in caption: + phrases.append(phrase) + index = caption.find(phrase) + end_index = index + len(phrase) if index != -1 else -1 + tokens_positive.append([index, end_index]) + + binary_mask = np.zeros((height, width), dtype=np.uint8) + for seg in detail["segmentation"]: + rles = mask_utils.frPyObjects([seg], height, width) + m = mask_utils.decode(rles) + m = m.astype(np.uint8) + binary_mask += m.squeeze() + masks.append(binary_mask) + + def sort_by_start_index(items, order): + return [items[i] for i in order] + + phrase_order = sorted(range(len(tokens_positive)), key=lambda x: tokens_positive[x][0]) + masks = sort_by_start_index(masks, phrase_order) + phrases = sort_by_start_index(phrases, phrase_order) + tokens_positive = sort_by_start_index(tokens_positive, phrase_order) + + ann_info.update({ + 'image_path': image_path, + 'caption': caption, + 'masks': masks, + 'phrases': phrases, + 'tokens_positive': tokens_positive, + }) + return ann_info + +class OpenPsgGCGDataset(GCGDataset): + pass + +class Flickr30kGCGDataset(GCGDataset): + + def json_file_preprocess(self, data_path, image_folder=None): + def filter_images(data_infos, min_size): + return [i for i, info in enumerate(data_infos) if min(info['width'], info['height']) >= min_size] + + self.coco = COCO(data_path) + self.image_ids = 
self.coco.getImgIds() + data_infos = [] + total_ann_ids = [] + removed_img_count = 0 + for img_id in self.image_ids: + info = self.coco.loadImgs([img_id])[0] + if len(info['caption'].split(' ')) < 3: + removed_img_count += 1 + continue + info['filename'] = info['file_name'].split('_')[-1] + info['height'] = int(info['height']) + info['width'] = int(info['width']) + data_infos.append(info) + ann_ids = self.coco.getAnnIds(imgIds=[img_id]) + total_ann_ids.extend(ann_ids) + assert len(set(total_ann_ids)) == len(total_ann_ids), f"Non-unique annotation IDs in '{data_path}'!" + print(f'Removed {removed_img_count} images.') + data_infos = [data_infos[i] for i in filter_images(data_infos, min_size=32)] + + return data_infos + + def _parse_annotations(self, img_info): + ann_ids = self.coco.getAnnIds(imgIds=img_info['id']) + ann_info = self.coco.loadAnns(ann_ids) + + annotations = {'phrases': [], 'caption': img_info['caption'], 'masks': [], 'tokens_positive': []} + image_path = os.path.join(self.image_folder, img_info['file_name']) + image = Image.open(image_path).convert('RGB') + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous() + annotations['g_pixel_values'] = g_pixel_values + + width, height = image.size + if self.pad_image_to_square: + image = expand2square( + image, tuple(int(x * 255) for x in self.image_processor.image_mean)) + image = self.image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] + annotations['pixel_values'] = image + + for ann in ann_info: + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0)) + inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0)) + if inter_w * inter_h == 0 or ann['area'] <= 0 or w < 1 or h < 1: + continue + bbox = [x1, y1, x1 + w, y1 + h] + tokens_positive = ann['tokens_positive'] + phrase = [img_info['caption'][span[0]:span[1]] for span in tokens_positive] + annotations['phrases'].append(phrase[0]) + annotations['tokens_positive'].append(tokens_positive[0]) + + rle = ann['sam_mask'] + mask_decoded = mask_utils.decode(rle).astype(np.uint8) + annotations['masks'].append(mask_decoded) + + def sort_by_start_index(items, order): + return [items[i] for i in order] + + phrase_order = sorted(range(len(annotations['tokens_positive'])), key=lambda x: annotations['tokens_positive'][x][0]) + annotations['masks'] = sort_by_start_index(annotations['masks'], phrase_order) + annotations['phrases'] = sort_by_start_index(annotations['phrases'], phrase_order) + annotations['tokens_positive'] = sort_by_start_index(annotations['tokens_positive'], phrase_order) + + return annotations + +if __name__ == '__main__': + from transformers import CLIPImageProcessor, AutoTokenizer + from third_parts.segment_anything.utils.transforms import ResizeLongestSide + pretrained_model = 'MBZUAI/GLaMM-GranD-Pretrained' + llm_name_or_path = 'lmsys/vicuna-7b-v1.5' + + tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path) + image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path='openai/clip-vit-large-patch14-336') + extra_image_processor = dict( + type=ResizeLongestSide, + target_length=1024, + ) + from xtuner.utils.templates import PROMPT_TEMPLATE + prompt_template = PROMPT_TEMPLATE.vicuna + from xtuner.dataset.map_fns import 
llava_map_fn, template_map_fn_factory, template_map_fn + from projects.glamm.datasets.collate_fns.glamm_collate_fn import glamm_collate_fn + dataset = Flickr30kGCGDataset( + image_folder='data/flickr30k/flickr30k-images/', + image_processor=image_processor, + data_path='./data/GranDf/annotations/train/flickr_mergedGT_GCG_train.json', + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=2048, + pad_image_to_square=True, + repeats=1, + num_classes_per_sample=3, + extra_image_processor=extra_image_processor) + + for i in range(1000): + print(dataset[i]) \ No newline at end of file diff --git a/projects/glamm/datasets/refcoco_segm_dataset.py b/projects/glamm/datasets/refcoco_segm_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..d954cf45df913fa56d30e736bf84288a868ef494 --- /dev/null +++ b/projects/glamm/datasets/refcoco_segm_dataset.py @@ -0,0 +1,195 @@ +import copy +import random +import glob +import json +import logging +import os +import torch + +from mmengine import print_log +from mmengine.config import Config, ConfigDict +from PIL import Image +from torch.utils.data import Dataset +import numpy as np +import torch.nn.functional as F +from pycocotools.coco import COCO +from pycocotools import mask as mask_utils + +from xtuner.registry import BUILDER + +from xtuner.dataset.utils import encode_fn +from xtuner.dataset.map_fns import llava_map_fn + +from projects.glamm.datasets.utils.utils import expand2square + +from projects.glamm.datasets.utils.utils import SEG_QUESTIONS, ANSWER_LIST +from projects.glamm.utils import DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN + +from third_parts.mmdet.datasets.refcoco import RefCocoDataset + + +class ReferSegmDataset(RefCocoDataset): + def __init__(self, + data_root, + ann_file=None, + split_file=None, + image_processor=None, + extra_image_processor=None, + data_prefix=dict(img_path='train2014/'), + tokenizer=None, + template_map_fn=None, + max_length=2048, + pad_image_to_square=False, + num_classes_per_sample=3): + super().__init__( + data_root=data_root, + data_prefix=data_prefix, + pipeline=None, + ann_file=ann_file, + split_file=split_file, + ) + self.begin_str = f"""{DEFAULT_IMAGE_TOKEN} provides an overview of the picture.\n""" + + self.question_templates = SEG_QUESTIONS + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + self.num_classes_per_sample = num_classes_per_sample + self.tokenizer = BUILDER.build(tokenizer) + + self.tokenizer.add_tokens( + [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True + ) + reg_tokens = ['', ''] + segmentation_tokens = ['[SEG]'] + phrase_tokens = ['

<p>', '</p>
'] + special_tokens = reg_tokens + segmentation_tokens + phrase_tokens + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.max_length = max_length + self.template_map_fn = BUILDER.build(template_map_fn) + + self.image_processor = BUILDER.build(image_processor) + size = self.image_processor.crop_size + if isinstance(size, dict): + self.image_w, self.image_h = size['width'], size['height'] + self.pad_image_to_square = pad_image_to_square + + @property + def modality_length(self): + import pickle + length_list = [] + for idx in range(len(self)): + length_list.append(100) + # for idx in range(len(self)): + # if self.serialize_data: + # start_addr = 0 if idx == 0 else self.data_address[idx - 1].item() + # end_addr = self.data_address[idx].item() + # bytes = memoryview( + # self.data_bytes[start_addr:end_addr]) # type: ignore + # data_dict = pickle.loads(bytes) + # else: + # data_dict = copy.deepcopy(self.data_list[idx]) + return length_list + + def _parse_annotations(self, ann_info): + image_path = ann_info['img_path'] + image = Image.open(image_path).convert('RGB') + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy( + g_image).permute(2, 0, 1).contiguous() + ann_info['g_pixel_values'] = g_pixel_values + + width, height = image.size + if self.pad_image_to_square: + image = expand2square( + image, tuple(int(x * 255) for x in self.image_processor.image_mean)) + image = self.image_processor.preprocess( + image, return_tensors='pt')['pixel_values'][0] + ann_info['pixel_values'] = image + + masks, phrases = [], [] + instances, text = ann_info['instances'], ann_info['text'] + index = np.random.choice(range(len(instances)), min( + len(instances), self.num_classes_per_sample)) + for idx in index: + inst = instances[idx] + phrase = text[idx].lower() + phrases.append(phrase) + binary_mask = np.zeros((height, width), dtype=np.uint8) + for seg in inst["mask"]: + rles = mask_utils.frPyObjects([seg], height, width) + m = mask_utils.decode(rles) + m = m.astype(np.uint8) + binary_mask += m.squeeze() + masks.append(binary_mask) + + ann_info.update({ + 'masks': masks, + 'phrases': phrases, + }) + return ann_info + + def __getitem__(self, idx): + data_dict = {} + ann_info = super().__getitem__(idx) + ann_info = self._parse_annotations(ann_info) + + data_dict['g_pixel_values'] = ann_info.pop('g_pixel_values') + data_dict['pixel_values'] = ann_info.pop('pixel_values') + if len(ann_info['masks']) == 0: + return self.__getitem__(0) + data_dict['masks'] = torch.from_numpy( + np.stack(ann_info['masks'], axis=0)) + + conversation = [] + for i, phrase in enumerate(ann_info['phrases']): + question = random.choice(SEG_QUESTIONS).format(class_name=phrase) + conversation.append( + {'input': question, 'output': random.choice(ANSWER_LIST)}) + + data_dict['conversation'] = conversation + result = self.template_map_fn(data_dict) + data_dict.update(result) + + result = encode_fn(data_dict, tokenizer=self.tokenizer, + max_length=self.max_length, with_image_token=True) + data_dict.update(result) + + return data_dict + +if __name__ == '__main__': + from transformers import CLIPImageProcessor, AutoTokenizer + from third_parts.segment_anything.utils.transforms import ResizeLongestSide + pretrained_model = 'MBZUAI/GLaMM-GranD-Pretrained' + llm_name_or_path = 'lmsys/vicuna-7b-v1.5' + + tokenizer = dict( + type=AutoTokenizer.from_pretrained, + 
pretrained_model_name_or_path=llm_name_or_path) + image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path='openai/clip-vit-large-patch14-336') + extra_image_processor = dict( + type=ResizeLongestSide, + target_length=1024, + ) + from xtuner.utils.templates import PROMPT_TEMPLATE + prompt_template = PROMPT_TEMPLATE.vicuna + from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory, template_map_fn + from projects.glamm.datasets.collate_fns.glamm_collate_fn import glamm_collate_fn + + dataset = ReferSegmDataset( + tokenizer=tokenizer, + image_processor=image_processor, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + extra_image_processor=extra_image_processor, + data_root='data/coco/', + data_prefix=dict(img_path='train2014/'), + ann_file='refcoco+/instances.json', + split_file='refcoco+/refs(unc).p', + ) + for i in range(1000): + dataset[i] diff --git a/projects/glamm/datasets/region_level_dataset.py b/projects/glamm/datasets/region_level_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..85a43f62ef4539e6dc20908a91afce7036c05826 --- /dev/null +++ b/projects/glamm/datasets/region_level_dataset.py @@ -0,0 +1,297 @@ +import copy +import random +import glob +import json +import logging +import os +import torch + +from mmengine import print_log +from mmengine.config import Config, ConfigDict +from PIL import Image +from torch.utils.data import Dataset +import numpy as np +import torch.nn.functional as F +from pycocotools.coco import COCO +from pycocotools import mask as mask_utils + +from xtuner.registry import BUILDER + +from xtuner.dataset.utils import encode_fn +from xtuner.dataset.map_fns import llava_map_fn + +from projects.glamm.datasets.utils.utils import expand2square + +from projects.glamm.datasets.utils.utils import ANSWER_LIST, REGION_QUESTIONS +from projects.glamm.utils import DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN + + +class RegionDataset(Dataset): + def __init__(self, + image_folder, + image_processor, + data_path=None, + tokenizer=None, + template_map_fn=None, + max_length=2048, + pad_image_to_square=False, + repeats=1, + num_classes_per_sample=3, + extra_image_processor=None): + super().__init__() + + self.begin_str = f"""{DEFAULT_IMAGE_TOKEN} provides an overview of the picture.\n""" + self.question_templates = REGION_QUESTIONS + + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + self.num_classes_per_sample = num_classes_per_sample + self.tokenizer = BUILDER.build(tokenizer) + + self.tokenizer.add_tokens( + [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True + ) + reg_tokens = ['', ''] + segmentation_tokens = ['[SEG]'] + phrase_tokens = ['

<p>', '</p>
'] + special_tokens = reg_tokens + segmentation_tokens + phrase_tokens + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.max_length = max_length + self.template_map_fn = BUILDER.build(template_map_fn) + + self.text_data = self._load_annotations(data_path, image_folder) + self.image_folder = image_folder + + self.image_processor = BUILDER.build(image_processor) + size = self.image_processor.crop_size + + if isinstance(size, dict): + self.image_w, self.image_h = size['width'], size['height'] + elif isinstance(size, int): + self.image_h, self.image_w = size, size + else: + self.image_w, self.image_h = size + + self.pad_image_to_square = pad_image_to_square + self.repeats = repeats + + def _load_annotations(self, data_path, image_folder=None): + self.coco = COCO(data_path) + img_ids = self.coco.getImgIds() + data_infos = [] + for img_id in img_ids: + info = self.coco.loadImgs([img_id])[0] + info['filename'] = info['file_name'].split('_')[-1] + info['height'] = int(info['height']) + info['width'] = int(info['width']) + if min(info['height'], info['width']) < 32: + continue + data_infos.append(info) + return data_infos + + @property + def modality_length(self): + length_list = [] + for data_dict in self.text_data: + cur_len = 100 + length_list.append(cur_len) + return length_list * self.repeats + + def __len__(self): + return len(self.text_data) * self.repeats + + def real_len(self): + return len(self.text_data) + + def region_processor(self, orig_size, post_size, bboxes, labels): + orig_h, orig_w = orig_size + post_h, post_w = post_size + y_scale = post_h / orig_h + x_scale = post_w / orig_w + shuffle_ids = torch.randperm(len(labels))[:self.num_classes_per_sample] + selected_bboxes = bboxes[shuffle_ids] + + # Ensure selected_bboxes is two-dimensional + if len(selected_bboxes.shape) == 1: + selected_bboxes = np.expand_dims(selected_bboxes, axis=0) + + selected_labels = [labels[i] for i in shuffle_ids] + selected_bboxes[:, [0, 2]] *= x_scale + selected_bboxes[:, [1, 3]] *= y_scale + selected_bboxes = torch.tensor( + selected_bboxes, dtype=torch.float32) / post_h + return selected_bboxes, selected_labels + + def _parse_annotations(self, img_info): + data_dict = {} + bboxes, captions = [], [] + ann_info = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_info['id'])) + image_path = os.path.join(self.image_folder, img_info['file_name']) + image = Image.open(image_path).convert('RGB') + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy( + g_image).permute(2, 0, 1).contiguous() + data_dict['g_pixel_values'] = g_pixel_values + + orig_w, orig_h = image.size + if self.pad_image_to_square: + image = expand2square( + image, tuple(int(x * 255) for x in self.image_processor.image_mean)) + image = self.image_processor.preprocess( + image, return_tensors='pt')['pixel_values'][0] + post_h, post_w = image.shape[1:3] + data_dict['pixel_values'] = image + + for ann in ann_info: + if ann.get('ignore', False) or ann['area'] <= 0 or ann['bbox'][2] < 1 or ann['bbox'][3] < 1: + continue + x1, y1, w, h = ann['bbox'] + inter_w = max(0, min(x1 + w, orig_w) - max(x1, 0)) + inter_h = max(0, min(y1 + h, orig_h) - max(y1, 0)) + if inter_w * inter_h == 0: + continue + bbox = [x1, y1, x1 + w, y1 + h] + + if bbox: + bboxes.append(bbox) + captions.append(img_info['caption']) + + if len(bboxes) == 0: + return self.__getitem__(0) + + bboxes = np.array(bboxes, dtype=np.float32) 
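+        # Derive the segmentation-map filename from the image file name, then rescale the sampled boxes to the processed image size via region_processor.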
+ seg_map = img_info['file_name'].replace('jpg', 'png') + bboxes, captions = self.region_processor((orig_h, orig_w), (post_h, post_w), bboxes, captions) + + data_dict['bboxes'] = bboxes + data_dict['captions'] = captions + data_dict['seg_map'] = seg_map + return data_dict + + def create_conversation(self, captions): + questions = [] + answers = [] + for i, label in enumerate(captions): + question = random.choice(self.question_templates).strip().replace('', f'region{i + 1} ') + questions.append(question) + answers.append(label) + + conversation = [] + for i, (question, answer) in enumerate(zip(questions, answers)): + if i == 0: + question = self.begin_str + question + conversation.append({'input': question, 'output': answer}) + return conversation + + def __getitem__(self, index): + index = index % self.real_len() + data_dict = {} + ann_info = copy.deepcopy(self.text_data[index]) + ann_info = self._parse_annotations(ann_info) + + data_dict['g_pixel_values'] = ann_info.pop('g_pixel_values', None) + data_dict['pixel_values'] = ann_info.pop('pixel_values') + data_dict['bboxes'] = ann_info.pop('bboxes', None) + + conversation = self.create_conversation(ann_info['captions']) + data_dict['conversation'] = conversation + + result = self.template_map_fn(data_dict) + data_dict.update(result) + + result = encode_fn(data_dict, tokenizer=self.tokenizer, + max_length=self.max_length, with_image_token=True) + data_dict.update(result) + + return data_dict + +class RefCocoGRegionDataset(RegionDataset): + pass + +class VisualGenomeRegionDataset(RegionDataset): + def _parse_annotations(self, img_info): + data_dict = {} + bboxes, captions = [], [] + ann_info = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_info['id'])) + image_path = os.path.join(self.image_folder, img_info['file_name']) + image = Image.open(image_path).convert('RGB') + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy( + g_image).permute(2, 0, 1).contiguous() + data_dict['g_pixel_values'] = g_pixel_values + + orig_w, orig_h = image.size + if self.pad_image_to_square: + image = expand2square( + image, tuple(int(x * 255) for x in self.image_processor.image_mean)) + image = self.image_processor.preprocess( + image, return_tensors='pt')['pixel_values'][0] + post_h, post_w = image.shape[1:3] + data_dict['pixel_values'] = image + + for ann in ann_info: + if ann.get('ignore', False) or ann['area'] <= 0 or ann['bbox'][2] < 1 or ann['bbox'][3] < 1: + continue + x1, y1, w, h = ann['bbox'] + inter_w = max(0, min(x1 + w, orig_w) - max(x1, 0)) + inter_h = max(0, min(y1 + h, orig_h) - max(y1, 0)) + if inter_w * inter_h == 0: + continue + bbox = [x1, y1, x1 + w, y1 + h] + + if bbox: + bboxes.append(bbox) + captions.append(ann['caption'].strip()) + + if len(bboxes) == 0: + return self.__getitem__(0) + + bboxes = np.array(bboxes, dtype=np.float32) + seg_map = img_info['file_name'].replace('jpg', 'png') + bboxes, captions = self.region_processor((orig_h, orig_w), (post_h, post_w), bboxes, captions) + + data_dict['bboxes'] = bboxes + data_dict['captions'] = captions + data_dict['seg_map'] = seg_map + return data_dict + +if __name__ == '__main__': + from transformers import CLIPImageProcessor, AutoTokenizer + from third_parts.segment_anything.utils.transforms import ResizeLongestSide + pretrained_model = 'MBZUAI/GLaMM-GranD-Pretrained' + llm_name_or_path = 'lmsys/vicuna-7b-v1.5' + + tokenizer = dict( + 
type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path) + image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path='openai/clip-vit-large-patch14-336') + extra_image_processor = dict( + type=ResizeLongestSide, + target_length=1024, + ) + from xtuner.utils.templates import PROMPT_TEMPLATE + prompt_template = PROMPT_TEMPLATE.vicuna + from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory, template_map_fn + from projects.glamm.datasets.collate_fns.glamm_collate_fn import glamm_collate_fn + dataset = VisualGenomeRegionDataset( + image_folder='./data/visual_genome/images', + image_processor=image_processor, + data_path='data/visual_genome/train.json', + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=2048, + pad_image_to_square=False, + repeats=1, + num_classes_per_sample=3, + extra_image_processor=None) + + for i in range(1000): + print(dataset[i]) diff --git a/projects/glamm/datasets/semantic_seg_dataset.py b/projects/glamm/datasets/semantic_seg_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..5f1f9e77783ac1e84ea4b2ee39d5a7701cc602d9 --- /dev/null +++ b/projects/glamm/datasets/semantic_seg_dataset.py @@ -0,0 +1,424 @@ +import copy +import random +import glob +import json +import logging +import os +import torch + +from mmengine import print_log +from mmengine.config import Config, ConfigDict +from PIL import Image +from torch.utils.data import Dataset +import numpy as np +import torch.nn.functional as F +from pycocotools.coco import COCO + +from xtuner.registry import BUILDER + +from xtuner.dataset.utils import encode_fn +from xtuner.dataset.map_fns import llava_map_fn + +from projects.glamm.datasets.utils.utils import expand2square + +from projects.glamm.datasets.utils.utils import SEG_QUESTIONS, ANSWER_LIST +from projects.glamm.utils import DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN + + +class SemanticSegDataset(Dataset): + def __init__(self, + image_folder, + image_processor, + data_path=None, + tokenizer=None, + offline_processed_text_folder=None, + max_dataset_length=None, + dataset_map_fn=None, + template_map_fn=None, + max_length=2048, + pad_image_to_square=False, + num_proc=8, + lazy=False, + repeats=1, + gcg_format=False, + num_classes_per_sample=3, + extra_image_processor=None): + super().__init__() + self.gcg_format = gcg_format + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + self.num_classes_per_sample = num_classes_per_sample + self.tokenizer = BUILDER.build(tokenizer) + + self.tokenizer.add_tokens( + [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True + ) + reg_tokens = ['', ''] + segmentation_tokens = ['[SEG]'] + phrase_tokens = ['

<p>', '</p>
'] + special_tokens = reg_tokens + segmentation_tokens + phrase_tokens + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + assert offline_processed_text_folder or (data_path and tokenizer) + self.lazy = lazy + + self.max_length = max_length + self.dataset_map_fn = dataset_map_fn + self.template_map_fn = template_map_fn + if isinstance(self.template_map_fn, dict) and self.lazy: + _type = self.template_map_fn['type'] + del self.template_map_fn['type'] + self.template_map_fn = _type(**self.template_map_fn) + + if offline_processed_text_folder and data_path: + print_log( + 'Both `offline_processed_text_folder` and ' + '`data_path` are set, and we load dataset from' + '`offline_processed_text_folder` ' + f'({offline_processed_text_folder})', + logger='current', + level=logging.WARNING) + + if offline_processed_text_folder is not None: + raise NotImplementedError + else: + self.image_label_datas = self.json_file_preprocess(data_path, image_folder) + + self.image_folder = image_folder + + if isinstance(image_processor, dict) or isinstance(image_processor, Config) or isinstance(image_processor, ConfigDict): + self.image_processor = BUILDER.build(image_processor) + else: + self.image_processor = image_processor + + size = self.image_processor.crop_size + + if isinstance(size, dict): + self.image_w, self.image_h = size['width'], size['height'] + elif isinstance(size, int): + self.image_h, self.image_w = size, size + else: + self.image_w, self.image_h = size + + self.pad_image_to_square = pad_image_to_square + self.down_ratio = 1 + self.repeats = repeats + + def json_file_preprocess(self, data_path, image_folder): + # ade20k + with open(data_path, 'r') as file: + ade20k_classes = json.load(file) + ade20k_image_dir = image_folder + ade20k_images = [os.path.join(ade20k_image_dir, img) for img in os.listdir(ade20k_image_dir) if + img.endswith('.jpg')] + ade20k_labels = [img.replace(".jpg", ".png").replace( + "images", "annotations") for img in ade20k_images] + self.classes = np.array(ade20k_classes) + + ret = [] + for image, label in zip(ade20k_images, ade20k_labels): + ret.append({"image": image, "label": label}) + return ret + + def __len__(self): + return len(self.image_label_datas) * self.repeats + + @property + def modality_length(self): + length_list = [] + for data_dict in self.image_label_datas: + length_list.append(100) + length_list = length_list * self.repeats + return length_list + + def real_len(self): + return len(self.image_label_datas) + + def decode_mask(self, label_path): + label = np.array(Image.open(label_path)) + + # ade20k + label = np.where(label == 0, 255, label - 1) + unique_labels = [lbl for lbl in np.unique(label) if lbl != 255] + if not unique_labels: + return None, None + + selected_labels = np.random.choice(unique_labels, min( + len(unique_labels), self.num_classes_per_sample), replace=False) + label = torch.from_numpy(label).long() + masks = torch.stack([label == class_id for class_id in selected_labels], dim=0) + return masks, selected_labels + + def __getitem__(self, index): + index = index % self.real_len() + data_dict = copy.deepcopy(self.image_label_datas[index]) + + assert 'image' in data_dict.keys() + if data_dict.get('image', None) is not None: + image_file = data_dict['image'] + image = Image.open(image_file).convert('RGB') + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous() + 
data_dict['g_pixel_values'] = g_pixel_values + + ori_width, ori_height = image.size + if self.pad_image_to_square: + image = expand2square(image, tuple(int(x * 255) + for x in self.image_processor.image_mean)) + image = self.image_processor.preprocess( + image, return_tensors='pt')['pixel_values'][0] + data_dict['pixel_values'] = image + + # process and get masks + data_dict['masks'], class_id = self.decode_mask(data_dict['label']) + if class_id is None: + return self.__getitem__(0) + + if self.gcg_format: + pass + else: + conversation = [] + for i, c_id in enumerate(class_id): + question = random.choice(SEG_QUESTIONS).format( + class_name=self.classes[c_id].lower()) + if i == 0: + question = f"""The {DEFAULT_IMAGE_TOKEN} provides an overview of the picture.\n""" + question + conversation.append( + {'input': question, 'output': random.choice(ANSWER_LIST)}) + + data_dict.update({'conversation': conversation}) + else: + if hasattr(self.image_processor, 'crop_size'): + crop_size = self.image_processor.crop_size + else: + crop_size = self.image_processor.size + data_dict['pixel_values'] = torch.zeros(3, crop_size['height'], + crop_size['width']) + data_dict['masks'] = None + + if self.lazy: + result = self.template_map_fn(data_dict) + data_dict.update(result) + + result = encode_fn(data_dict, tokenizer=self.tokenizer, + max_length=self.max_length, with_image_token=True) + data_dict.update(result) + + return data_dict + +class ADE20kSemanticSegDataset(SemanticSegDataset): + def __init__(self, + image_folder, + image_processor, + data_path=None, + tokenizer=None, + offline_processed_text_folder=None, + max_dataset_length=None, + dataset_map_fn=None, + template_map_fn=None, + max_length=2048, + pad_image_to_square=False, + num_proc=8, + lazy=False, + repeats=1, + gcg_format=False, + num_classes_per_sample=3, + extra_image_processor=None): + super().__init__( + image_folder=image_folder, + image_processor=image_processor, + data_path=data_path, + tokenizer=tokenizer, + offline_processed_text_folder=offline_processed_text_folder, + max_dataset_length=max_dataset_length, + dataset_map_fn=dataset_map_fn, + template_map_fn=template_map_fn, + max_length=max_length, + pad_image_to_square=pad_image_to_square, + num_proc=num_proc, + lazy=lazy, + repeats=repeats, + gcg_format=gcg_format, + num_classes_per_sample=num_classes_per_sample, + extra_image_processor=extra_image_processor, + ) + +class COCOStuffSemanticSegDataset(SemanticSegDataset): + def __init__(self, + image_folder, + image_processor, + data_path=None, + tokenizer=None, + offline_processed_text_folder=None, + max_dataset_length=None, + dataset_map_fn=None, + template_map_fn=None, + max_length=2048, + pad_image_to_square=False, + num_proc=8, + lazy=False, + repeats=1, + label_path=None, + gcg_format=False, + num_classes_per_sample=3, + extra_image_processor=None): + self.label_path = label_path + super().__init__( + image_folder=image_folder, + image_processor=image_processor, + data_path=data_path, + tokenizer=tokenizer, + offline_processed_text_folder=offline_processed_text_folder, + max_dataset_length=max_dataset_length, + dataset_map_fn=dataset_map_fn, + template_map_fn=template_map_fn, + max_length=max_length, + pad_image_to_square=pad_image_to_square, + num_proc=num_proc, + lazy=lazy, + repeats=repeats, + gcg_format=gcg_format, + num_classes_per_sample=num_classes_per_sample, + extra_image_processor=extra_image_processor, + ) + self.cocostuff_class2index = {c: i for i, c in enumerate(self.classes)} + + def json_file_preprocess(self, 
data_path, image_folder): + # coco stuff + assert self.label_path is not None + with open(data_path, 'r') as file: + cocostuff_classes = [line.strip().split(": ")[-1] + for line in file.readlines()[1:]] + coco_stuff_image_dir = image_folder + coco_stuff_label_dir = self.label_path + coco_stuff_labels = glob.glob( + os.path.join(coco_stuff_label_dir, "*.png")) + + coco_stuff_images = [label.replace(".png", ".jpg").replace(coco_stuff_label_dir, coco_stuff_image_dir) + for label in coco_stuff_labels] + + self.classes = np.array(cocostuff_classes) + + ret = [] + for image, label in zip(coco_stuff_images, coco_stuff_labels): + ret.append({"image": image, "label": label}) + return ret + + def decode_mask(self, label_path): + label = np.array(Image.open(label_path)) + + # coco stuff + ignored_classes = [index for class_name, + index in self.cocostuff_class2index.items() if "-" in class_name] + label = np.where(np.isin(label, ignored_classes), 255, label) + + unique_labels = [lbl for lbl in np.unique(label) if lbl != 255] + if not unique_labels: + print("No valid label !!!") + return None, None + + # only choose 1 + selected_labels = np.random.choice(unique_labels, min( + len(unique_labels), self.num_classes_per_sample), replace=False) + + label = torch.from_numpy(label).long() + masks = torch.stack( + [label == class_id for class_id in selected_labels], dim=0) + return masks, selected_labels + +class PascalPartSemanticSegDataset(SemanticSegDataset): + + def json_file_preprocess(self, data_path, image_folder): + self.coco_api = COCO(data_path) + img_ids = self.coco_api.getImgIds() + all_classes = self.coco_api.loadCats(self.coco_api.getCatIds()) + class_map_pascal_part = {} + for cat in all_classes: + cat_main, cat_part = cat["name"].strip().split(":") + name = (cat_main, cat_part) + class_map_pascal_part[cat["id"]] = name + self.classes = class_map_pascal_part + return img_ids + + def __getitem__(self, index): + index = index % self.real_len() + img_id = self.image_label_datas[index] + img_info = self.coco_api.loadImgs([img_id])[0] + file_name = img_info["file_name"] + data_dict = {} + + image_file = os.path.join(self.image_folder, file_name) + image = Image.open(image_file).convert('RGB') + + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous() + data_dict['g_pixel_values'] = g_pixel_values + + if self.pad_image_to_square: + image = expand2square( + image, tuple(int(x * 255) for x in self.image_processor.image_mean)) + image = self.image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] + data_dict['pixel_values'] = image + + annotation_ids = self.coco_api.getAnnIds(imgIds=img_info["id"]) + annotations = self.coco_api.loadAnns(annotation_ids) + + if not annotations: + return self.__getitem__(0) + + sampled_anns = np.random.choice(annotations, min( + len(annotations), self.num_classes_per_sample), replace=False) + + conversation = [] + for i, ann in enumerate(sampled_anns): + cat_id = ann['category_id'] + sampled_cls = self.classes[cat_id] + if isinstance(sampled_cls, tuple): + obj, part = sampled_cls + name = f"{obj} {part}" if random.random() < 0.5 else f"the {part} of the {obj}" + else: + name = sampled_cls + question = random.choice(SEG_QUESTIONS).format(class_name=name) + if i == 0: + question = f"""The {DEFAULT_IMAGE_TOKEN} provides an overview of the picture.\n""" + question + conversation.append( + 
{'input': question, 'output': random.choice(ANSWER_LIST)}) + + masks = [self.coco_api.annToMask(ann) for ann in sampled_anns] + masks = np.stack(masks, axis=0) + masks = torch.from_numpy(masks) + + data_dict['masks'] = masks + data_dict['conversation'] = conversation + + if self.lazy: + result = self.template_map_fn(data_dict) + data_dict.update(result) + + result = encode_fn(data_dict, tokenizer=self.tokenizer, max_length=self.max_length, with_image_token=True) + data_dict.update(result) + + return data_dict + +class PacoSemanticSegDataset(PascalPartSemanticSegDataset): + def json_file_preprocess(self, data_path, image_folder): + self.coco_api = COCO(data_path) + all_classes = self.coco_api.loadCats(self.coco_api.getCatIds()) + class_map_paco = {} + for cat in all_classes: + cat_split = cat["name"].strip().split(":") + if len(cat_split) == 1: + name = cat_split[0].split("_(")[0] + else: + assert len(cat_split) == 2 + obj, part = cat_split + obj = obj.split("_(")[0] + part = part.split("_(")[0] + name = (obj, part) + class_map_paco[cat["id"]] = name + self.classes = class_map_paco + return self.coco_api.getImgIds() \ No newline at end of file diff --git a/projects/glamm/datasets/utils/ade20k_classes.json b/projects/glamm/datasets/utils/ade20k_classes.json new file mode 100644 index 0000000000000000000000000000000000000000..1f96e616bc3fd2f8c0ec4caea975d77c680f44bb --- /dev/null +++ b/projects/glamm/datasets/utils/ade20k_classes.json @@ -0,0 +1,30 @@ +[ + "wall", "building", "sky", "floor", "tree", "ceiling", "road", + "bed", "windowpane", "grass", "cabinet", "sidewalk", + "person", "earth", "door", "table", "mountain", "plant", + "curtain", "chair", "car", "water", "painting", "sofa", + "shelf", "house", "sea", "mirror", "rug", "field", "armchair", + "seat", "fence", "desk", "rock", "wardrobe", "lamp", + "bathtub", "railing", "cushion", "base", "box", "column", + "signboard", "chest of drawers", "counter", "sand", "sink", + "skyscraper", "fireplace", "refrigerator", "grandstand", + "path", "stairs", "runway", "case", "pool table", "pillow", + "screen door", "stairway", "river", "bridge", "bookcase", + "blind", "coffee table", "toilet", "flower", "book", "hill", + "bench", "countertop", "stove", "palm", "kitchen island", + "computer", "swivel chair", "boat", "bar", "arcade machine", + "hovel", "bus", "towel", "light", "truck", "tower", + "chandelier", "awning", "streetlight", "booth", + "television receiver", "airplane", "dirt track", "apparel", + "pole", "land", "bannister", "escalator", "ottoman", "bottle", + "buffet", "poster", "stage", "van", "ship", "fountain", + "conveyer belt", "canopy", "washer", "plaything", + "swimming pool", "stool", "barrel", "basket", "waterfall", + "tent", "bag", "minibike", "cradle", "oven", "ball", "food", + "step", "tank", "trade name", "microwave", "pot", "animal", + "bicycle", "lake", "dishwasher", "screen", "blanket", + "sculpture", "hood", "sconce", "vase", "traffic light", + "tray", "ashcan", "fan", "pier", "crt screen", "plate", + "monitor", "bulletin board", "shower", "radiator", "glass", + "clock", "flag" +] \ No newline at end of file diff --git a/projects/glamm/datasets/utils/cocostuff_classes.txt b/projects/glamm/datasets/utils/cocostuff_classes.txt new file mode 100644 index 0000000000000000000000000000000000000000..1d5a692b83ac8eead2bfffa805e1115cef737bae --- /dev/null +++ b/projects/glamm/datasets/utils/cocostuff_classes.txt @@ -0,0 +1,183 @@ +0: unlabeled +1: person +2: bicycle +3: car +4: motorcycle +5: airplane +6: bus +7: train +8: truck 
+9: boat +10: traffic light +11: fire hydrant +12: street sign +13: stop sign +14: parking meter +15: bench +16: bird +17: cat +18: dog +19: horse +20: sheep +21: cow +22: elephant +23: bear +24: zebra +25: giraffe +26: hat +27: backpack +28: umbrella +29: shoe +30: eye glasses +31: handbag +32: tie +33: suitcase +34: frisbee +35: skis +36: snowboard +37: sports ball +38: kite +39: baseball bat +40: baseball glove +41: skateboard +42: surfboard +43: tennis racket +44: bottle +45: plate +46: wine glass +47: cup +48: fork +49: knife +50: spoon +51: bowl +52: banana +53: apple +54: sandwich +55: orange +56: broccoli +57: carrot +58: hot dog +59: pizza +60: donut +61: cake +62: chair +63: couch +64: potted plant +65: bed +66: mirror +67: dining table +68: window +69: desk +70: toilet +71: door +72: tv +73: laptop +74: mouse +75: remote +76: keyboard +77: cell phone +78: microwave +79: oven +80: toaster +81: sink +82: refrigerator +83: blender +84: book +85: clock +86: vase +87: scissors +88: teddy bear +89: hair drier +90: toothbrush +91: hair brush +92: banner +93: blanket +94: branch +95: bridge +96: building-other +97: bush +98: cabinet +99: cage +100: cardboard +101: carpet +102: ceiling-other +103: ceiling-tile +104: cloth +105: clothes +106: clouds +107: counter +108: cupboard +109: curtain +110: desk-stuff +111: dirt +112: door-stuff +113: fence +114: floor-marble +115: floor-other +116: floor-stone +117: floor-tile +118: floor-wood +119: flower +120: fog +121: food-other +122: fruit +123: furniture-other +124: grass +125: gravel +126: ground-other +127: hill +128: house +129: leaves +130: light +131: mat +132: metal +133: mirror-stuff +134: moss +135: mountain +136: mud +137: napkin +138: net +139: paper +140: pavement +141: pillow +142: plant-other +143: plastic +144: platform +145: playingfield +146: railing +147: railroad +148: river +149: road +150: rock +151: roof +152: rug +153: salad +154: sand +155: sea +156: shelf +157: sky +158: skyscraper +159: snow +160: solid-other +161: stairs +162: stone +163: straw +164: structural-other +165: table +166: tent +167: textile-other +168: towel +169: tree +170: vegetable +171: wall-brick +172: wall-concrete +173: wall-other +174: wall-panel +175: wall-stone +176: wall-tile +177: wall-wood +178: water-other +179: waterdrops +180: window-blind +181: window-other +182: wood diff --git a/projects/glamm/datasets/utils/utils.py b/projects/glamm/datasets/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d795d1e36a0c15d011a329203ab56102c924efac --- /dev/null +++ b/projects/glamm/datasets/utils/utils.py @@ -0,0 +1,131 @@ +from PIL import Image + + + +def expand2square(pil_img, background_color): + width, height = pil_img.size + if width == height: + return pil_img + elif width > height: + result = Image.new(pil_img.mode, (width, width), background_color) + result.paste(pil_img, (0, (width - height) // 2)) + return result + else: + result = Image.new(pil_img.mode, (height, height), background_color) + result.paste(pil_img, ((height - width) // 2, 0)) + return result + +CAPTION_QUESTIONS = [ + 'Could you please give me a detailed description of the image?', + 'Can you provide a thorough description of the this image?', + 'Please provide a thorough description of the this image', + 'Please provide a thorough description of the this image.', + 'Please describe in detail the contents of the image.', + 'Please describe in detail the contents of the image', + 'Could you give a comprehensive explanation of what can be 
found within this picture?', + 'Could you give me an elaborate explanation of this picture?', + 'Could you provide me with a detailed analysis of this photo?', + 'Could you please give me a detailed description of the image?', + 'Can you provide a thorough description of the this image?', + 'Please describe in detail the contents of the image', + 'Please describe in detail the contents of the image.', + 'Can you give a comprehensive explanation of this photo', + 'Please provide an elaborate explanation of this picture.', + 'Please provide an elaborate explanation of this picture', + 'Could you provide me with a detailed analysis of this photo', +] + +REGION_QUESTIONS = [ + 'Can you provide me with a detailed description of the region in the picture marked by ?', + "I'm curious about the region represented by in the picture. Could you describe it in detail?", + 'What can you tell me about the region indicated by in the image?', + "I'd like to know more about the area in the photo labeled . Can you give me a detailed description?", + 'Could you describe the region shown as in the picture in great detail?', + 'What details can you give me about the region outlined by in the photo?', + 'Please provide me with a comprehensive description of the region marked with in the image.', + 'Can you give me a detailed account of the region labeled as in the picture?', + "I'm interested in learning more about the region represented by in the photo. Can you describe it in detail?", + 'What is the region outlined by in the picture like? Could you give me a detailed description?', + 'Can you provide me with a detailed description of the region in the picture marked by , please?', + "I'm curious about the region represented by in the picture. Could you describe it in detail, please?", + 'What can you tell me about the region indicated by in the image, exactly?', + "I'd like to know more about the area in the photo labeled , please. Can you give me a detailed description?", + 'Could you describe the region shown as in the picture in great detail, please?', + 'What details can you give me about the region outlined by in the photo, please?', + 'Please provide me with a comprehensive description of the region marked with in the image, please.', + 'Can you give me a detailed account of the region labeled as in the picture, please?', + "I'm interested in learning more about the region represented by in the photo. Can you describe it in detail, please?", + 'What is the region outlined by in the picture like, please? 
Could you give me a detailed description?', +] + +REGION_GROUP_QUESTIONS = [ + 'Could you please give me a detailed description of these areas ?', + 'Can you provide a thorough description of the regions in this image?', + 'Please describe in detail the contents of the boxed areas .', + 'Could you give a comprehensive explanation of what can be found within in the picture?', + 'Could you give me an elaborate explanation of the regions in this picture?', + 'Can you provide a comprehensive description of the areas identified by in this photo?', + 'Help me understand the specific locations labeled in this picture in detail, please.', + 'What is the detailed information about the areas marked by in this image?', + 'Could you provide me with a detailed analysis of the regions designated in this photo?', + 'What are the specific features of the areas marked in this picture that you can describe in detail?', + 'Could you elaborate on the regions identified by in this image?', + 'What can you tell me about the areas labeled in this picture?', + 'Can you provide a thorough analysis of the specific locations designated in this photo?', + 'I am interested in learning more about the regions marked in this image. Can you provide me with more information?', + 'Could you please provide a detailed description of the areas identified by in this photo?', + 'What is the significance of the regions labeled in this picture?', + 'I would like to know more about the specific locations designated in this image. Can you provide me with more information?', + 'Can you provide a detailed breakdown of the regions marked in this photo?', + 'What specific features can you tell me about the areas identified by in this picture?', + 'Could you please provide a comprehensive explanation of the locations labeled in this image?', + 'Can you provide a detailed account of the regions designated in this photo?', + 'I am curious about the areas marked in this picture. Can you provide me with a detailed analysis?', + 'What important details can you tell me about the specific locations identified by in this image?', + 'Could you please provide a detailed description of the regions labeled in this photo?', + 'What can you tell me about the features of the areas designated in this picture?', + 'Can you provide a comprehensive overview of the regions marked in this image?', + 'I would like to know more about the specific locations identified by in this photo. Can you provide me with more information?', + 'What is the detailed information you have on the areas labeled in this picture?', + 'Could you provide me with a thorough analysis of the regions designated in this image?', + 'Can you provide a detailed explanation of the specific locations marked by in this photo?' +] + +GCG_QUESTIONS = [ + 'Could you please give me a detailed description of the image? Please respond with interleaved segmentation masks for the corresponding parts of the answer.', + 'Can you provide a thorough description of the this image? Please output with interleaved segmentation masks for the corresponding phrases.', + 'Please describe in detail the contents of the image. Please respond with interleaved segmentation masks for the corresponding parts of the answer.', + 'Could you give a comprehensive explanation of what can be found within this picture? Please output with interleaved segmentation masks for the corresponding phrases.', + 'Could you give me an elaborate explanation of this picture? 
Please respond with interleaved segmentation masks for the corresponding phrases.', + 'Could you provide me with a detailed analysis of this photo? Please output with interleaved segmentation masks for the corresponding parts of the answer.', +] + +SEG_QUESTIONS = [ + "Can you segment the {class_name} in this image?", + "Please segment {class_name} in this image.", + "What is {class_name} in this image? Please respond with segmentation mask.", + "What is {class_name} in this image? Please output segmentation mask.", + + "Can you segment the {class_name} in this image", + "Please segment {class_name} in this image", + "What is {class_name} in this image? Please respond with segmentation mask", + "What is {class_name} in this image? Please output segmentation mask", + + "Could you provide a segmentation mask for the {class_name} in this image?", + "Please identify and segment the {class_name} in this image.", + "Where is the {class_name} in this picture? Please respond with a segmentation mask.", + "Can you highlight the {class_name} in this image with a segmentation mask?", + + "Could you provide a segmentation mask for the {class_name} in this image", + "Please identify and segment the {class_name} in this image", + "Where is the {class_name} in this picture? Please respond with a segmentation mask", + "Can you highlight the {class_name} in this image with a segmentation mask", +] + +ANSWER_LIST = [ + "It is [SEG].", + "Sure, [SEG].", + "Sure, it is [SEG].", + "Sure, the segmentation result is [SEG].", + "[SEG].", +] \ No newline at end of file diff --git a/projects/glamm/models/glamm.py b/projects/glamm/models/glamm.py new file mode 100644 index 0000000000000000000000000000000000000000..71d6d317cc92ee43b1bc7054aa5b05fc9459ca4d --- /dev/null +++ b/projects/glamm/models/glamm.py @@ -0,0 +1,183 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from xtuner.registry import BUILDER +from xtuner.model.utils import LoadWoInit, guess_load_checkpoint +from xtuner.model.llava import LLaVAModel + +from mmengine.model import BaseModel +from mmengine import print_log + +from projects.glamm.utils import prepare_inputs_labels_for_multimodal +from projects.glamm.utils import DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN + + +class GLaMM(LLaVAModel): + def __init__(self, + use_activation_checkpointing=True, + tokenizer=None, + grounding_encoder=None, + region_encoder=None, + loss_mask=None, + loss_dice=None, + *args, **kwargs): + super(GLaMM, self).__init__( + *args, use_activation_checkpointing=use_activation_checkpointing, **kwargs) + + self.use_activation_checkpointing = use_activation_checkpointing + self.tokenizer = BUILDER.build(tokenizer) + self._add_special_tokens() + + self.grounding_encoder = BUILDER.build(grounding_encoder) + self.grounding_encoder.requires_grad_(False) + self.grounding_encoder.mask_decoder.requires_grad_(True) + + if region_encoder is not None: + self.region_encoder = BUILDER.build(region_encoder) + + in_dim = self.config.hidden_size + out_dim = self.grounding_encoder.mask_decoder.transformer_dim + self.text_hidden_fcs = nn.Sequential( + nn.Linear(in_dim, in_dim), nn.ReLU(inplace=True), + nn.Linear(in_dim, out_dim), nn.Dropout(0.0) + ) + + self.loss_mask = BUILDER.build(loss_mask) + self.loss_dice = BUILDER.build(loss_dice) + + def _add_special_tokens(self): + reg_tokens = ['', '', '', ''] + segmentation_tokens = ['[SEG]'] + phrase_tokens = ['

<p>', '</p>
'] + special_tokens = reg_tokens + segmentation_tokens + phrase_tokens + num_new_tokens = self.tokenizer.add_tokens( + special_tokens, special_tokens=True) + if num_new_tokens > 0: + self.llm.resize_token_embeddings(len(self.tokenizer)) + input_embeddings = self.llm.get_input_embeddings().weight.data + output_embeddings = self.llm.get_output_embeddings().weight.data + + input_embeddings_avg = input_embeddings[:-num_new_tokens].mean( + dim=0, keepdim=True) + output_embeddings_avg = output_embeddings[:-num_new_tokens].mean( + dim=0, keepdim=True) + + input_embeddings[-num_new_tokens:] = input_embeddings_avg + output_embeddings[-num_new_tokens:] = output_embeddings_avg + + self.seg_token_idx = self.tokenizer("[SEG]", add_special_tokens=False).input_ids[0] + self.bop_token_idx = self.tokenizer("
<p>", add_special_tokens=False).input_ids[0]
+        self.eop_token_idx = self.tokenizer("</p>
", add_special_tokens=False).input_ids[0] + self.bbox_token_idx = self.tokenizer("", add_special_tokens=False).input_ids[0] + + if self.use_activation_checkpointing or self.use_llm_lora or not self.freeze_llm: + self.llm.enable_input_require_grads() + + def forward(self, data, data_samples=None, mode='loss'): + if 'pixel_values' in data: + visual_outputs = self.visual_encoder( + data['pixel_values'].to(self.visual_encoder.dtype), + output_hidden_states=True) + pixel_values = self.projector( + visual_outputs.hidden_states[self.visual_select_layer][:, 1:]) + data['pixel_values'] = pixel_values + bboxes = data.pop('bboxes', None) + if bboxes is not None: + select_hidden_state_layer = -2 + num_level_reg_features = 4 + mlvl_reg_features = visual_outputs.hidden_states[select_hidden_state_layer::-3] + mlvl_reg_features = mlvl_reg_features[::-1] + mlvl_reg_features = mlvl_reg_features[-num_level_reg_features:] + mlvl_reg_features = [item[:, 1:] for item in mlvl_reg_features] + mlvl_reg_features = self.region_encoder(mlvl_reg_features, bboxes) + data = prepare_inputs_labels_for_multimodal(llm=self.llm, **data) + + if bboxes is not None: + inputs_embeds = data['inputs_embeds'] + for i, reg_feat in enumerate(mlvl_reg_features): + reg_mask = data['new_input_ids'][i] == self.bbox_token_idx + inputs_embeds[i][reg_mask] = reg_feat + data['inputs_embeds'] = inputs_embeds + + if mode == 'loss': + return self.compute_loss(data, data_samples) + elif mode == 'predict': + return self.predict(data, data_samples) + elif mode == 'tensor': + return self._forward(data, data_samples) + else: + raise NotImplementedError + + def compute_loss(self, data, data_samples=None): + g_pixel_values = data.pop('g_pixel_values', None) + gt_masks = data.pop('masks', None) + new_input_ids = data.pop('new_input_ids', None) + + output = self.llm(output_hidden_states=True, **data) + if gt_masks is None: + return {'llm_loss': output.loss} + + resize_list = [pixel.shape[-2:] for pixel in g_pixel_values] + ori_size_list = [mask.shape[-2:] for mask in gt_masks] + g_pixel_values = torch.stack([ + self.grounding_encoder.preprocess(pixel) for pixel in g_pixel_values + ]) + image_embeddings = self.grounding_encoder.image_encoder(g_pixel_values) + + seg_token_mask = new_input_ids == self.seg_token_idx + hidden_states = output.hidden_states + hidden_states = self.text_hidden_fcs(hidden_states[-1]) + pred_embeddings = hidden_states[seg_token_mask] + + seg_token_counts = seg_token_mask.int().sum(-1) + pred_embeddings_list = torch.split(pred_embeddings, seg_token_counts.tolist(), dim=0) + + pred_masks = self._generate_and_postprocess_masks( + pred_embeddings_list, image_embeddings, resize_list, ori_size_list) + + bs = len(pred_masks) + loss_mask, loss_dice = 0, 0 + for i in range(bs): + pred_mask = pred_masks[i] + gt_mask = gt_masks[i] + + sam_loss_mask = self.loss_mask(pred_mask, gt_mask) + sam_loss_dice = self.loss_dice(pred_mask, gt_mask) + accuracy = torch.eq((pred_mask.sigmoid() > 0.5), gt_mask).to(pred_mask).mean() + loss_mask += sam_loss_mask + loss_dice += sam_loss_dice + + + loss_dict = { + 'loss_mask': loss_mask / bs, + 'loss_dice': loss_dice / bs, + 'accuracy': accuracy, + 'llm_loss': output.loss, + } + return loss_dict + + + def _generate_and_postprocess_masks(self, pred_embeddings, image_embeddings, resize_list=None, orig_size_list=None, infer=False): + pred_masks = [] + for i, pred_embedding in enumerate(pred_embeddings): + sparse_embeddings, dense_embeddings = self.grounding_encoder.prompt_encoder( + points=None, boxes=None, 
masks=None, text_embeds=pred_embedding.unsqueeze(1) + ) + sparse_embeddings = sparse_embeddings.to(pred_embedding.dtype) + low_res_masks, _ = self.grounding_encoder.mask_decoder( + image_embeddings=image_embeddings[i].unsqueeze(0), + image_pe=self.grounding_encoder.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, + multimask_output=False, ) + + pred_mask = self.grounding_encoder.postprocess_masks( + low_res_masks, input_size=resize_list[i], original_size=orig_size_list[i], ) + pred_masks.append(pred_mask[:, 0]) + return pred_masks + + def predict(self, data): + pass + + def _forward(self, data, dta_samples=None): + outputs = self.llm(**data) + return outputs diff --git a/projects/glamm/models/region_encoder.py b/projects/glamm/models/region_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..5dd8dc103f30a6ebe46b69e6e7ceb36fe16364fd --- /dev/null +++ b/projects/glamm/models/region_encoder.py @@ -0,0 +1,359 @@ +from abc import ABCMeta, abstractmethod +from typing import List, Optional, Tuple +from torch import Tensor + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmcv import ops +from mmcv.cnn import ConvModule, Linear +from mmengine.model import BaseModule + +class BaseRoIExtractor(BaseModule, metaclass=ABCMeta): + """Base class for RoI extractor. + + Args: + roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and + arguments. + out_channels (int): Output channels of RoI layers. + featmap_strides (list[int]): Strides of input feature maps. + init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ + dict], optional): Initialization config dict. Defaults to None. + """ + + def __init__(self, + roi_layer, + out_channels: int, + featmap_strides: List[int], + init_cfg=None) -> None: + super().__init__(init_cfg=init_cfg) + self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides) + self.out_channels = out_channels + self.featmap_strides = featmap_strides + + @property + def num_inputs(self) -> int: + """int: Number of input feature maps.""" + return len(self.featmap_strides) + + def build_roi_layers(self, layer_cfg, + featmap_strides: List[int]) -> nn.ModuleList: + """Build RoI operator to extract feature from each level feature map. + + Args: + layer_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and + config RoI layer operation. Options are modules under + ``mmcv/ops`` such as ``RoIAlign``. + featmap_strides (list[int]): The stride of input feature map w.r.t + to the original image size, which would be used to scale RoI + coordinate (original image coordinate system) to feature + coordinate system. + + Returns: + :obj:`nn.ModuleList`: The RoI extractor modules for each level + feature map. + """ + + cfg = layer_cfg.copy() + layer_type = cfg.pop('type') + if isinstance(layer_type, str): + assert hasattr(ops, layer_type) + layer_cls = getattr(ops, layer_type) + else: + layer_cls = layer_type + roi_layers = nn.ModuleList( + [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides]) + return roi_layers + + def roi_rescale(self, rois: Tensor, scale_factor: float) -> Tensor: + """Scale RoI coordinates by scale factor. + + Args: + rois (Tensor): RoI (Region of Interest), shape (n, 5) + scale_factor (float): Scale factor that RoI will be multiplied by. + + Returns: + Tensor: Scaled RoI. 
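+
+        Example (illustrative sketch; ``extractor`` stands in for any concrete subclass):
+            >>> rois = torch.tensor([[0., 10., 10., 30., 30.]])
+            >>> extractor.roi_rescale(rois, 2.0)
+            tensor([[ 0.,  0.,  0., 40., 40.]])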
+ """ + + cx = (rois[:, 1] + rois[:, 3]) * 0.5 + cy = (rois[:, 2] + rois[:, 4]) * 0.5 + w = rois[:, 3] - rois[:, 1] + h = rois[:, 4] - rois[:, 2] + new_w = w * scale_factor + new_h = h * scale_factor + x1 = cx - new_w * 0.5 + x2 = cx + new_w * 0.5 + y1 = cy - new_h * 0.5 + y2 = cy + new_h * 0.5 + new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1) + return new_rois + + @abstractmethod + def forward(self, + feats: Tuple[Tensor], + rois: Tensor, + roi_scale_factor: Optional[float] = None) -> Tensor: + """Extractor ROI feats. + + Args: + feats (Tuple[Tensor]): Multi-scale features. + rois (Tensor): RoIs with the shape (n, 5) where the first + column indicates batch id of each RoI. + roi_scale_factor (Optional[float]): RoI scale factor. + Defaults to None. + + Returns: + Tensor: RoI feature. + """ + pass + + +class MLVLFuseModule(nn.Module): + def __init__(self, input_dims=1024, embed_dims=1024, num_levels=3, num_fuse=4): + super(MLVLFuseModule, self).__init__() + self.embed_dims = embed_dims + self.num_levels = num_levels + self.num_fuse = num_fuse + self.input_dims = input_dims + self.shuffle_channles = embed_dims // 4 + + # contains the tuple of level indices that will do the interaction + self.fuse_lvl_list = [] + num_levels = self.num_levels + for lvl in range(num_levels): + top_lvl = min(lvl + 1, num_levels - 1) + dow_lvl = max(lvl - 1, 0) + tar_lvl = lvl + self.fuse_lvl_list.append((tar_lvl, top_lvl, dow_lvl)) + + self.remain_chs = self.embed_dims - self.shuffle_channles * 2 + self._init_layers() + + def generate_coordinate(self, featmap_sizes, device='cuda'): + + x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device) + y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device) + y, x = torch.meshgrid(y_range, x_range) + y = y.expand([featmap_sizes[0], 1, -1, -1]) + x = x.expand([featmap_sizes[0], 1, -1, -1]) + coord_feat = torch.cat([x, y], 1) + + return coord_feat + + def _init_layers(self): + self.input_conv = nn.ModuleList([nn.Conv2d(self.input_dims + 2, + self.embed_dims, 1) + for _ in range(self.num_levels)]) + self.fuse_convs = nn.ModuleList() + for i in range(self.num_fuse): + self.fuse_convs.append( + ConvModule(self.embed_dims, + self.embed_dims, + 3, + stride=1, + padding=3 // 2, + conv_cfg=None, + norm_cfg=dict(type='GN', + num_groups=64, + requires_grad=True) + )) + + def init_weights(self): + pass + + def _single_shuffle(self, inputs, conv_module): + if not isinstance(conv_module, (nn.ModuleList, list)): + conv_module = [conv_module] + for single_conv_m in conv_module: + fused_inputs = [] + for fuse_lvl_tuple in self.fuse_lvl_list: + tar_lvl, top_lvl, dow_lvl = fuse_lvl_tuple + tar_input = inputs[tar_lvl] + top_input = inputs[top_lvl] + down_input = inputs[dow_lvl] + remain = tar_input[:, :self.remain_chs] + from_top = top_input[:, self.remain_chs:][:, self.shuffle_channles:] + from_top = F.interpolate(from_top.to(torch.float32), + size=tar_input.shape[-2:], + mode='bilinear', + align_corners=True) + from_down = down_input[:, self.remain_chs:][:, :self.shuffle_channles] + from_down = F.interpolate(from_down.to(torch.float32), + size=tar_input.shape[-2:], + mode='bilinear', + align_corners=True) + fused_inputs.append( + torch.cat([remain, from_top.to(remain.dtype), from_down.to(remain.dtype)], dim=1)) + fused_inputs = [single_conv_m(item) for item in fused_inputs] + inputs = fused_inputs + return inputs + + def forward(self, inputs, ): + feat_size = [item.shape for item in inputs] + new_inputs = [] + for feat, single_feat_size in zip(inputs, 
feat_size): + coord_feat = self.generate_coordinate( + single_feat_size, device=inputs[0].device) + # feat = torch.cat([feat, coord_feat], dim=1) + feat = torch.cat([feat, coord_feat.to(feat.dtype)], dim=1) + new_inputs.append(feat) + inputs = new_inputs + + inputs = [self.input_conv[lvl](item) + for lvl, item in enumerate(inputs)] + + for conv_m in self.fuse_convs: + inputs = self._single_shuffle(inputs, [conv_m]) + return inputs + + +class MlvlRoIExtractor(BaseRoIExtractor): + def __init__(self, + roi_layer, + out_channels, + featmap_strides, + embed_dims=1024, + stride=1, + norm_init=True, + fuse_level=3, + finest_scale=56, + init_cfg=None): + super(MlvlRoIExtractor, self).__init__(roi_layer, out_channels, + featmap_strides, init_cfg) + self.embed_dims = embed_dims + self.finest_scale = finest_scale + self.fuse_level = fuse_level + self.norm_init = norm_init + + self.pconvs = nn.ModuleList( + nn.Conv2d(self.embed_dims, self.embed_dims, 3, stride=1, padding=1) + for _ in range(self.fuse_level)) + self.pos_embedd = nn.Sequential( + nn.Linear(4, 256), + nn.ReLU(inplace=True), + nn.LayerNorm(256), + nn.Linear(256, 1024), + nn.ReLU(inplace=True), + nn.LayerNorm(1024), + ) + self.updims = nn.Linear(1024, 4096) + + self.flatten_linear = nn.Linear( + self.embed_dims * self.roi_layers[0].output_size[0] ** 2, 1024) + + self.norm_init_weights() + + # self.dtype = torch.float32 + def norm_init_weights(self): + pass + + def forward(self, feats, rois, roi_scale_factor=None): + """Forward function.""" + num_imgs = len(rois) + # feats = [item for item in feats] + batch_rois = torch.cat(rois, dim=0).to(feats[0].dtype) + pos_embedd = self.pos_embedd(batch_rois) + out_size = self.roi_layers[0].output_size + num_levels = len(feats) + if feats[0].dim() == 3: + h = w = int(math.sqrt(feats[0].shape[1])) + assert h == 16 + assert w == 16 + b, c = feats[0].shape[0], feats[0].shape[-1] + feats = [item.reshape(b, h, w, c).permute(0, 3, 1, 2) + for item in feats] + new_rois = [] + for img_id, single_img_roi in enumerate(rois): + # rescale to original img scale + single_img_roi = single_img_roi * 224 + + roi_img_id = single_img_roi.new_ones(len(single_img_roi)) * img_id + single_img_roi = torch.cat( + [roi_img_id[:, None], single_img_roi], dim=1) + new_rois.append(single_img_roi) + rois = torch.cat(new_rois) + + roi_feats = feats[0].new_zeros(self.fuse_level, + rois.size(0), self.out_channels, *out_size) + + for i in range(num_levels): + if len(rois) > 0: + rois_ = rois + ori_dtype = feats[i].dtype + roi_feats_t = self.roi_layers[i](feats[i].to( + torch.float32), rois_.to(torch.float32)) + + roi_feats[i] = roi_feats_t.to(ori_dtype) + + else: + roi_feats += sum( + x.view(-1)[0] + for x in self.parameters()) * 0. + feats[i].sum() * 0. 
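+                    # no valid RoIs: add a zero-weighted term that still touches every parameter
+                    # and the current feature map, keeping the computation graph connected.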
+ + fuse_roi_feats = [] + for i in range(self.fuse_level): + fuse_roi_feats.append(self.pconvs[i](roi_feats[i])) + + fuse_roi_feats = sum(fuse_roi_feats) + fuse_roi_feats = F.relu(fuse_roi_feats) + fuse_roi_feats = fuse_roi_feats.flatten(1, -1) + fuse_roi_feats = self.flatten_linear(fuse_roi_feats) + fuse_roi_feats = fuse_roi_feats + pos_embedd + fuse_roi_feats = self.updims(fuse_roi_feats) + query_feats = [] + for i in range(num_imgs): + mask = rois[:, 0] == i + query_feats.append(fuse_roi_feats[mask]) + + return query_feats + + +class MLVLROIQueryModule(nn.Module): + def __init__(self, embed_dims=1024, out_dims=4096, + num_levels=3): + super(MLVLROIQueryModule, self).__init__() + self.mlvl_fuse = MLVLFuseModule(input_dims=embed_dims, + embed_dims=embed_dims, + num_levels=num_levels, + num_fuse=5) + strids = [14 / 8, 14 / 4, 14 / 2, 14] + assert len(strids) == num_levels + bbox_roi_extractor = dict(roi_layer=dict(type='RoIAlign', + output_size=14, + sampling_ratio=2), + out_channels=embed_dims, + embed_dims=embed_dims, + fuse_level=num_levels, + featmap_strides=strids) + + self.roi_align = MlvlRoIExtractor(**bbox_roi_extractor) + + def forward(self, mlvl_feats, bboxes): + if mlvl_feats[0].dim() == 3: + h = w = int(math.sqrt(mlvl_feats[0].shape[1])) + assert h == 24 + assert w == 24 + b, c = mlvl_feats[0].shape[0], mlvl_feats[0].shape[-1] + mlvl_feats = [item.reshape(b, h, w, c).permute(0, 3, 1, 2) for item in mlvl_feats] + base_shape = mlvl_feats[0].shape[-2:] + num_level = len(mlvl_feats) + to_shape = [(base_shape[0] * 2 ** level, base_shape[1] * 2 ** level) + for level in range(num_level)] + to_shape = to_shape[::-1] + for level in range(num_level): + feat = mlvl_feats[level] + shape = to_shape[level] + # feat = feat + # mlvl_feats[level] = F.interpolate(feat, size=shape, mode='bilinear', align_corners=True) + # todo: temporary fix for "upsample_bilinear2d_out_frame" not implemented for 'BFloat16' + feat = feat.to(torch.float32) + mlvl_feats[level] = F.interpolate( + feat, size=shape, mode='bilinear', align_corners=True) + mlvl_feats[level] = mlvl_feats[level].to(torch.bfloat16) + + mlvl_feats = self.mlvl_fuse(mlvl_feats) + + return self.roi_align(mlvl_feats, bboxes) diff --git a/projects/glamm/utils.py b/projects/glamm/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bd89b03f7ec0700038be4d77aec2822da3686a24 --- /dev/null +++ b/projects/glamm/utils.py @@ -0,0 +1,280 @@ +from enum import Enum + +import numpy as np +import torch +import torch.distributed as dist + +from transformers import PreTrainedModel +from typing import List, Optional + + +IGNORE_INDEX = -100 +IMAGE_TOKEN_INDEX = -200 + +DEFAULT_EOS_TOKEN = '' +DEFAULT_BOS_TOKEN = '' +DEFAULT_UNK_TOKEN = '' + +DEFAULT_IMAGE_TOKEN = "" +DEFAULT_IMAGE_PATCH_TOKEN = "" +DEFAULT_IM_START_TOKEN = "" +DEFAULT_IM_END_TOKEN = "" +DEFAULT_BBOX_TOKEN = "" + + + +# Modified from https://github.com/haotian-liu/LLaVA/blob/82fc5e0e5f4393a4c26851fa32c69ab37ea3b146/llava/model/llava_arch.py#L99 # noqa: E501 +def prepare_inputs_labels_for_multimodal( + llm: PreTrainedModel, + input_ids: torch.LongTensor = None, + position_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + labels: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + **kwargs): + if pixel_values is None: + kwargs.update({ + 'input_ids': input_ids, + 'position_ids': position_ids, + 'attention_mask': attention_mask, + 
'past_key_values': past_key_values, + 'inputs_embeds': None, + 'labels': labels + }) + return kwargs + + _labels = labels + _position_ids = position_ids + _attention_mask = attention_mask + if attention_mask is None: + attention_mask = torch.ones_like(input_ids, dtype=torch.bool) + else: + attention_mask = attention_mask.bool() + if position_ids is None: + position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device) + if labels is None: + labels = torch.full_like(input_ids, IGNORE_INDEX) + + # remove the padding using attention_mask -- TODO: double check + input_ids = [ + cur_input_ids[cur_attention_mask] + for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask) + ] + labels = [ + cur_labels[cur_attention_mask] + for cur_labels, cur_attention_mask in zip(labels, attention_mask) + ] + + new_inputs_embeds = [] + new_labels = [] + new_input_ids = [] + cur_image_idx = 0 + for batch_idx, cur_input_ids in enumerate(input_ids): + num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum() + if num_images == 0: + cur_pixel_values = pixel_values[cur_image_idx] + cur_inputs_embeds_1 = llm.get_input_embeddings()(cur_input_ids) + cur_inputs_embeds = torch.cat([cur_inputs_embeds_1, cur_pixel_values[0:0]], dim=0) + new_inputs_embeds.append(cur_inputs_embeds) + new_labels.append(labels[batch_idx]) + new_input_ids.append(cur_input_ids) + cur_image_idx += 1 + continue + + image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]] + cur_input_ids_noim = [] + cur_labels = labels[batch_idx] + cur_labels_noim = [] + for i in range(len(image_token_indices) - 1): + cur_input_ids_noim.append(cur_input_ids[image_token_indices[i] + 1:image_token_indices[i + 1]]) + cur_labels_noim.append(cur_labels[image_token_indices[i] + 1:image_token_indices[i + 1]]) + + split_sizes = [x.shape[0] for x in cur_labels_noim] + cur_inputs_embeds = llm.get_input_embeddings()(torch.cat(cur_input_ids_noim)) + cur_inputs_embeds_no_im = torch.split(cur_inputs_embeds, split_sizes, dim=0) + cur_new_inputs_embeds = [] + cur_new_labels = [] + cur_new_input_ids = [] + + for i in range(num_images + 1): + cur_new_inputs_embeds.append(cur_inputs_embeds_no_im[i]) + cur_new_labels.append(cur_labels_noim[i]) + cur_new_input_ids.append(cur_input_ids_noim[i]) + if i < num_images: + cur_pixel_values = pixel_values[cur_image_idx] + cur_image_idx += 1 + cur_new_inputs_embeds.append(cur_pixel_values) + cur_new_labels.append(torch.full((cur_pixel_values.shape[0], ), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype)) + cur_new_input_ids.append(torch.full((cur_pixel_values.shape[0], ), IMAGE_TOKEN_INDEX, device=cur_input_ids.device, dtype=cur_input_ids.dtype)) + + cur_new_inputs_embeds = torch.cat(cur_new_inputs_embeds) + cur_new_labels = torch.cat(cur_new_labels) + cur_new_input_ids = torch.cat(cur_new_input_ids) + + new_inputs_embeds.append(cur_new_inputs_embeds) + new_labels.append(cur_new_labels) + new_input_ids.append(cur_new_input_ids) + + # Combine them + max_len = max(x.shape[0] for x in new_inputs_embeds) + batch_size = len(new_inputs_embeds) + + new_inputs_embeds_padded = [] + new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device) + new_input_ids_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_input_ids[0].dtype, device=new_input_ids[0].device) + attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, 
device=attention_mask.device) + position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device) + + for i, (cur_new_embed, cur_new_labels, cur_new_input_ids) in enumerate(zip(new_inputs_embeds, new_labels, new_input_ids)): + cur_len = cur_new_embed.shape[0] + new_inputs_embeds_padded.append(torch.cat((cur_new_embed, torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)) + if cur_len > 0: + new_labels_padded[i, :cur_len] = cur_new_labels + new_input_ids_padded[i, :cur_len] = cur_new_input_ids + attention_mask[i, :cur_len] = True + position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device) + + new_inputs_embeds = torch.stack(new_inputs_embeds_padded, dim=0) + + if _labels is None: + new_labels = None + else: + new_labels = new_labels_padded + + new_input_ids = new_input_ids_padded + + if _attention_mask is None: + attention_mask = None + else: + attention_mask = attention_mask.to(dtype=_attention_mask.dtype) + + if _position_ids is None: + position_ids = None + + kwargs.update({ + 'input_ids': None, + 'position_ids': position_ids, + 'attention_mask': attention_mask, + 'past_key_values': past_key_values, + 'inputs_embeds': new_inputs_embeds, + 'labels': new_labels, + 'new_input_ids': new_input_ids + }) + return kwargs + +class Summary(Enum): + NONE = 0 + AVERAGE = 1 + SUM = 2 + COUNT = 3 + + +class AverageMeter(object): + """Computes and stores the average and current value""" + + def __init__(self, name, fmt=":f", summary_type=Summary.AVERAGE): + self.name = name + self.fmt = fmt + self.summary_type = summary_type + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def all_reduce(self): + device = "cuda" if torch.cuda.is_available() else "cpu" + if isinstance(self.sum, np.ndarray): + total = torch.tensor( + self.sum.tolist() + + [ + self.count, + ], + dtype=torch.float32, + device=device, + ) + else: + total = torch.tensor( + [self.sum, self.count], dtype=torch.float32, device=device + ) + + dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False) + if total.shape[0] > 2: + self.sum, self.count = total[:-1].cpu().numpy(), total[-1].cpu().item() + else: + self.sum, self.count = total.tolist() + self.avg = self.sum / (self.count + 1e-5) + + def __str__(self): + fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})" + return fmtstr.format(**self.__dict__) + + def summary(self): + fmtstr = "" + if self.summary_type is Summary.NONE: + fmtstr = "" + elif self.summary_type is Summary.AVERAGE: + fmtstr = "{name} {avg:.3f}" + elif self.summary_type is Summary.SUM: + fmtstr = "{name} {sum:.3f}" + elif self.summary_type is Summary.COUNT: + fmtstr = "{name} {count:.3f}" + else: + raise ValueError("invalid summary type %r" % self.summary_type) + + return fmtstr.format(**self.__dict__) + + +def intersectionAndUnionGPU(output, target, K, ignore_index=255): + # 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1. 
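+    # flatten both tensors, mark ignored pixels in the prediction, then use torch.histc to
+    # count per-class intersection, prediction and target areas (union follows from these).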
+ assert output.dim() in [1, 2, 3] + assert output.shape == target.shape + output = output.view(-1) + target = target.view(-1) + output[target == ignore_index] = ignore_index + intersection = output[output == target] + area_intersection = torch.histc(intersection, bins=K, min=0, max=K - 1) + area_output = torch.histc(output, bins=K, min=0, max=K - 1) + area_target = torch.histc(target, bins=K, min=0, max=K - 1) + area_union = area_output + area_target - area_intersection + return area_intersection, area_union, area_target + + +class ProgressMeter(object): + def __init__(self, num_batches, meters, prefix=""): + self.batch_fmtstr = self._get_batch_fmtstr(num_batches) + self.meters = meters + self.prefix = prefix + + def display(self, batch): + entries = [self.prefix + self.batch_fmtstr.format(batch)] + entries += [str(meter) for meter in self.meters] + print("\t".join(entries)) + + def display_summary(self): + entries = [" *"] + entries += [meter.summary() for meter in self.meters] + print(" ".join(entries)) + + def _get_batch_fmtstr(self, num_batches): + num_digits = len(str(num_batches // 1)) + fmt = "{:" + str(num_digits) + "d}" + return "[" + fmt + "/" + fmt.format(num_batches) + "]" + + +def dict_to_cuda(input_dict): + for k, v in input_dict.items(): + if isinstance(input_dict[k], torch.Tensor): + input_dict[k] = v.cuda(non_blocking=True) + elif isinstance(v, list) and len(v) > 0: + input_dict[k] = [ele.cuda(non_blocking=True) if isinstance(ele, torch.Tensor) else ele for ele in v] + return input_dict diff --git a/projects/llava_sam2/configs/sa2va_4b.py b/projects/llava_sam2/configs/sa2va_4b.py new file mode 100644 index 0000000000000000000000000000000000000000..e0d12cb2a7fbb0af130e57dc7acdd8ff8513ac70 --- /dev/null +++ b/projects/llava_sam2/configs/sa2va_4b.py @@ -0,0 +1,548 @@ +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from torch.optim import AdamW +from transformers import AutoTokenizer + +from xtuner.dataset import ConcatDataset +from xtuner.dataset.samplers import LengthGroupedSampler +from xtuner.engine.hooks import DatasetInfoHook +from xtuner.engine.runner import TrainLoop +from xtuner.utils import PROMPT_TEMPLATE +from xtuner.dataset.map_fns import template_map_fn_factory + +from third_parts.mmdet.models.losses import DiceLoss, CrossEntropyLoss +from peft import LoraConfig + +from projects.llava_sam2.models.internvl import InternVL_Slowfast + +from projects.llava_sam2.models import VideoLLaVASAMModel, SAM2TrainRunner, VideoLLaVASAMModel_zero3 +from projects.llava_sam2.datasets import VideoReVOSDataset, VideoMeVISDataset, VideoRefYoutubeVOSDataset, video_lisa_collate_fn, VideoSAM2Dataset +from projects.llava_sam2.datasets import VideoChatUniViDataset +from projects.llava_sam2.datasets import RefCOCOgGCGDataset, OpenPsgGCGDataset, FlickrGCGDataset, GranDfGCGDataset, OspreyDataset, OspreyDescriptionDataset, OspreyShortDescriptionDataset +from projects.llava_sam2.datasets import LLaVADataset +from projects.llava_sam2.datasets import ReferSegmDataset +from projects.llava_sam2.models.preprocess.image_resize import DirectResize + +####################################################################### +# PART 1 Settings # +####################################################################### +# Model +path = './pretrained/InternVL2_5-4B' +pretrained_pth = None + +# Data +prompt_template = PROMPT_TEMPLATE.phi3_chat +max_length = 8192 + +# 
Scheduler & Optimizer +batch_size = 2 # per_device +accumulative_counts = 4 +dataloader_num_workers = 4 +max_epochs = 1 +optim_type = AdamW +# official 1024 -> 4e-5 +# lr = 1e-6 +lr = 4e-5 +betas = (0.9, 0.999) +weight_decay = 0.05 +max_norm = 1 # grad clip +warmup_ratio = 0.05 + +# Save +save_steps = 1000 +save_total_limit = 2 # Maximum checkpoints to keep (-1 means unlimited) + +special_tokens = ['[SEG]', '
<p>', '</p>', '<vp>', '</vp>
', '', ''] + +tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=path, + trust_remote_code=True, + padding_side='right') + +extra_image_processor = dict( + type=DirectResize, + target_length=1024, +) +####################################################################### +# PART 2 Model & Tokenizer & Image Processor # +####################################################################### +model = dict( + type=VideoLLaVASAMModel_zero3, + special_tokens=special_tokens, + frozen_sam2_decoder=False, + mllm=dict( + type=InternVL_Slowfast, + model_path=path, + freeze_llm=True, + freeze_visual_encoder=True, + llm_lora=dict( + type=LoraConfig, + r=128, + lora_alpha=256, + lora_dropout=0.05, + bias='none', + task_type='CAUSAL_LM'), + special_tokens=special_tokens, + ), + tokenizer=tokenizer, + grounding_encoder=dict( + type=SAM2TrainRunner, + ), + loss_mask=dict( + type=CrossEntropyLoss, + use_sigmoid=True, + reduction='mean', + loss_weight=2.0), + loss_dice=dict( + type=DiceLoss, + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=0.5), + pretrained_pth=pretrained_pth, + loss_sample_points=True, + # loss_sample_points=False, + bs=batch_size, +) + +####################################################################### +# PART 3 Dataset & Dataloader # +####################################################################### + + +VIDEO_DATAS = './data/video_datas/' +IMG_DATAS = './data/image_datas/' + +############### video res +data_root_revos = './data/video_datas/revos/' +video_revos_image_folder = data_root_revos +video_revos_expression_file = data_root_revos + 'meta_expressions_train_.json' +video_revos_mask_file = data_root_revos + 'mask_dict.json' + +data_root_mevis = './data/video_datas/mevis/train/' +video_mevis_image_folder = data_root_mevis + 'JPEGImages' +video_mevis_expression_file = data_root_mevis + 'meta_expressions.json' +video_mevis_mask_file = data_root_mevis + 'mask_dict.json' + +data_root_refytvos = './data/video_datas/rvos/' +video_refytvos_image_folder = data_root_refytvos + 'train/JPEGImages/' +video_refytvos_expression_file = data_root_refytvos + 'meta_expressions/train/meta_expressions.json' +video_refytvos_mask_file = data_root_refytvos + 'mask_dict.pkl' + +video_revos_dataset = dict( + type=VideoReVOSDataset, + image_folder=video_revos_image_folder, + expression_file=video_revos_expression_file, + mask_file=video_revos_mask_file, + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + lazy=True, + repeats=10, + special_tokens=special_tokens, + extra_image_processor=extra_image_processor, + sampled_frames=5, +) + +video_mevis_dataset = dict( + type=VideoMeVISDataset, + image_folder=video_mevis_image_folder, + expression_file=video_mevis_expression_file, + mask_file=video_mevis_mask_file, + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + lazy=True, + repeats=4, + special_tokens=special_tokens, + extra_image_processor=extra_image_processor, + sampled_frames=5, +) + +video_refytvos_dataset = dict( + type=VideoRefYoutubeVOSDataset, + image_folder=video_refytvos_image_folder, + expression_file=video_refytvos_expression_file, + mask_file=video_refytvos_mask_file, + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + lazy=True, + repeats=4, + 
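+    # `repeats` oversamples this source: the dataset classes in this diff
+    # return len(raw items) * repeats from __len__, so Ref-YouTube-VOS
+    # contributes roughly 4x its raw length per epoch.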
special_tokens=special_tokens, + extra_image_processor=extra_image_processor, + sampled_frames=5, +) + +################### Video chat +data_root_video_chatunivi = VIDEO_DATAS + 'video_vlm/video_chat/' +video_chatunivi_image_folder = data_root_video_chatunivi + 'Activity_Videos/' +video_chatunivi_json_file = data_root_video_chatunivi+ 'video_chat.json' + +video_qa_dataset = dict( + type=VideoChatUniViDataset, + image_folder=video_chatunivi_image_folder, + json_file=video_chatunivi_json_file, + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + lazy=True, + repeats=1, + special_tokens=special_tokens, + extra_image_processor=extra_image_processor, + sampled_frames=5, +) + +################## image chat +llava_vqa_dataset = dict( + type=LLaVADataset, + tokenizer=tokenizer, + data_path='data/llava_data/LLaVA-Instruct-150K/llava_v1_5_mix665k.json', + prompt_template=prompt_template, + special_tokens=special_tokens, + image_folder='data/llava_data/llava_images/', +) + +################## image res +refcoco_segm_dataset=dict( + type=ReferSegmDataset, + tokenizer=tokenizer, + special_tokens=special_tokens, + extra_image_processor=extra_image_processor, + data_root='data/ref_seg/refcoco', + data_prefix=dict(img_path='coco2014/train2014/'), + ann_file='instances.json', + split_file='refs(unc).p', + prompt_template=prompt_template, + num_classes_per_sample=5, + max_length=max_length, +) +refcoco_plus_segm_dataset=dict( + type=ReferSegmDataset, + tokenizer=tokenizer, + special_tokens=special_tokens, + extra_image_processor=extra_image_processor, + data_root='data/ref_seg/refcoco+', + data_prefix=dict(img_path='coco2014/train2014/'), + ann_file='instances.json', + split_file='refs(unc).p', + prompt_template=prompt_template, + num_classes_per_sample=5, + max_length=max_length, +) +refcocog_segm_dataset=dict( + type=ReferSegmDataset, + tokenizer=tokenizer, + special_tokens=special_tokens, + extra_image_processor=extra_image_processor, + data_root='data/ref_seg/refcocog', + data_prefix=dict(img_path='coco2014/train2014/'), + ann_file='instances.json', + split_file='refs(umd).p', + prompt_template=prompt_template, + num_classes_per_sample=5, + max_length=max_length, +) + +# image gcg datas +glamm_data_root = './data/glamm_data/' + +refcocog_image_path = glamm_data_root + 'images/coco2014/train2014/' +refcocog_ann_file = glamm_data_root + 'annotations/RefCOCOg_GCG_train.json' + +grandf_image_path = glamm_data_root + 'images/grandf/train/' +grandf_ann_file = glamm_data_root + 'annotations/GranDf_HA_GCG_train.json' + +flickr_image_path = glamm_data_root + 'images/flickr30k/Flickr30K/' +flickr_ann_file = glamm_data_root + 'annotations/flickr_mergedGT_GCG_train.json' + +psg_image_path = glamm_data_root + 'images/coco2017/' +psg_ann_file = glamm_data_root + 'annotations/OpenPsgGCG_train.json' + +glamm_refcocog_dataset = dict( + type=RefCOCOgGCGDataset, + image_folder=refcocog_image_path, + data_path=refcocog_ann_file, + tokenizer=tokenizer, + max_length=max_length, + special_tokens=special_tokens, + template_map_fn=dict(type=template_map_fn_factory, template=prompt_template), + extra_image_processor=extra_image_processor, + lazy=True, + repeats=1, +) + +glamm_grandf_dataset = dict( + type=GranDfGCGDataset, + data_path=grandf_ann_file, + image_folder=grandf_image_path, + tokenizer=tokenizer, + max_length=max_length, + special_tokens=special_tokens, + template_map_fn=dict(type=template_map_fn_factory, template=prompt_template), + 
extra_image_processor=extra_image_processor, + lazy=True, + repeats=10, +) + +glamm_psg_dataset = dict( + type=OpenPsgGCGDataset, + data_path=psg_ann_file, + image_folder=psg_image_path, + tokenizer=tokenizer, + max_length=max_length, + special_tokens=special_tokens, + template_map_fn=dict(type=template_map_fn_factory, template=prompt_template), + extra_image_processor=extra_image_processor, + lazy=True, + repeats=1, +) + +glamm_flickr_dataset = dict( + type=FlickrGCGDataset, + data_path=flickr_ann_file, + image_folder=flickr_image_path, + tokenizer=tokenizer, + max_length=max_length, + special_tokens=special_tokens, + template_map_fn=dict(type=template_map_fn_factory, template=prompt_template), + extra_image_processor=extra_image_processor, + lazy=True, + repeats=1, +) + +# sam2 data +data_sam2_folder = VIDEO_DATAS + 'segmentation_datasets/sam_v_full/' +data_sam2_expression_file = './whole_pesudo_cap_v3/sam_v_final_v3.json' + +video_sam2_dataset = dict( + type=VideoSAM2Dataset, + sam2_folder=data_sam2_folder, + expression_file=data_sam2_expression_file, + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + lazy=True, + repeats=4, + special_tokens=special_tokens, + extra_image_processor=extra_image_processor, + sampled_frames=5, + select_number=5, +) + +# osprey +data_osprey_file = VIDEO_DATAS + 'osprey-724k/Osprey-724K/osprey_conversation.json' +data_osprey_image_folders = [ + IMG_DATAS+ 'coco/train2014/', + IMG_DATAS + 'coco/val2014/', + IMG_DATAS + 'coco/train2017/', + IMG_DATAS + 'coco/val2017/', +] + +image_osprey_dataset = dict( + type=OspreyDataset, + image_folder=data_osprey_image_folders, + data_path=data_osprey_file, + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + lazy=True, + repeats=1, + special_tokens=special_tokens, +) + +data_osprey_detail_description_file = VIDEO_DATAS + 'osprey-724k/Osprey-724K/osprey_detail_description.json' +image_osprey_description_dataset = dict( + type=OspreyDescriptionDataset, + image_folder=data_osprey_image_folders, + data_path=data_osprey_detail_description_file, + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + lazy=True, + repeats=1, + special_tokens=special_tokens, +) + +data_osprey_short_file = VIDEO_DATAS + 'osprey-724k/Osprey-724K/osprey_short_form.json' +image_osprey_short_dataset = dict( + type=OspreyShortDescriptionDataset, + image_folder=data_osprey_image_folders, + data_path=data_osprey_short_file, + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + lazy=True, + repeats=1, + special_tokens=special_tokens, +) + +data_osprey_part_file = VIDEO_DATAS + 'osprey-724k/Osprey-724K/osprey_part_level.json' +image_osprey_part_dataset = dict( + type=OspreyDataset, + image_folder=data_osprey_image_folders, + data_path=data_osprey_part_file, + tokenizer=tokenizer, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + lazy=True, + repeats=1, + special_tokens=special_tokens, +) + +data_osprey_positive_neg_file = VIDEO_DATAS + 'osprey-724k/Osprey-724K/osprey_lvis_positive_negative.json' +image_osprey_positive_neg_dataset = dict( + type=OspreyDataset, + image_folder=data_osprey_image_folders, + data_path=data_osprey_positive_neg_file, + tokenizer=tokenizer, + 
template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + lazy=True, + repeats=1, + special_tokens=special_tokens, +) + +train_dataset = dict( + type=ConcatDataset, datasets=[ + # sem seg + # semantic_seg_ade20k_dataset, + # ref seg + refcoco_segm_dataset, refcoco_plus_segm_dataset, refcocog_segm_dataset, + refcoco_segm_dataset, refcoco_plus_segm_dataset, refcocog_segm_dataset, + refcoco_segm_dataset, refcoco_plus_segm_dataset, refcocog_segm_dataset, + refcoco_segm_dataset, refcoco_plus_segm_dataset, refcocog_segm_dataset, + # image qa + llava_vqa_dataset, + # video res + video_mevis_dataset, video_revos_dataset, video_refytvos_dataset, + # video chat + video_qa_dataset, + # sam2 pesudo + video_sam2_dataset, + # gcg data + glamm_psg_dataset, + glamm_grandf_dataset, + glamm_flickr_dataset, + glamm_refcocog_dataset, + # visual prompt + image_osprey_dataset, image_osprey_description_dataset, + image_osprey_part_dataset, image_osprey_short_dataset, + image_osprey_positive_neg_dataset, + ] +) +train_dataloader = dict( + batch_size=batch_size, + num_workers=dataloader_num_workers, + dataset=train_dataset, + sampler=dict( + type=LengthGroupedSampler, + length_property='modality_length', + per_device_batch_size=batch_size * accumulative_counts), + collate_fn=dict(type=video_lisa_collate_fn) +) + +####################################################################### +# PART 4 Scheduler & Optimizer # +####################################################################### +# optimizer +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict( + type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay), + clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False), + accumulative_counts=accumulative_counts, + loss_scale='dynamic', + dtype='bfloat16' +) + +# learning policy +# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501 +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-5, + by_epoch=True, + begin=0, + end=warmup_ratio * max_epochs, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=0.0, + by_epoch=True, + begin=warmup_ratio * max_epochs, + end=max_epochs, + convert_to_iter_based=True) +] + +# train, val, test setting +train_cfg = dict(type=TrainLoop, max_epochs=max_epochs) + +####################################################################### +# PART 5 Runtime # +####################################################################### +# Log the dialogue periodically during the training process, optional +custom_hooks = [ + # dict(type=DatasetInfoHook, tokenizer=tokenizer), +] + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + # print log every 10 iterations. + logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10), + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + # save checkpoint per `save_steps`. + checkpoint=dict( + type=CheckpointHook, + save_optimizer=False, + by_epoch=False, + interval=save_steps, + max_keep_ckpts=save_total_limit), + # set sampler seed in distributed evrionment. 
+ sampler_seed=dict(type=DistSamplerSeedHook), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +visualizer = None + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) + +# set log processor +log_processor = dict(by_epoch=False) diff --git a/projects/llava_sam2/datasets/ChatUniVi_Dataset.py b/projects/llava_sam2/datasets/ChatUniVi_Dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..a4dfdf7679279168cea2b27361d93e58b942beb9 --- /dev/null +++ b/projects/llava_sam2/datasets/ChatUniVi_Dataset.py @@ -0,0 +1,389 @@ +import logging +import os +from typing import Literal + +import torch +from datasets import Dataset as HFDataset +from datasets import DatasetDict, load_from_disk +from mmengine import print_log +from PIL import Image +from torch.utils.data import Dataset +import numpy as np + +from xtuner.registry import BUILDER +from xtuner.dataset.huggingface import build_origin_dataset +import copy +from .encode_fn import video_lisa_encode_fn +import json +import cv2 +import torchvision.transforms as T +from torchvision.transforms.functional import InterpolationMode +from decord import VideoReader, cpu + + +def _get_rawvideo_dec(video_path, select_frames=5): + + if os.path.exists(video_path): + vreader = VideoReader(video_path, ctx=cpu(0)) + elif os.path.exists(video_path.replace('mkv', 'mp4')): + vreader = VideoReader(video_path.replace('mkv', 'mp4'), ctx=cpu(0)) + else: + print(video_path) + raise FileNotFoundError + + fps = vreader.get_avg_fps() + f_start = 0 + f_end = len(vreader) - 1 + num_frames = f_end - f_start + 1 + assert num_frames > 0, f'num_frames: {num_frames}, f_start: {f_start}, f_end: {f_end}, fps: {fps}, video_path: {video_path}' + # T x 3 x H x W + if num_frames <= select_frames: + sample_pos = range(f_start, f_end + 1) + else: + split_point = np.linspace(0, num_frames, num=select_frames+1, dtype=int) + sample_pos = [np.random.randint(split_point[i], split_point[i+1]) for i in range(select_frames)] + patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()] + return patch_images + + +class VideoChatUniViDataset(Dataset): + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + IMG_CONTEXT_TOKEN = '' + IMG_START_TOKEN = '' + IMG_END_TOKEN = '' + + FAST_IMG_CONTEXT_TOKEN = '' + FAST_IMG_START_TOKEN = '' + FAST_IMG_END_TOKEN = '' + + def __init__(self, + image_folder, + json_file, + extra_image_processor=None, + tokenizer=None, + sampled_frames=10, + offline_processed_text_folder=None, + template_map_fn=None, + max_length=2048, + lazy=True, + repeats=1, + special_tokens=None, + use_fast=False, + n_fast_images=50, + fast_pool_size=4, + arch_type: Literal['intern_vl', 'qwen'] = 'intern_vl', + preprocessor=None, + ): + assert lazy is True + self.tokenizer = BUILDER.build(tokenizer) + self.sampled_frames = sampled_frames + assert offline_processed_text_folder or (json_file and tokenizer) + self.lazy = lazy + + self.max_length = max_length + + self.template_map_fn = template_map_fn + if isinstance(self.template_map_fn, dict) and self.lazy: + _type = 
self.template_map_fn['type'] + del self.template_map_fn['type'] + self.template_map_fn = _type(**self.template_map_fn) + + if offline_processed_text_folder and json_file: + print_log( + 'Both `offline_processed_text_folder` and ' + '`data_path` are set, and we load dataset from' + '`offline_processed_text_folder` ' + f'({offline_processed_text_folder})', + logger='current', + level=logging.WARNING) + + if offline_processed_text_folder is not None: + raise NotImplementedError + else: + json_datas = self.json_file_preprocess(json_file) + self.json_datas = json_datas + json_data = DatasetDict({'train': HFDataset.from_list(json_datas)}) + if self.lazy: + self.text_data = build_origin_dataset(json_data, 'train') + else: + raise NotImplementedError + + self.image_folder = image_folder + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + + self.arch_type = arch_type + if self.arch_type == 'qwen': + self.IMG_CONTEXT_TOKEN = '<|image_pad|>' + self.IMG_START_TOKEN = '<|vision_start|>' + self.IMG_END_TOKEN = '<|vision_end|>' + elif self.arch_type == 'llava': + self.IMG_CONTEXT_TOKEN = '' + self.IMG_START_TOKEN = '' + self.IMG_END_TOKEN = '' + self.repeats = repeats + + self._system = '' + + self.downsample_ratio = 0.5 + if self.arch_type == 'llava': + self.downsample_ratio = 1 + self.image_size = 448 + if self.arch_type == 'llava': + self.image_size = 336 + patch_size = 14 + self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2)) + if self.arch_type == 'qwen': + self.patch_token = 1 + + if preprocessor is None: + self.transformer = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD) + ]) + self.preprocessor = None + else: + self.transformer = None + self.preprocessor = BUILDER.build(preprocessor) + + self.arch_type = arch_type + + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.use_fast = use_fast + self.n_fast_images = n_fast_images + self.fast_pool_size = fast_pool_size + + # for visualization debug + self.save_folder = './work_dirs/video_debug/' + self.cur_number = 0 + + print("Video Chat dataset, include {} items.".format(len(self.text_data))) + + def __len__(self): + return len(self.text_data) * self.repeats + + @property + def modality_length(self): + length_list = [] + for data_dict in self.text_data: + cur_len = 10000 + length_list.append(cur_len) + return length_list + + def real_len(self): + return len(self.text_data) + + def json_file_preprocess(self, json_file): + # prepare expression annotation files + with open(json_file, 'r') as f: + json_datas = json.load(f) + return json_datas + + def dataset_map_fn(self, data_dict, select_k=5): + assert 'video' in data_dict + # video + video_file = data_dict['video'] + video_file = os.path.join(self.image_folder, video_file) + images = _get_rawvideo_dec(video_file, select_frames=select_k) + if self.use_fast: + fast_images = _get_rawvideo_dec(video_file, select_frames=self.n_fast_images) + else: + fast_images = None + + conversation = data_dict['conversations'] + + # prepare text + if self.use_fast: + text_dict = self.prepare_text( + select_k, conversation, num_image_tokens=self.patch_token, + n_fast_images=len(fast_images), + ) + else: + text_dict = self.prepare_text( + select_k, conversation, 
num_image_tokens=self.patch_token, + ) + + + ret = {'images': images, 'conversation': text_dict['conversation'], 'fast_images': fast_images} + return ret + + def prepare_text(self, n_frames, conversation, num_image_tokens=256, n_fast_images=0): + + if self.use_fast: + fast_frame_token_str = f'{self.FAST_IMG_START_TOKEN}' \ + f'{self.FAST_IMG_CONTEXT_TOKEN * n_fast_images * self.fast_pool_size * self.fast_pool_size}' \ + f'{self.FAST_IMG_END_TOKEN}' + '\n' + else: + fast_frame_token_str = '' + + frame_token_str = f'{self.IMG_START_TOKEN}' \ + f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \ + f'{self.IMG_END_TOKEN}' + + questions = [] + answers = [] + + for conv in conversation: + if conv['from'] == 'human': + questions.append(conv['value'].replace('', '')) + else: + answers.append(conv['value']) + assert len(questions) == len(answers) + + qa_list = [] + for i, (question, answer) in enumerate(zip(questions, answers)): + if i == 0: + frame_tokens = frame_token_str + '\n' + # frame_tokens = '=' + ' ' + frame_tokens = frame_tokens * n_frames + frame_tokens = frame_tokens.strip() + frame_tokens = fast_frame_token_str + frame_tokens + qa_list.append( + {'from': 'human', 'value': frame_tokens + question} + ) + else: + qa_list.append( + {'from': 'human', 'value': question} + ) + qa_list.append( + {'from': 'gpt', 'value': answer} + ) + + input = '' + conversation = [] + for msg in qa_list: + if msg['from'] == 'human': + input += msg['value'] + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + + # add system information + conversation[0].update({'system': self._system}) + return {'conversation': conversation} + + def __getitem__(self, index): + index = index % self.real_len() + selected_data_dict = copy.deepcopy(self.text_data[index]) + data_dict = self.dataset_map_fn(selected_data_dict, select_k=self.sampled_frames) + + + assert 'images' in data_dict.keys() + if self.use_fast: + assert 'fast_images' in data_dict.keys() + pixel_values = [] + num_video_tokens = None + num_frame_tokens = None + if data_dict.get('images', None) is not None: + frames_files = data_dict['images'] + for frame_image in frames_files: + frame_image = frame_image.convert('RGB') + ori_width, ori_height = frame_image.size + + if self.preprocessor is not None: + pass + else: + frame_image = self.transformer(frame_image) + pixel_values.append(frame_image) + + if self.preprocessor is not None: + if self.arch_type == 'qwen': + _data_dict = self.preprocessor(pixel_values, do_resize=True, size=(self.image_size, self.image_size)) + _data_dict['pixel_values'] = torch.tensor(_data_dict['pixel_values'], dtype=torch.float) + _data_dict['image_grid_thw'] = torch.tensor(_data_dict['image_grid_thw'], dtype=torch.int) + num_frame_tokens = int(_data_dict['image_grid_thw'][0].prod() * (self.downsample_ratio ** 2)) + num_frames = _data_dict['image_grid_thw'].shape[0] + num_video_tokens = num_frame_tokens * num_frames + elif self.arch_type == 'llava': + _data_dict = self.preprocessor(pixel_values, do_resize=True, + size=(self.image_size, self.image_size)) + _data_dict['pixel_values'] = np.stack(_data_dict['pixel_values'], axis=0) + _data_dict['pixel_values'] = torch.tensor(_data_dict['pixel_values'], dtype=torch.float) + else: + raise NotImplementedError + data_dict.update(_data_dict) + else: + pixel_values = torch.stack(pixel_values, dim=0) # (n_f, 3, h, w) + data_dict['pixel_values'] = pixel_values + else: + data_dict['pixel_values'] = torch.zeros(0, 3, 
self.image_size, self.image_size) + data_dict['masks'] = None + + if num_video_tokens is not None: + assert self.patch_token == 1 + input_str = data_dict['conversation'][0]['input'] + input_str = input_str.replace(self.IMG_CONTEXT_TOKEN, self.IMG_CONTEXT_TOKEN * num_frame_tokens) + assert input_str.count(self.IMG_CONTEXT_TOKEN) == num_video_tokens + data_dict['conversation'][0]['input'] = input_str + + result = self.template_map_fn(data_dict) + data_dict.update(result) + result = video_lisa_encode_fn(data_dict, tokenizer=self.tokenizer, max_length=self.max_length, with_image_token=True) + data_dict.update(result) + + # for fast branch + if self.use_fast: + fast_pixel_values = [] + frames_files = data_dict['fast_images'] + for frame_image in frames_files: + frame_image = frame_image.convert('RGB') + ori_width, ori_height = frame_image.size + + frame_image = self.transformer(frame_image) + fast_pixel_values.append(frame_image) + + fast_pixel_values = torch.stack(fast_pixel_values, dim=0) # (n_f, 3, h, w) + data_dict['fast_pixel_values'] = fast_pixel_values + + + # # for debug + # self.visualization_debug(data_dict) + # if self.cur_number < 10: + # return self[random.randint(0, len(self))] + + data_dict['type'] = 'video' + return data_dict + + def visualization_debug(self, data_dict): + save_folder = os.path.join(self.save_folder, 'sample_{}'.format(self.cur_number)) + if not os.path.exists(save_folder): + os.mkdir(save_folder) + self.cur_number += 1 + + # images + + show_images = [] + + pixel_values = data_dict['pixel_values'] + save_folder_image = os.path.join(save_folder, 'image') + if not os.path.exists(save_folder_image): + os.mkdir(save_folder_image) + for i_image, image_pixel_value in enumerate(pixel_values): + # print(image_pixel_value.shape) + image_pixel_value[0] = image_pixel_value[0] * 0.2686 + image_pixel_value[1] = image_pixel_value[1] * 0.2613 + image_pixel_value[2] = image_pixel_value[2] * 0.2757 + image_pixel_value[0] = image_pixel_value[0] + 0.4814 + image_pixel_value[1] = image_pixel_value[1] + 0.4578 + image_pixel_value[2] = image_pixel_value[2] + 0.4082 + image_pixel_value = image_pixel_value * 255 + image_pixel_value = image_pixel_value.permute(1, 2, 0) + image_pixel_value = image_pixel_value.to(torch.uint8).numpy() + # print(os.path.join(save_folder_image, '{}.jpg'.format(i_image))) + # print(image_pixel_value.shape) + show_images.append(image_pixel_value) + cv2.imwrite(os.path.join(save_folder_image, '{}.jpg'.format(i_image)), image_pixel_value) + + # text + input_text = self.tokenizer.decode(data_dict['input_ids'], skip_special_tokens=False) + with open(os.path.join(save_folder, 'text.json'), 'w') as f: + json.dump([input_text], f) + + return diff --git a/projects/llava_sam2/datasets/GCG_Dataset.py b/projects/llava_sam2/datasets/GCG_Dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..4a45e75200cf21011cc513b6e6182dac7c190297 --- /dev/null +++ b/projects/llava_sam2/datasets/GCG_Dataset.py @@ -0,0 +1,375 @@ +import json +import os + +import torch +from datasets import Dataset as HFDataset +from datasets import DatasetDict, load_from_disk +from PIL import Image +from torch.utils.data import Dataset +from pycocotools import mask +import numpy as np +import copy + +from xtuner.registry import BUILDER +from xtuner.dataset.huggingface import process_hf_dataset, build_origin_dataset +import torchvision.transforms as T +from xtuner.utils import DEFAULT_IMAGE_TOKEN +from torchvision.transforms.functional import InterpolationMode +from .encode_fn 
import video_lisa_encode_fn +from .utils import dynamic_preprocess + +from .gcg_process import glamm_openpsg_map_fn, glamm_flickr_map_fn, glamm_granf_map_fn, glamm_refcocog_map_fn + +class GCGDataset(Dataset): + os.environ['TOKENIZERS_PARALLELISM'] = 'true' + IMG_CONTEXT_TOKEN = '' + IMG_START_TOKEN = '' + IMG_END_TOKEN = '' + + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + def __init__(self, + image_folder, + data_path=None, + tokenizer=None, + max_length=8196, + special_tokens=None, + template_map_fn=None, + extra_image_processor=None, + lazy=True, + repeats=1, + single_image_mode=False, + ): + super().__init__() + assert lazy + self.lazy = lazy + self.max_length = max_length + + json_data = self.json_file_preprocess(data_path) + json_data = DatasetDict({'train': HFDataset.from_list(json_data)}) + self.text_data = build_origin_dataset(json_data, 'train') + + self.image_folder = image_folder + + self.tokenizer = BUILDER.build(tokenizer) + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.template_map_fn = template_map_fn + if isinstance(self.template_map_fn, dict) and self.lazy: + _type = self.template_map_fn['type'] + del self.template_map_fn['type'] + self.template_map_fn = _type(**self.template_map_fn) + + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + + self.repeats = repeats + + self._system = '' + + self.min_dynamic_patch = 1 + self.max_dynamic_patch = 12 + self.downsample_ratio = 0.5 + self.image_size = 448 + self.use_thumbnail = True + patch_size = 14 + self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2)) + + self.transformer = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD) + ]) + + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.single_image_mode = single_image_mode + + def json_file_preprocess(self, data_path): + with open(data_path, 'r') as f: + json_data = json.load(f) + return json_data + + @property + def modality_length(self): + length_list = [] + for data_dict in self.text_data: + if self.lazy: + cur_len = 100 + else: + cur_len = len(data_dict['input_ids']) + if data_dict.get('image', None) is None: + cur_len = -cur_len + length_list.append(cur_len) + return length_list * self.repeats + + def __len__(self): + return len(self.text_data) * self.repeats + + def real_len(self): + return len(self.text_data) + + def decode_mask(self, object_masks, ori_height, ori_width): + binary_masks = [] + for object_mask in object_masks: + binary_mask = np.zeros((ori_height, ori_width), dtype=np.uint8) + for seg in object_mask: + rles = mask.frPyObjects([seg], ori_height, ori_width) + m = mask.decode(rles) + m = m.astype(np.uint8) + binary_mask += m.squeeze() + + binary_masks.append(binary_mask) + if len(binary_masks) == 0: + return None + masks = np.stack(binary_masks, axis=0) + masks = torch.from_numpy(masks) + return masks + + def dataset_map_fn(self, data_dict): + data_dict = glamm_refcocog_map_fn(data_dict) + return data_dict + + def replace_image_str(self, data_dict, image_str): + data_dict['conversation'][0]['input'] = \ + data_dict['conversation'][0]['input'].replace(DEFAULT_IMAGE_TOKEN, image_str) + return data_dict + + def 
__getitem__(self, index): + + index = index % self.real_len() + data_dict = copy.deepcopy(self.text_data[index]) + + # parse datasets + result = self.dataset_map_fn(data_dict) + data_dict.update(result) + + # process image + image_file = data_dict['image'] + image = Image.open(os.path.join(self.image_folder, + image_file)).convert('RGB') + ori_width, ori_height = image.size + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous() + data_dict['g_pixel_values'] = g_pixel_values + + if self.single_image_mode: + images = [image] + else: + images = dynamic_preprocess(image, self.min_dynamic_patch, + self.max_dynamic_patch, + self.image_size, self.use_thumbnail) + pixel_values = [self.transformer(image) for image in images] + pixel_values = torch.stack(pixel_values) + data_dict['pixel_values'] = pixel_values + + num_image_tokens = pixel_values.shape[0] * self.patch_token + image_token_str = f'{self.IMG_START_TOKEN}' \ + f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \ + f'{self.IMG_END_TOKEN}' + + data_dict = self.replace_image_str(data_dict, image_token_str) + + result = self.template_map_fn(data_dict) + data_dict.update(result) + result = video_lisa_encode_fn(data_dict, tokenizer=self.tokenizer, max_length=self.max_length, + with_image_token=True) + data_dict.update(result) + # process mask + data_dict['masks'] = self.decode_mask(data_dict['masks'], ori_height=ori_height, ori_width=ori_width) + + if data_dict['masks'] is None: + return self.__getitem__(0) + + return data_dict + +class RefCOCOgGCGDataset(GCGDataset): + def __init__(self, + image_folder, + data_path=None, + tokenizer=None, + max_length=8196, + special_tokens=None, + template_map_fn=None, + extra_image_processor=None, + lazy=True, + repeats=1, + single_image_mode=False, + ): + super().__init__( + image_folder=image_folder, + data_path=data_path, + tokenizer=tokenizer, + max_length=max_length, + special_tokens=special_tokens, + template_map_fn=template_map_fn, + extra_image_processor=extra_image_processor, + lazy=lazy, + repeats=repeats, + single_image_mode=single_image_mode, + ) + + def json_file_preprocess(self, data_path): + json_data = json.load(open(data_path)) + + # convert {id: dict} to dict(..., id=xx) + for idx in range(len(json_data)): + id = list(json_data[idx].keys())[0] + json_data[idx] = json_data[idx][id] + json_data[idx].update({'id': id}) + return json_data + +class GranDfGCGDataset(GCGDataset): + def __init__(self, + image_folder, + data_path=None, + tokenizer=None, + max_length=8196, + special_tokens=None, + template_map_fn=None, + extra_image_processor=None, + lazy=True, + repeats=1, + single_image_mode=False, + ): + super().__init__( + image_folder=image_folder, + data_path=data_path, + tokenizer=tokenizer, + max_length=max_length, + special_tokens=special_tokens, + template_map_fn=template_map_fn, + extra_image_processor=extra_image_processor, + lazy=lazy, + repeats=repeats, + single_image_mode=single_image_mode, + ) + + def dataset_map_fn(self, data_dict): + data_dict = glamm_granf_map_fn(data_dict) + return data_dict + + def decode_mask(self, object_masks, ori_height, ori_width): + binary_masks = [] + for object_mask in object_masks: + binary_mask = np.zeros((ori_height, ori_width), dtype=np.uint8) + + for rle in object_mask: + m = mask.decode(rle).astype(np.uint8) + binary_mask += m.squeeze() + + binary_masks.append(binary_mask) + if 
len(binary_masks) == 0: + return None + masks = np.stack(binary_masks, axis=0) + masks = torch.from_numpy(masks) + return masks + +class OpenPsgGCGDataset(GranDfGCGDataset): + def __init__(self, + image_folder, + data_path=None, + tokenizer=None, + max_length=8196, + special_tokens=None, + template_map_fn=None, + extra_image_processor=None, + lazy=True, + repeats=1, + single_image_mode=False, + ): + super().__init__( + image_folder=image_folder, + data_path=data_path, + tokenizer=tokenizer, + max_length=max_length, + special_tokens=special_tokens, + template_map_fn=template_map_fn, + extra_image_processor=extra_image_processor, + lazy=lazy, + repeats=repeats, + single_image_mode=single_image_mode, + ) + def dataset_map_fn(self, data_dict): + data_dict = glamm_openpsg_map_fn(data_dict) + return data_dict + + +class FlickrGCGDataset(GCGDataset): + def __init__(self, + image_folder, + data_path=None, + tokenizer=None, + max_length=8196, + special_tokens=None, + template_map_fn=None, + extra_image_processor=None, + lazy=True, + repeats=1, + single_image_mode=False, + ): + super().__init__( + image_folder=image_folder, + data_path=data_path, + tokenizer=tokenizer, + max_length=max_length, + special_tokens=special_tokens, + template_map_fn=template_map_fn, + extra_image_processor=extra_image_processor, + lazy=lazy, + repeats=repeats, + single_image_mode=single_image_mode, + ) + + def dataset_map_fn(self, data_dict): + data_dict = glamm_flickr_map_fn(data_dict) + return data_dict + + def json_file_preprocess(self, data_path): + def filter_images(data_infos, min_size): + return [i for i, info in enumerate(data_infos) if min(info['width'], info['height']) >= min_size] + + # convert {id: dict} to dict(..., id=xx) + from pycocotools.coco import COCO + self.coco = COCO(data_path) + self.image_ids = self.coco.getImgIds() + data_infos = [] + total_ann_ids = [] + removed_img_count = 0 + for img_id in self.image_ids: + info = self.coco.loadImgs([img_id])[0] + if len(info['caption'].split(' ')) < 3: + removed_img_count += 1 + continue + info['filename'] = info['file_name'].split('_')[-1] + info['height'] = int(info['height']) + info['width'] = int(info['width']) + data_infos.append(info) + ann_ids = self.coco.getAnnIds(imgIds=[img_id]) + total_ann_ids.extend(ann_ids) + assert len(set(total_ann_ids)) == len(total_ann_ids), f"Non-unique annotation IDs in '{data_path}'!" 
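+        # At this point `data_infos` holds only images whose captions have at
+        # least 3 words; `filter_images` below additionally drops images whose
+        # shorter side is under 32 px before their annotations are attached.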
+ print(f'Removed {removed_img_count} images.') + data_infos = [data_infos[i] for i in filter_images(data_infos, min_size=32)] + + # obtain_annotations + for data_info in data_infos: + ann_ids = self.coco.getAnnIds(imgIds=data_info['id']) + ann_info = self.coco.loadAnns(ann_ids) + data_info.update({'ann_info': ann_info}) + return data_infos + + def decode_mask(self, object_masks, ori_height, ori_width): + binary_masks = [] + for object_mask in object_masks: + binary_mask = mask.decode(object_mask).astype(np.uint8) + binary_masks.append(binary_mask) + if len(binary_masks) == 0: + return None + masks = np.stack(binary_masks, axis=0) + masks = torch.from_numpy(masks) + return masks \ No newline at end of file diff --git a/projects/llava_sam2/datasets/Grand_Dataset.py b/projects/llava_sam2/datasets/Grand_Dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..a7cffa2628e55e4d55a400e05f3cf72c88cfa754 --- /dev/null +++ b/projects/llava_sam2/datasets/Grand_Dataset.py @@ -0,0 +1,241 @@ +import json +import os +import random + +import torch +from datasets import Dataset as HFDataset +from datasets import DatasetDict, load_from_disk +from PIL import Image +from torch.utils.data import Dataset +from pycocotools import mask +import numpy as np +import copy + +from xtuner.registry import BUILDER +from xtuner.dataset.huggingface import process_hf_dataset, build_origin_dataset +import torchvision.transforms as T +from xtuner.utils import DEFAULT_IMAGE_TOKEN +from torchvision.transforms.functional import InterpolationMode +from .encode_fn import video_lisa_encode_fn +from .utils import dynamic_preprocess + +from .grand_process import glamm_grand_map_fn + +class GranDDataset(Dataset): + os.environ['TOKENIZERS_PARALLELISM'] = 'true' + IMG_CONTEXT_TOKEN = '' + IMG_START_TOKEN = '' + IMG_END_TOKEN = '' + + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + def __init__(self, + image_folder, + json_folder=None, + tokenizer=None, + max_length=8196, + special_tokens=None, + template_map_fn=None, + extra_image_processor=None, + lazy=True, + repeats=1, + single_image_mode=False, + image_list_save_path='./work_dirs/grand_image.json', + json_list_save_path='./work_dirs/grand_jsons.json', + ): + super().__init__() + assert lazy + self.lazy = lazy + self.max_length = max_length + + self.image_list_save_path = image_list_save_path + self.json_list_save_path = json_list_save_path + + json_files, image_path_dict = self.json_file_preprocess(image_folder, json_folder) + self.json_data = json_files + self.image_path_dict = image_path_dict + + self.image_folder = image_folder + + self.tokenizer = BUILDER.build(tokenizer) + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.template_map_fn = template_map_fn + if isinstance(self.template_map_fn, dict) and self.lazy: + _type = self.template_map_fn['type'] + del self.template_map_fn['type'] + self.template_map_fn = _type(**self.template_map_fn) + + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + + self.repeats = repeats + + self._system = '' + + self.min_dynamic_patch = 1 + self.max_dynamic_patch = 12 + self.downsample_ratio = 0.5 + self.image_size = 448 + self.use_thumbnail = True + patch_size = 14 + self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2)) + + self.transformer = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + 
T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD) + ]) + + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.single_image_mode = single_image_mode + + def json_file_preprocess(self, image_folder, json_folder): + + # list jsons + print("Processing GRAND json files !!!") + if os.path.exists(self.json_list_save_path): + with open(self.json_list_save_path, 'r') as f: + json_files = json.load(f) + else: + json_files = os.listdir(json_folder) + _json_files = [] + for _file in json_files: + if '.json' in _file: + _json_files.append(os.path.join(json_folder, _file)) + json_files = _json_files + with open(self.json_list_save_path, 'w') as f: + json.dump(json_files, f) + print(f"Finished, {len(json_files)} json files !") + + # list images + print("Processing GRAND image files !!!") + if os.path.exists(self.image_list_save_path): + with open(self.image_list_save_path, 'r') as f: + image_path_dict = json.load(f) + else: + sub_folders = os.listdir(image_folder) + _sub_folders = [] + for folder_name in sub_folders: + if 'sa_00' in folder_name: + _sub_folders.append(folder_name) + sub_folders = _sub_folders + sub_folders = [os.path.join(image_folder, folder_name) for folder_name in sub_folders] + + image_path_dict = {} + for sub_folder in sub_folders: + files = os.listdir(sub_folder) + for _file in files: + if '.jpg' in _file: + image_path_dict[_file] = os.path.join(sub_folder, _file) + + with open(self.image_list_save_path, 'w') as f: + json.dump(image_path_dict, f) + print(f"Finished, {len(image_path_dict)} image files !") + + return json_files, image_path_dict + + @property + def modality_length(self): + length_list = [10000] * len(self.json_data) + return length_list * self.repeats + + def __len__(self): + return len(self.json_data) * self.repeats + + def real_len(self): + return len(self.json_data) + + def decode_mask(self, object_masks, ori_height, ori_width): + binary_masks = [] + for object_mask in object_masks: + binary_mask = np.zeros((ori_height, ori_width), dtype=np.uint8) + for seg in object_mask: + m = mask.decode(seg) + m = m.astype(np.uint8) + binary_mask += m.squeeze() + + binary_masks.append(binary_mask) + if len(binary_masks) == 0: + return None + masks = np.stack(binary_masks, axis=0) + masks = torch.from_numpy(masks) + return masks + + def dataset_map_fn(self, data_dict): + data_dict = glamm_grand_map_fn(data_dict) + return data_dict + + def replace_image_str(self, data_dict, image_str): + data_dict['conversation'][0]['input'] = \ + data_dict['conversation'][0]['input'].replace(DEFAULT_IMAGE_TOKEN, image_str) + return data_dict + + def __getitem__(self, index): + + index = index % self.real_len() + json_file_path = self.json_data[index] + with open(json_file_path, 'r') as f: + json_dict = json.load(f) + + image_name = list(json_dict.keys())[0] + + if image_name not in self.image_path_dict.keys(): + return self.__getitem__(random.randint(0, len(self.json_data) - 1)) + image_path = self.image_path_dict[image_name] + + json_dict = json_dict[image_name] + # parse datasets + result = self.dataset_map_fn(json_dict) + json_dict.update(result) + data_dict = json_dict + + data_dict['image'] = image_path + + # process image + image_file = data_dict['image'] + try: + image = Image.open(os.path.join(self.image_folder, + image_file)).convert('RGB') + except: + return self.__getitem__(random.randint(0, len(self.json_data) - 1)) + 
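+        # Image decoded successfully; keep its original resolution so the RLE
+        # masks are decoded at full size, and note that the grounding branch
+        # (extra_image_processor -> g_pixel_values) is built from the whole
+        # image rather than from the dynamic_preprocess tiles.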
ori_width, ori_height = image.size + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous() + data_dict['g_pixel_values'] = g_pixel_values + + if self.single_image_mode: + images = [image] + else: + images = dynamic_preprocess(image, self.min_dynamic_patch, + self.max_dynamic_patch, + self.image_size, self.use_thumbnail) + pixel_values = [self.transformer(image) for image in images] + pixel_values = torch.stack(pixel_values) + data_dict['pixel_values'] = pixel_values + + num_image_tokens = pixel_values.shape[0] * self.patch_token + image_token_str = f'{self.IMG_START_TOKEN}' \ + f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \ + f'{self.IMG_END_TOKEN}' + + data_dict = self.replace_image_str(data_dict, image_token_str) + + result = self.template_map_fn(data_dict) + data_dict.update(result) + result = video_lisa_encode_fn(data_dict, tokenizer=self.tokenizer, max_length=self.max_length, + with_image_token=True) + data_dict.update(result) + # process mask + data_dict['masks'] = self.decode_mask(data_dict['masks'], ori_height=ori_height, ori_width=ori_width) + + if data_dict['masks'] is None: + return self.__getitem__(random.randint(0, len(self.json_data) - 1)) + + return data_dict \ No newline at end of file diff --git a/projects/llava_sam2/datasets/MeVIS_Dataset.py b/projects/llava_sam2/datasets/MeVIS_Dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..82e2c4339fd59946db0fc1fa9bbcdd6cced7514b --- /dev/null +++ b/projects/llava_sam2/datasets/MeVIS_Dataset.py @@ -0,0 +1,5 @@ +from .ReVOS_Dataset import VideoReVOSDataset + + +class VideoMeVISDataset(VideoReVOSDataset): + pass diff --git a/projects/llava_sam2/datasets/Osprey_Dataset.py b/projects/llava_sam2/datasets/Osprey_Dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..708b4ab15333cd129c7d28dcb6426bd8e4d00a41 --- /dev/null +++ b/projects/llava_sam2/datasets/Osprey_Dataset.py @@ -0,0 +1,463 @@ +import json +import os + +import torch +from datasets import Dataset as HFDataset +from datasets import DatasetDict, load_from_disk +from PIL import Image +from torch.utils.data import Dataset +from pycocotools import mask as maskUtils +import numpy as np +import copy + +from xtuner.registry import BUILDER +from xtuner.dataset.huggingface import process_hf_dataset, build_origin_dataset +import torchvision.transforms as T +from xtuner.utils import DEFAULT_IMAGE_TOKEN +from torchvision.transforms.functional import InterpolationMode +from .encode_fn import video_lisa_encode_fn +from .utils import dynamic_preprocess + +import random + +import torch.nn.functional as F + +class OspreyDataset(Dataset): + os.environ['TOKENIZERS_PARALLELISM'] = 'true' + IMG_CONTEXT_TOKEN = '' + IMG_START_TOKEN = '' + IMG_END_TOKEN = '' + + LIMIT = '' + + VP_START_TOKEN = '' + VP_END_TOKEN = '' + + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + def __init__(self, + image_folder, + data_path=None, + tokenizer=None, + max_length=8196, + special_tokens=None, + template_map_fn=None, + extra_image_processor=None, + lazy=True, + repeats=1, + single_image_mode=False, + ): + super().__init__() + assert lazy + self.lazy = lazy + self.max_length = max_length + + json_data = self.json_file_preprocess(data_path) + self.text_data = json_data + + self.image_folder = image_folder + + self.tokenizer = BUILDER.build(tokenizer) + if 
special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.template_map_fn = template_map_fn + if isinstance(self.template_map_fn, dict) and self.lazy: + _type = self.template_map_fn['type'] + del self.template_map_fn['type'] + self.template_map_fn = _type(**self.template_map_fn) + + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + + self.repeats = repeats + + self._system = '' + + self.min_dynamic_patch = 1 + self.max_dynamic_patch = 12 + self.downsample_ratio = 0.5 + self.image_size = 448 + self.use_thumbnail = True + patch_size = 14 + self.patch_size = patch_size + self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2)) + + self.transformer = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD) + ]) + + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.single_image_mode = single_image_mode + + def json_file_preprocess(self, data_path): + with open(data_path, 'r') as f: + json_data = json.load(f) + return json_data + + @property + def modality_length(self): + length_list = [] + for data_dict in self.text_data: + if self.lazy: + cur_len = 100 + else: + cur_len = len(data_dict['input_ids']) + if data_dict.get('image', None) is None: + cur_len = -cur_len + length_list.append(cur_len) + return length_list * self.repeats + + def __len__(self): + return len(self.text_data) * self.repeats + + def real_len(self): + return len(self.text_data) + + def annToMask(self, mask_ann, h, w): + if isinstance(mask_ann, list): + rles = maskUtils.frPyObjects(mask_ann, h, w) + rle = maskUtils.merge(rles) + elif isinstance(mask_ann['counts'], list): + # uncompressed RLE + rle = maskUtils.frPyObjects(mask_ann, h, w) + else: + # rle + rle = mask_ann + mask = maskUtils.decode(rle) + return mask + + def decode_mask(self, object_masks, ori_height, ori_width): + binary_masks = [] + for object_mask in object_masks: + binary_mask = self.annToMask(object_mask, ori_height, ori_width) + binary_masks.append(binary_mask) + if len(binary_masks) == 0: + return None + masks = np.stack(binary_masks, axis=0) + masks = torch.from_numpy(masks) + return masks + + def _process_conversation(self, converations, n_regions, region_pixels): + start_region_str = ' There are {} part regions in the picture: '.format(n_regions) + for i in range(n_regions): + start_region_str = start_region_str + \ + f"region{i+1}" + self.VP_START_TOKEN + self.IMG_CONTEXT_TOKEN * region_pixels[i] + self.VP_END_TOKEN + if i == n_regions - 1: + start_region_str = start_region_str + '.\n' + else: + start_region_str = start_region_str + ', ' + + for i, item in enumerate(converations): + item['value'] = item['value'].replace('<', '').replace('>', '') + if item['from'] == 'human': + item['value'] = item['value'] + self.LIMIT + # first conv process + if i == 0: + assert item['from'] == "human" + item['value'] = start_region_str + item['value'] + + messages = converations + input = '' + + conversation = [] + while messages and messages[0]['from'] == 'gpt': + # Skip the first one if it is from gpt + messages = messages[1:] + for msg in messages: + if msg['from'] == 'human': + if DEFAULT_IMAGE_TOKEN in msg['value']: + msg['value'] = msg['value'].replace(DEFAULT_IMAGE_TOKEN, + 
'').strip() + msg['value'] = DEFAULT_IMAGE_TOKEN + '\n' + msg['value'] + msg['value'] = msg['value'].strip() + input += msg['value'] + + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + + return conversation + + def _get_region_infos(self, masks): + # masks tensor, (n_obj, h, w) + masks = F.interpolate( + masks.unsqueeze(0), + size=(int(self.image_size // self.patch_size * self.downsample_ratio), + int(self.image_size // self.patch_size * self.downsample_ratio)), + mode='nearest').squeeze(0) + region_pixels = [] + for mask in masks: + region_pixels.append(mask.bool().to(torch.int64).sum()) + return masks, region_pixels + + def dataset_map_fn(self, data_dict): + file_name = data_dict['file_name'] # image file name + conversations = data_dict['conversations'] + masks = [anno["segmentation"] for anno in data_dict["annotation"]] + height = data_dict['height'] + width = data_dict['width'] + _ret = {} + + _ret['image'] = file_name + _ret['height'] = height + _ret['width'] = width + + masks = self.decode_mask(masks, height, width) + masks, region_pixels = self._get_region_infos(masks) + + if masks is None: + return None + + conversations = self._process_conversation(conversations, len(masks), region_pixels) + _ret['conversation'] = conversations + _ret['prompt_masks'] = masks + return _ret + + def replace_image_str(self, data_dict, image_str): + data_dict['conversation'][0]['input'] = \ + data_dict['conversation'][0]['input'].replace(DEFAULT_IMAGE_TOKEN, image_str) + return data_dict + + def __getitem__(self, index): + + index = index % self.real_len() + data_dict = copy.deepcopy(self.text_data[index]) + + # parse datasets + result = self.dataset_map_fn(data_dict) # {'image', 'height', 'width', 'conversation', 'masks'} + if result is None or result['prompt_masks'] is None: + return self.__getitem__(0) + + data_dict = result + + # process image + image_file = data_dict['image'] + if isinstance(self.image_folder, list): + for image_folder in self.image_folder: + image_path = os.path.join(image_folder, image_file) + if os.path.exists(image_path): + image = Image.open(image_path).convert('RGB') + break + else: + image = Image.open(os.path.join(self.image_folder, + image_file)).convert('RGB') + ori_width, ori_height = image.size + + if self.single_image_mode: + images = [image] + else: + images = dynamic_preprocess(image, self.min_dynamic_patch, + self.max_dynamic_patch, + self.image_size, self.use_thumbnail) + vp_overall_mask = torch.Tensor([False] * (len(images) - 1) + [True]) + data_dict['vp_overall_mask'] = vp_overall_mask + + pixel_values = [self.transformer(image) for image in images] + pixel_values = torch.stack(pixel_values) + data_dict['pixel_values'] = pixel_values + + num_image_tokens = pixel_values.shape[0] * self.patch_token + image_token_str = f'{self.IMG_START_TOKEN}' \ + f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \ + f'{self.IMG_END_TOKEN}' + + data_dict = self.replace_image_str(data_dict, image_token_str) + + result = self.template_map_fn(data_dict) + data_dict.update(result) + result = video_lisa_encode_fn(data_dict, tokenizer=self.tokenizer, max_length=self.max_length, + with_image_token=True) + data_dict.update(result) + # process mask + # data_dict['prompt_masks'] = data_dict['prompt_masks'] + + if data_dict['prompt_masks'] is None: + return self.__getitem__(0) + + return data_dict + + +DETAILED_QUESTIONS = [ + 'Can you provide me with a detailed description of the region in the 
picture marked by <region>?', + "I'm curious about the region represented by <region> in the picture. Could you describe it in detail?", + 'What can you tell me about the region indicated by <region> in the image?', + "I'd like to know more about the area in the photo labeled <region>. Can you give me a detailed description?", + 'Could you describe the region shown as <region> in the picture in great detail?', + 'What details can you give me about the region outlined by <region> in the photo?', + 'Please provide me with a comprehensive description of the region marked with <region> in the image.', + 'Can you give me a detailed account of the region labeled as <region> in the picture?', + "I'm interested in learning more about the region represented by <region> in the photo. Can you describe it in detail?", + 'What is the region outlined by <region> in the picture like? Could you give me a detailed description?', + 'Can you provide me with a detailed description of the region in the picture marked by <region>, please?', + "I'm curious about the region represented by <region> in the picture. Could you describe it in detail, please?", + 'What can you tell me about the region indicated by <region> in the image, exactly?', + "I'd like to know more about the area in the photo labeled <region>, please. Can you give me a detailed description?", + 'Could you describe the region shown as <region> in the picture in great detail, please?', + 'What details can you give me about the region outlined by <region> in the photo, please?', + 'Please provide me with a comprehensive description of the region marked with <region> in the image, please.', + 'Can you give me a detailed account of the region labeled as <region> in the picture, please?', + "I'm interested in learning more about the region represented by <region> in the photo. Can you describe it in detail, please?", + 'What is the region outlined by <region> in the picture like, please? Could you give me a detailed description?', + 'Please describe the region <region> in the image in detail.', + 'Can you offer a thorough analysis of the region <region> in the image?', + 'Could you elaborate on the region highlighted by <region> in the picture provided?', + 'Please share more information about the zone emphasized with <region> in the photo.', + 'What insights can you give about the area denoted by <region> in the image presented?', + 'Can you share a comprehensive rundown of the region denoted by <region> in the presented image?', + "I'd like to know more about the region highlighted by <region> in the picture provided.", + 'Work through the important details of the area <region> in the image.', + 'Illustrate the area represented by <region> through a descriptive explanation.', + 'Examine the region <region> closely and share its details.'
+] + +class OspreyDescriptionDataset(OspreyDataset): + os.environ['TOKENIZERS_PARALLELISM'] = 'true' + IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>' + IMG_START_TOKEN = '<img>' + IMG_END_TOKEN = '</img>' + + VP_START_TOKEN = '<vp>' + VP_END_TOKEN = '</vp>' + + LIMIT='' + + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + def __init__(self, + image_folder, + data_path=None, + tokenizer=None, + max_length=8196, + special_tokens=None, + template_map_fn=None, + extra_image_processor=None, + lazy=True, + repeats=1, + single_image_mode=False, + ): + super(OspreyDescriptionDataset, self).__init__( + image_folder=image_folder, + data_path=data_path, + tokenizer=tokenizer, + max_length=max_length, + special_tokens=special_tokens, + template_map_fn=template_map_fn, + extra_image_processor=extra_image_processor, + lazy=lazy, + repeats=repeats, + single_image_mode=single_image_mode, + ) + + def dataset_map_fn(self, data_dict): + file_name = data_dict['file_name'] # image file name + descriptions = data_dict['description'] + masks = [anno["segmentation"] for anno in data_dict["annotation"]] + height = data_dict['height'] + width = data_dict['width'] + _ret = {} + + _ret['image'] = file_name + _ret['height'] = height + _ret['width'] = width + + masks = self.decode_mask(masks, height, width) + if masks is None: + return None + + masks, region_pixels = self._get_region_infos(masks) + + conversations = self._process_conversation(descriptions, len(masks), region_pixels) + _ret['conversation'] = conversations + _ret['prompt_masks'] = masks + return _ret + + def _process_conversation(self, descriptions, n_regions, region_pixels): + start_region_str = ' There are {} part regions in the picture: '.format(n_regions) + for i in range(n_regions): + start_region_str = start_region_str + \ + f"region{i+1}" + self.VP_START_TOKEN + self.IMG_CONTEXT_TOKEN * region_pixels[i] + self.VP_END_TOKEN + if i == n_regions - 1: + start_region_str = start_region_str + '.\n' + else: + start_region_str = start_region_str + ', ' + + conversations = [] + for i, item in enumerate(descriptions): + question = random.choice(DETAILED_QUESTIONS).strip().replace('<region>', f"region{i+1}") + self.LIMIT + answer = item.replace('<', '').replace('>', '') + # first conv process + if i == 0: + question = start_region_str + question + conversations.append({'from': 'human', 'value': question}) + conversations.append({'from': 'gpt', 'value': answer}) + + messages = conversations + input = '' + + conversation = [] + while messages and messages[0]['from'] == 'gpt': + # Skip the first one if it is from gpt + messages = messages[1:] + for msg in messages: + if msg['from'] == 'human': + if DEFAULT_IMAGE_TOKEN in msg['value']: + msg['value'] = msg['value'].replace(DEFAULT_IMAGE_TOKEN, + '').strip() + msg['value'] = DEFAULT_IMAGE_TOKEN + '\n' + msg['value'] + msg['value'] = msg['value'].strip() + input += msg['value'] + + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + return conversation + + +class OspreyShortDescriptionDataset(OspreyDataset): + os.environ['TOKENIZERS_PARALLELISM'] = 'true' + IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>' + IMG_START_TOKEN = '<img>' + IMG_END_TOKEN = '</img>' + + VP_START_TOKEN = '<vp>' + VP_END_TOKEN = '</vp>' + + LIMIT = ' Answer the question using a single word or phrase.'
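+ # NOTE: LIMIT is appended to each human question by OspreyDataset._process_conversation,
+ # steering this dataset variant toward single-word or short-phrase answers.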
+ + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + + def __init__(self, + image_folder, + data_path=None, + tokenizer=None, + max_length=8196, + special_tokens=None, + template_map_fn=None, + extra_image_processor=None, + lazy=True, + repeats=1, + single_image_mode=False, + ): + super(OspreyShortDescriptionDataset, self).__init__( + image_folder=image_folder, + data_path=data_path, + tokenizer=tokenizer, + max_length=max_length, + special_tokens=special_tokens, + template_map_fn=template_map_fn, + extra_image_processor=extra_image_processor, + lazy=lazy, + repeats=repeats, + single_image_mode=single_image_mode, + ) \ No newline at end of file diff --git a/projects/llava_sam2/datasets/ReSAM2_Dataset.py b/projects/llava_sam2/datasets/ReSAM2_Dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..06916e6eac7c9c12f217c2a0a42dae2a1af73eec --- /dev/null +++ b/projects/llava_sam2/datasets/ReSAM2_Dataset.py @@ -0,0 +1,489 @@ +import logging +import os +import torch +from datasets import Dataset as HFDataset +from datasets import DatasetDict, load_from_disk +from mmengine import print_log +from PIL import Image +from torch.utils.data import Dataset +import numpy as np + +from xtuner.registry import BUILDER +from xtuner.dataset.huggingface import process_hf_dataset, build_origin_dataset +import copy +from .encode_fn import video_lisa_encode_fn +import json +import random +import pycocotools.mask as maskUtils +import cv2 +import torchvision.transforms as T +from torchvision.transforms.functional import InterpolationMode + +SEG_QUESTIONS = [ + "Please segment the object according to the description: {class_name}", +] + +SEG_QUESTIONS_SHORT = [ + "Can you segment the {class_name} in this image?", + "Please segment {class_name} in this image.", + "What is {class_name} in this image? Please respond with segmentation mask.", + "What is {class_name} in this image? Please output segmentation mask.", + + "Can you segment the {class_name} in this image", + "Please segment {class_name} in this image", + "What is {class_name} in this image? Please respond with segmentation mask", + "What is {class_name} in this image? Please output segmentation mask", + + "Could you provide a segmentation mask for the {class_name} in this image?", + "Please identify and segment the {class_name} in this image.", + "Where is the {class_name} in this picture? Please respond with a segmentation mask.", + "Can you highlight the {class_name} in this image with a segmentation mask?", + + "Could you provide a segmentation mask for the {class_name} in this image", + "Please identify and segment the {class_name} in this image", + "Where is the {class_name} in this picture? 
Please respond with a segmentation mask", + "Can you highlight the {class_name} in this image with a segmentation mask", +] + +ANSWER_LIST = [ + "It is [SEG].", + "Sure, [SEG].", + "Sure, it is [SEG].", + "Sure, the segmentation result is [SEG].", + "[SEG].", +] + +class VideoSAM2Dataset(Dataset): + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + IMG_CONTEXT_TOKEN = '' + IMG_START_TOKEN = '' + IMG_END_TOKEN = '' + + FAST_IMG_CONTEXT_TOKEN = '' + FAST_IMG_START_TOKEN = '' + FAST_IMG_END_TOKEN = '' + + def __init__(self, + sam2_folder, + expression_file, + extra_image_processor=None, + tokenizer=None, + select_number=5, + sampled_frames=5, + offline_processed_text_folder=None, + template_map_fn=None, + max_length=8196, + lazy=True, + repeats=1, + special_tokens=None, + use_fast=False, + n_fast_images=50, + fast_pool_size=4, + mode='long', + frame_contiguous_sample=False, + ): + assert mode in ['long', 'long_short', 'short'] + self.mode = mode + self.cur_mode = mode + assert lazy is True + self.tokenizer = BUILDER.build(tokenizer) + self.select_number = select_number + self.sampled_frames = sampled_frames + assert offline_processed_text_folder or (expression_file and tokenizer) + self.lazy = lazy + + self.max_length = max_length + + self.template_map_fn = template_map_fn + if isinstance(self.template_map_fn, dict) and self.lazy: + _type = self.template_map_fn['type'] + del self.template_map_fn['type'] + self.template_map_fn = _type(**self.template_map_fn) + + if offline_processed_text_folder and expression_file: + print_log( + 'Both `offline_processed_text_folder` and ' + '`data_path` are set, and we load dataset from' + '`offline_processed_text_folder` ' + f'({offline_processed_text_folder})', + logger='current', + level=logging.WARNING) + + if offline_processed_text_folder is not None: + raise NotImplementedError + else: + video_ids, anno_dict = self.json_file_preprocess(expression_file) + if self.lazy: + self.video_ids = video_ids + self.anno_dict = anno_dict + else: + raise NotImplementedError + + self.sam2_folder = sam2_folder + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + self.down_ratio = 1 + self.repeats = repeats + + self._system = '' + + self.downsample_ratio = 0.5 + self.image_size = 448 + patch_size = 14 + self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2)) + + self.transformer = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD) + ]) + + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.use_fast = use_fast + self.n_fast_images = n_fast_images + self.fast_pool_size = fast_pool_size + + self.frame_contiguous_sample = frame_contiguous_sample + + # for visualization debug + self.save_folder = './work_dirs/video_debug/' + self.cur_number = 0 + + print("Video res dataset (ref-sam2), include {} items.".format(len(self.video_ids))) + + def __len__(self): + return len(self.video_ids) * self.repeats + + @property + def modality_length(self): + length_list = [] + for data_dict in self.video_ids: + cur_len = 20000 + length_list.append(cur_len) + return length_list + + def real_len(self): + return len(self.video_ids) + + def json_file_preprocess(self, expression_file): + # prepare expression annotation files 
+ with open(expression_file, 'r') as f: + expression_datas = json.load(f) + + video_ids = list(expression_datas.keys()) + return video_ids, expression_datas + + def dataset_map_fn(self, objects_expression_infos, n_frames, n_fast_frames=0): + # prepare text + if self.mode == 'long': + expressions = [object_info['formated'] for object_info in objects_expression_infos] + self.cur_mode = self.mode + elif self.mode == 'short': + expressions = [object_info['short_caps'][random.randint(0, len(object_info['short_caps'])-1)] for object_info in objects_expression_infos] + self.cur_mode = self.mode + else: + if random.random() < 0.5: + expressions = [object_info['formated'] for object_info in objects_expression_infos] + self.cur_mode = 'long' + else: + expressions = [object_info['short_caps'][random.randint(0, len(object_info['short_caps']) - 1)] for + object_info in objects_expression_infos] + self.cur_mode = 'short' + text_dict = self.prepare_text(n_frames, expressions, num_image_tokens=self.patch_token, + n_fast_frames=n_fast_frames) + ret = {'conversation': text_dict['conversation']} + return ret + + def prepare_text(self, n_frames, expressions, num_image_tokens=256, n_fast_frames=0): + + if self.use_fast: + fast_frame_token_str = f'{self.FAST_IMG_START_TOKEN}' \ + f'{self.FAST_IMG_CONTEXT_TOKEN * n_fast_frames * self.fast_pool_size * self.fast_pool_size}' \ + f'{self.FAST_IMG_END_TOKEN}' + '\n' + else: + fast_frame_token_str = '' + + frame_token_str = f'{self.IMG_START_TOKEN}' \ + f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \ + f'{self.IMG_END_TOKEN}' + + questions = [] + answers = [] + for i, exp in enumerate(expressions): + if self.cur_mode == 'short': + question_template = random.choice(SEG_QUESTIONS_SHORT) + exp = exp.replace("A ", '') + else: + question_template = random.choice(SEG_QUESTIONS) + questions.append(question_template.format(class_name=exp)) + answers.append(random.choice(ANSWER_LIST)) + qa_list = [] + for i, (question, answer) in enumerate(zip(questions, answers)): + if i == 0: + frame_tokens = frame_token_str + '\n' + # frame_tokens = '=' + ' ' + frame_tokens = frame_tokens * n_frames + frame_tokens = frame_tokens.strip() + frame_tokens = fast_frame_token_str + frame_tokens + qa_list.append( + {'from': 'human', 'value': frame_tokens + question} + ) + else: + qa_list.append( + {'from': 'human', 'value': question} + ) + qa_list.append( + {'from': 'gpt', 'value': answer} + ) + + input = '' + conversation = [] + for msg in qa_list: + if msg['from'] == 'human': + input += msg['value'] + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + + # add system information + conversation[0].update({'system': self._system}) + return {'conversation': conversation} + + def __getitem__(self, index): + index = index % self.real_len() + video_id = self.video_ids[index] + expression_dict = self.anno_dict[video_id] + object_ids = list(expression_dict['objects'].keys()) + + video_path = os.path.join(self.sam2_folder, expression_dict['video_path']) + anno_path = os.path.join(self.sam2_folder, expression_dict['anno_path']) + + video_frames = get_video_frames(video_path) + + if self.use_fast: + # sample fast branch + fast_interval = len(video_frames) / (self.n_fast_images + 1e-4) + sampled_fast_frame_idxs = [min(int(i * fast_interval), len(video_frames) - 1) for i in range(self.n_fast_images)] + fast_video_frames = [video_frames[_idx] for _idx in sampled_fast_frame_idxs] + else: + fast_video_frames = None + + 
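+ # Keep every 4th decoded frame below so the RGB frames line up with the masklet annotations
+ # (SA-V masklets appear to be annotated at a lower frame rate than the source videos).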
video_frames = video_frames[::4] + + # mask annotation + with open(anno_path, 'r') as f: + mask_data = json.load(f) + masklents = decode_masklet(mask_data['masklet']) + + n_frames = len(masklents) + n_objects = len(object_ids) + + # sample object + if n_objects > self.select_number: + selected_indexes = np.random.choice(n_objects, self.select_number) + else: + selected_indexes = np.random.choice(n_objects, self.select_number, replace=True) + + selected_object_ids = [object_ids[_idx] for _idx in selected_indexes] + objects_expression_infos = [expression_dict['objects'][_idx] for _idx in selected_object_ids] + _masklents = [] + for _mask in masklents: + _mask_selected = [] + for _idx in selected_object_ids: + _mask_selected.append(_mask[:, :, int(_idx)]) + _mask_selected = np.stack(_mask_selected, axis=2) + _masklents.append(_mask_selected) + masklents = _masklents + + # sample video frames + # prepare images, random select k frames + if n_frames > self.sampled_frames + 1: + if self.frame_contiguous_sample and random.random() < 0.5: + # do contiguous sample + selected_start_frame = np.random.choice(n_frames - self.sampled_frames, 1, replace=False) + selected_frame_indexes = [selected_start_frame[0] + _i for _i in range(self.sampled_frames)] + else: + selected_frame_indexes = np.random.choice(n_frames, self.sampled_frames, replace=False) + else: + selected_frame_indexes = np.random.choice(n_frames, self.sampled_frames, replace=True) + selected_frame_indexes.sort() + + video_frames = [video_frames[_idx] for _idx in selected_frame_indexes] + masklents = [masklents[_idx] for _idx in selected_frame_indexes] + + data_dict = self.dataset_map_fn(objects_expression_infos, len(video_frames), n_fast_frames=self.n_fast_images) + result = self.template_map_fn(data_dict) + data_dict.update(result) + result = video_lisa_encode_fn(data_dict, tokenizer=self.tokenizer, max_length=self.max_length, with_image_token=True) + data_dict.update(result) + + pixel_values = [] + extra_pixel_values = [] + for frame in video_frames: + frame = frame[:, :, ::-1] + frame_image = Image.fromarray(frame).convert('RGB') + ori_width, ori_height = frame_image.size + if self.extra_image_processor is not None: + g_image = np.array(frame_image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous() + extra_pixel_values.append(g_pixel_values) + + frame_image = self.transformer(frame_image) + pixel_values.append(frame_image) + + pixel_values = torch.stack(pixel_values, dim=0) # (n_f, 3, h, w) + data_dict['pixel_values'] = pixel_values + if self.extra_image_processor is not None: + data_dict['g_pixel_values'] = extra_pixel_values + + # for fast branch + if self.use_fast: + fast_pixel_values = [] + for frame_image in fast_video_frames: + frame = frame_image[:, :, ::-1] + frame_image = Image.fromarray(frame).convert('RGB') + ori_width, ori_height = frame_image.size + + frame_image = self.transformer(frame_image) + fast_pixel_values.append(frame_image) + + fast_pixel_values = torch.stack(fast_pixel_values, dim=0) # (n_f, 3, h, w) + data_dict['fast_pixel_values'] = fast_pixel_values + + # process and get masks + masklents = np.stack(masklents, axis=0) # (n_frames, h, w, n_obj) + masklents = torch.from_numpy(masklents).permute(3, 0, 1, 2) + masklents = masklents.flatten(0, 1) + # print('sam2-mask_shape:', masklents.shape) + # print('sam2-pixel_values:', data_dict['pixel_values'].shape) + # print('sam2-g_pixel_values:', 
len(data_dict['g_pixel_values']), ', ', data_dict['g_pixel_values'][0].shape) + data_dict['masks'] = masklents + data_dict['type'] = 'video' + return data_dict + + def visualization_debug(self, data_dict): + save_folder = os.path.join(self.save_folder, 'sample_{}'.format(self.cur_number)) + if not os.path.exists(save_folder): + os.mkdir(save_folder) + self.cur_number += 1 + + # images + + show_images = [] + + pixel_values = data_dict['pixel_values'] + save_folder_image = os.path.join(save_folder, 'image') + if not os.path.exists(save_folder_image): + os.mkdir(save_folder_image) + for i_image, image_pixel_value in enumerate(pixel_values): + # print(image_pixel_value.shape) + image_pixel_value[0] = image_pixel_value[0] * 0.2686 + image_pixel_value[1] = image_pixel_value[1] * 0.2613 + image_pixel_value[2] = image_pixel_value[2] * 0.2757 + image_pixel_value[0] = image_pixel_value[0] + 0.4814 + image_pixel_value[1] = image_pixel_value[1] + 0.4578 + image_pixel_value[2] = image_pixel_value[2] + 0.4082 + image_pixel_value = image_pixel_value * 255 + image_pixel_value = image_pixel_value.permute(1, 2, 0) + image_pixel_value = image_pixel_value.to(torch.uint8).numpy() + # print(os.path.join(save_folder_image, '{}.jpg'.format(i_image))) + # print(image_pixel_value.shape) + show_images.append(image_pixel_value) + cv2.imwrite(os.path.join(save_folder_image, '{}.jpg'.format(i_image)), image_pixel_value) + + # text + input_text = self.tokenizer.decode(data_dict['input_ids'], skip_special_tokens=False) + with open(os.path.join(save_folder, 'text.json'), 'w') as f: + json.dump([input_text], f) + + # masks + save_folder_mask = os.path.join(save_folder, 'mask') + if not os.path.exists(save_folder_mask): + os.mkdir(save_folder_mask) + n_frames = len(pixel_values) + masks = data_dict['masks'] + _, h, w = masks.shape + masks = masks.reshape(-1, n_frames, h, w) + for i_obj, obj_masks in enumerate(masks): + save_folder_mask_obj_folder = os.path.join(save_folder_mask, 'obj_{}'.format(i_obj)) + if not os.path.exists(save_folder_mask_obj_folder): + os.mkdir(save_folder_mask_obj_folder) + for i_frame, f_mask in enumerate(obj_masks): + f_mask = f_mask.numpy() + f_mask = f_mask * 255 + f_mask = np.stack([f_mask * 1, f_mask * 0, f_mask * 0], axis=2) + f_mask = show_images[i_frame] * 0.3 + 0.7 * f_mask + f_mask = f_mask.astype(np.uint8) + cv2.imwrite(os.path.join(save_folder_mask_obj_folder, '{}.png'.format(i_frame)), f_mask) + return + +def get_video_frames(video_path): + cap = cv2.VideoCapture(video_path) + + if not cap.isOpened(): + print("Error: Cannot open video file.") + return + + frames = [] + + frame_id = 0 + while True: + ret, frame = cap.read() + + if not ret: + break + + frames.append(frame) + + frame_id += 1 + + cap.release() + return frames + + +def images_to_video(frames, video_name, fps=6): + height, width, layers = frames[0].shape + + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + video = cv2.VideoWriter(video_name, fourcc, fps, (width, height)) + + for frame in frames: + video.write(frame) + + # cv2.destroyAllWindows() + video.release() + return + +def decode_masklet(masklet): + masks = [] + for _rle in masklet: + mask = maskUtils.decode(_rle) + masks.append(mask) + return masks + +def draw_mask(image, mask): + obj_mask = mask * 255 + obj_mask = np.stack([obj_mask * 1, obj_mask * 0, obj_mask * 0], axis=2) + obj_mask = obj_mask * 0.5 + copy.deepcopy(image) * 0.5 + obj_mask = obj_mask.astype(np.uint8) + return obj_mask + +def add_mask2images(frames, masklets): + show_videos = [] + for i_frames, (frame, 
masks) in enumerate(zip(frames, masklets)): + if i_frames == 0: + n_obj = masks.shape[-1] + for i_obj in range(n_obj): + show_videos.append([]) + + n_obj = masks.shape[-1] + for i_obj in range(n_obj): + show_videos[i_obj].append(draw_mask(copy.deepcopy(frame), masks[:, :, i_obj])) + return show_videos \ No newline at end of file diff --git a/projects/llava_sam2/datasets/ReVOS_Dataset.py b/projects/llava_sam2/datasets/ReVOS_Dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..d5e2b1f1c3aee87e69b0cf9356cc73e414e2bcd7 --- /dev/null +++ b/projects/llava_sam2/datasets/ReVOS_Dataset.py @@ -0,0 +1,602 @@ +import logging +import os +from typing import Literal + +import torch +from datasets import Dataset as HFDataset +from datasets import DatasetDict +from mmengine import print_log +from PIL import Image +from torch.utils.data import Dataset +import numpy as np + +from xtuner.registry import BUILDER +from xtuner.dataset.huggingface import build_origin_dataset +import copy + +from .encode_fn import video_lisa_encode_fn +import json +import random +import pycocotools.mask as maskUtils +import cv2 +import torchvision.transforms as T +from torchvision.transforms.functional import InterpolationMode + +SEG_QUESTIONS = [ + "Can you segment the {class_name} in this image?", + "Please segment {class_name} in this image.", + "What is {class_name} in this image? Please respond with segmentation mask.", + "What is {class_name} in this image? Please output segmentation mask.", + + "Can you segment the {class_name} in this image", + "Please segment {class_name} in this image", + "What is {class_name} in this image? Please respond with segmentation mask", + "What is {class_name} in this image? Please output segmentation mask", + + "Could you provide a segmentation mask for the {class_name} in this image?", + "Please identify and segment the {class_name} in this image.", + "Where is the {class_name} in this picture? Please respond with a segmentation mask.", + "Can you highlight the {class_name} in this image with a segmentation mask?", + + "Could you provide a segmentation mask for the {class_name} in this image", + "Please identify and segment the {class_name} in this image", + "Where is the {class_name} in this picture? 
Please respond with a segmentation mask", + "Can you highlight the {class_name} in this image with a segmentation mask", +] + +ANSWER_LIST = [ + "It is [SEG].", + "Sure, [SEG].", + "Sure, it is [SEG].", + "Sure, the segmentation result is [SEG].", + "[SEG].", +] + +class VideoReVOSDataset(Dataset): + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + IMG_CONTEXT_TOKEN = '' + IMG_START_TOKEN = '' + IMG_END_TOKEN = '' + + FAST_IMG_CONTEXT_TOKEN = '' + FAST_IMG_START_TOKEN = '' + FAST_IMG_END_TOKEN = '' + + def __init__(self, + image_folder, + expression_file, + mask_file, + extra_image_processor=None, + tokenizer=None, + select_number=5, + sampled_frames=10, + offline_processed_text_folder=None, + template_map_fn=None, + max_length=2048, + lazy=True, + repeats=1, + special_tokens=None, + frame_contiguous_sample=False, + use_fast=False, + arch_type: Literal['intern_vl', 'qwen'] = 'intern_vl', + preprocessor=None, + # only work if use_fast = True + n_fast_images=50, + fast_pool_size=4, + fast_token_after_question=False, + ): + assert lazy is True + self.tokenizer = BUILDER.build(tokenizer) + self.select_number = select_number + self.sampled_frames = sampled_frames + assert offline_processed_text_folder or (expression_file and tokenizer) + self.lazy = lazy + + self.max_length = max_length + + self.template_map_fn = template_map_fn + if isinstance(self.template_map_fn, dict) and self.lazy: + _type = self.template_map_fn['type'] + del self.template_map_fn['type'] + self.template_map_fn = _type(**self.template_map_fn) + + if offline_processed_text_folder and expression_file: + print_log( + 'Both `offline_processed_text_folder` and ' + '`data_path` are set, and we load dataset from' + '`offline_processed_text_folder` ' + f'({offline_processed_text_folder})', + logger='current', + level=logging.WARNING) + + self.arch_type = arch_type + if self.arch_type == 'qwen': + self.IMG_CONTEXT_TOKEN = '<|image_pad|>' + self.IMG_START_TOKEN = '<|vision_start|>' + self.IMG_END_TOKEN = '<|vision_end|>' + elif self.arch_type == 'llava': + self.IMG_CONTEXT_TOKEN = '' + self.IMG_START_TOKEN = '' + self.IMG_END_TOKEN = '' + + + if offline_processed_text_folder is not None: + raise NotImplementedError + else: + vid2metaid, metas, mask_dict = self.json_file_preprocess(expression_file, mask_file) + self.vid2metaid = vid2metaid + self.videos = list(self.vid2metaid.keys()) + self.mask_dict = mask_dict + self.json_datas = metas + json_datas = metas + json_data = DatasetDict({'train': HFDataset.from_list(json_datas)}) + if self.lazy: + self.text_data = build_origin_dataset(json_data, 'train') + else: + raise NotImplementedError + + self.image_folder = image_folder + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + self.down_ratio = 1 + self.repeats = repeats + + self._system = '' + + self.downsample_ratio = 0.5 + if self.arch_type == 'llava': + self.downsample_ratio = 1 + self.image_size = 448 + if self.arch_type == 'llava': + self.image_size = 336 + patch_size = 14 + self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2)) + if self.arch_type == 'qwen': + self.patch_token = 1 + + if preprocessor is None: + self.transformer = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD) + ]) + self.preprocessor = 
None + else: + self.transformer = None + self.preprocessor = BUILDER.build(preprocessor) + + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.use_fast = use_fast + self.n_fast_images = n_fast_images + self.fast_pool_size = fast_pool_size + + self.frame_contiguous_sample = frame_contiguous_sample + + # for visualization debug + self.save_folder = './work_dirs/video_debug/' + self.cur_number = 0 + + # exist_thr + self.exist_thr = 8 + self.fast_token_after_question = fast_token_after_question + if self.fast_token_after_question: + assert self.use_fast + + print("Video res dataset, include {} items.".format(len(self.vid2metaid))) + + def __len__(self): + return len(self.vid2metaid) * self.repeats + + @property + def modality_length(self): + length_list = [] + for data_dict in self.vid2metaid: + cur_len = 10000 + length_list.append(cur_len) + return length_list + + def real_len(self): + return len(self.vid2metaid) + + def json_file_preprocess(self, expression_file, mask_file): + # prepare expression annotation files + with open(expression_file, 'r') as f: + expression_datas = json.load(f)['videos'] + + metas = [] + anno_count = 0 # serve as anno_id + vid2metaid = {} + for vid_name in expression_datas: + vid_express_data = expression_datas[vid_name] + + vid_frames = sorted(vid_express_data['frames']) + vid_len = len(vid_frames) + + exp_id_list = sorted(list(vid_express_data['expressions'].keys())) + for exp_id in exp_id_list: + exp_dict = vid_express_data['expressions'][exp_id] + meta = {} + meta['video'] = vid_name + meta['exp'] = exp_dict['exp'] # str + meta['mask_anno_id'] = exp_dict['anno_id'] + + if 'obj_id' in exp_dict.keys(): + meta['obj_id'] = exp_dict['obj_id'] + else: + meta['obj_id'] = [0, ] # Ref-Youtube-VOS only has one object per expression + meta['anno_id'] = [str(anno_count), ] + anno_count += 1 + meta['frames'] = vid_frames + meta['exp_id'] = exp_id + + meta['length'] = vid_len + metas.append(meta) + if vid_name not in vid2metaid.keys(): + vid2metaid[vid_name] = [] + vid2metaid[vid_name].append(len(metas) - 1) + + # process mask annotation files + with open(mask_file, 'rb') as f: + mask_dict = json.load(f) + + return vid2metaid, metas, mask_dict + + def create_img_to_refs_mapping(self, refs_train): + img2refs = {} + for ref in refs_train: + img2refs[ref["image_id"]] = img2refs.get(ref["image_id"], []) + [ref, ] + return img2refs + + def decode_mask(self, video_masks, image_size): + ret_masks = [] + for object_masks in video_masks: + # None object + if len(object_masks) == 0: + if len(ret_masks) != 0: + _object_masks = ret_masks[0] * 0 + else: + _object_masks = np.zeros( + (self.sampled_frames, image_size[0], image_size[1]), dtype=np.uint8) + else: + _object_masks = [] + for i_frame in range(len(object_masks[0])): + _mask = np.zeros(image_size, dtype=np.uint8) + for i_anno in range(len(object_masks)): + if object_masks[i_anno][i_frame] is None: + continue + m = maskUtils.decode(object_masks[i_anno][i_frame]) + if m.ndim == 3: + m = m.sum(axis=2).astype(np.uint8) + else: + m = m.astype(np.uint8) + _mask = _mask | m + _object_masks.append(_mask) + _object_masks = np.stack(_object_masks, axis=0) + # if self.pad_image_to_square: + # _object_masks = expand2square_mask(_object_masks) + ret_masks.append(_object_masks) + _shape = ret_masks[0].shape + for item in ret_masks: + if item.shape != _shape: + print([_ret_mask.shape for _ret_mask in ret_masks]) + return None + ret_masks = np.stack(ret_masks, axis=0) # (n_obj, n_frames, h, w) 
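+ # The flatten below merges the object and frame dimensions,
+ # i.e. (n_obj, n_frames, h, w) -> (n_obj * n_frames, h, w), before the masks are returned.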
+ + ret_masks = torch.from_numpy(ret_masks) + # ret_masks = F.interpolate(ret_masks, size=(self.image_size // self.down_ratio, + # self.image_size // self.down_ratio), mode='nearest') + ret_masks = ret_masks.flatten(0, 1) + return ret_masks + + def dataset_map_fn(self, data_dict, select_k=5): + images = [] + + len_frames = len(data_dict[0]['frames']) + for objet_info in data_dict: + assert len_frames == len(objet_info['frames']) + + # prepare images, random select k frames + if len_frames > select_k + 1: + if self.frame_contiguous_sample and random.random() < 0.5: + # do contiguous sample + selected_start_frame = np.random.choice(len_frames - select_k, 1, replace=False) + selected_frame_indexes = [selected_start_frame[0] + _i for _i in range(select_k)] + else: + selected_frame_indexes = np.random.choice(len_frames, select_k, replace=False) + else: + selected_frame_indexes = np.random.choice(len_frames, select_k, replace=True) + selected_frame_indexes.sort() + + if self.use_fast: + # sample fast branch + fast_interval = len_frames / (self.n_fast_images + 1e-4) + sampled_fast_frame_idxs = [min(int(i * fast_interval), len_frames - 1) for i in range(self.n_fast_images)] + fast_video_frames = [] + for selected_frame_index in sampled_fast_frame_idxs: + frame_id = data_dict[0]['frames'][selected_frame_index] + fast_video_frames.append(os.path.join(data_dict[0]['video'], frame_id + '.jpg')) + else: + fast_video_frames = None + sampled_fast_frame_idxs = None + + for selected_frame_index in selected_frame_indexes: + frame_id = data_dict[0]['frames'][selected_frame_index] + images.append(os.path.join(data_dict[0]['video'], frame_id + '.jpg')) + + # prepare text + expressions = [object_info['exp'] for object_info in data_dict] + if self.use_fast: + text_dict = self.prepare_text(select_k, expressions, num_image_tokens=self.patch_token, + n_fast_images=len(fast_video_frames),) + else: + text_dict = self.prepare_text(select_k, expressions, num_image_tokens=self.patch_token) + + + # prepare masks + video_masks = [] + for object_info in data_dict: + anno_ids = object_info['mask_anno_id'] + # print('anno_ids: ', anno_ids) + obj_masks = [] + for anno_id in anno_ids: + anno_id = str(anno_id) + frames_masks = self.mask_dict[anno_id] + frames_masks_ = [] + for frame_idx in selected_frame_indexes: + frames_masks_.append(copy.deepcopy(frames_masks[frame_idx])) + obj_masks.append(frames_masks_) + video_masks.append(obj_masks) + + if self.use_fast: + fast_video_masks = [] + assert sampled_fast_frame_idxs is not None + for object_info in data_dict: + anno_ids = object_info['mask_anno_id'] + obj_masks = [] + for anno_id in anno_ids: + anno_id = str(anno_id) + frames_masks = self.mask_dict[anno_id] + frames_masks_ = [] + for frame_idx in sampled_fast_frame_idxs: + frames_masks_.append(copy.deepcopy(frames_masks[frame_idx])) + obj_masks.append(frames_masks_) + fast_video_masks.append(obj_masks) + else: + fast_video_masks = None + + ret = {'images': images, 'video_masks': video_masks, 'conversation': text_dict['conversation'], + 'fast_images': fast_video_frames, 'fast_video_masks': fast_video_masks} + return ret + + def prepare_text(self, n_frames, expressions, num_image_tokens=256, n_fast_images=50): + + if self.use_fast and not self.fast_token_after_question: + fast_frame_token_str = f'{self.FAST_IMG_START_TOKEN}' \ + f'{self.FAST_IMG_CONTEXT_TOKEN * n_fast_images * self.fast_pool_size * self.fast_pool_size}' \ + f'{self.FAST_IMG_END_TOKEN}' + '\n' + else: + fast_frame_token_str = '' + + frame_token_str = 
f'{self.IMG_START_TOKEN}' \ + f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \ + f'{self.IMG_END_TOKEN}' + if self.fast_token_after_question: + assert self.use_fast + after_question_str = f'{self.FAST_IMG_START_TOKEN}' \ + f'{self.FAST_IMG_CONTEXT_TOKEN * n_fast_images * self.fast_pool_size * self.fast_pool_size}' \ + f'{self.FAST_IMG_END_TOKEN}' + else: + after_question_str = '' + + questions = [] + answers = [] + for i, exp in enumerate(expressions): + # the exp is a question + if '?' in exp: + questions.append(exp) + else: + exp = exp.replace('.', '').strip() + question_template = random.choice(SEG_QUESTIONS) + questions.append(question_template.format(class_name=exp.lower())) + + answers.append(random.choice(ANSWER_LIST)) + qa_list = [] + for i, (question, answer) in enumerate(zip(questions, answers)): + if i == 0: + frame_tokens = frame_token_str + '\n' + # frame_tokens = '=' + ' ' + frame_tokens = frame_tokens * n_frames + frame_tokens = frame_tokens.strip() + frame_tokens = fast_frame_token_str + frame_tokens + qa_list.append( + {'from': 'human', 'value': frame_tokens + question + after_question_str} + ) + else: + qa_list.append( + {'from': 'human', 'value': question + after_question_str} + ) + qa_list.append( + {'from': 'gpt', 'value': answer} + ) + + input = '' + conversation = [] + for msg in qa_list: + if msg['from'] == 'human': + input += msg['value'] + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + + # add system information + conversation[0].update({'system': self._system}) + return {'conversation': conversation} + + def __getitem__(self, index): + index = index % self.real_len() + selected_video_objects = self.vid2metaid[self.videos[index]] + video_objects_infos = [copy.deepcopy(self.text_data[idx]) for idx in selected_video_objects] + + if len(video_objects_infos) > self.select_number: + selected_indexes = np.random.choice(len(video_objects_infos), self.select_number) + video_objects_infos = [video_objects_infos[_idx] for _idx in selected_indexes] + else: + selected_indexes = np.random.choice(len(video_objects_infos), self.select_number, replace=True) + video_objects_infos = [video_objects_infos[_idx] for _idx in selected_indexes] + + data_dict = self.dataset_map_fn(video_objects_infos, select_k=self.sampled_frames) + + assert 'images' in data_dict.keys() + pixel_values = [] + extra_pixel_values = [] + num_video_tokens = None + num_frame_tokens = None + if data_dict.get('images', None) is not None: + frames_files = data_dict['images'] + frames_files = [os.path.join(self.image_folder, frame_file) for frame_file in frames_files] + for frame_path in frames_files: + frame_image = Image.open(frame_path).convert('RGB') + ori_width, ori_height = frame_image.size + if self.extra_image_processor is not None: + g_image = np.array(frame_image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous() + extra_pixel_values.append(g_pixel_values) + + if self.preprocessor is not None: + pass + else: + frame_image = self.transformer(frame_image) + pixel_values.append(frame_image) + + if self.preprocessor is not None: + if self.arch_type == 'qwen': + _data_dict = self.preprocessor(pixel_values, do_resize=True, size=(self.image_size, self.image_size)) + _data_dict['pixel_values'] = torch.tensor(_data_dict['pixel_values'], dtype=torch.float) + _data_dict['image_grid_thw'] = 
torch.tensor(_data_dict['image_grid_thw'], dtype=torch.int) + num_frame_tokens = int(_data_dict['image_grid_thw'][0].prod() * (self.downsample_ratio ** 2)) + num_frames = _data_dict['image_grid_thw'].shape[0] + num_video_tokens = num_frame_tokens * num_frames + elif self.arch_type == 'llava': + _data_dict = self.preprocessor(pixel_values, do_resize=True, size=(self.image_size, self.image_size)) + _data_dict['pixel_values'] = np.stack(_data_dict['pixel_values'], axis=0) + _data_dict['pixel_values'] = torch.tensor(_data_dict['pixel_values'], dtype=torch.float) + else: + raise NotImplementedError + data_dict.update(_data_dict) + else: + pixel_values = torch.stack(pixel_values, dim=0) # (n_f, 3, h, w) + data_dict['pixel_values'] = pixel_values + if self.extra_image_processor is not None: + data_dict['g_pixel_values'] = extra_pixel_values + + # process and get masks + masks = self.decode_mask(data_dict['video_masks'], image_size=(ori_height, ori_width)) + if masks is None: + return self.__getitem__(random.randint(0, self.real_len())) + data_dict['masks'] = masks + else: + data_dict['pixel_values'] = torch.zeros(0, 3, self.image_size, self.image_size) + data_dict['masks'] = None + + if num_video_tokens is not None: + assert self.patch_token == 1 + input_str = data_dict['conversation'][0]['input'] + input_str = input_str.replace(self.IMG_CONTEXT_TOKEN, self.IMG_CONTEXT_TOKEN * num_frame_tokens) + assert input_str.count(self.IMG_CONTEXT_TOKEN) == num_video_tokens + data_dict['conversation'][0]['input'] = input_str + + result = self.template_map_fn(data_dict) + data_dict.update(result) + result = video_lisa_encode_fn(data_dict, tokenizer=self.tokenizer, max_length=self.max_length) + data_dict.update(result) + + # for fast branch + if self.use_fast: + fast_pixel_values = [] + frames_files = data_dict['fast_images'] + frames_files = [os.path.join(self.image_folder, frame_file) for frame_file in frames_files] + for frame_path in frames_files: + frame_image = Image.open(frame_path).convert('RGB') + ori_width, ori_height = frame_image.size + + frame_image = self.transformer(frame_image) + fast_pixel_values.append(frame_image) + + fast_pixel_values = torch.stack(fast_pixel_values, dim=0) # (n_f, 3, h, w) + data_dict['fast_pixel_values'] = fast_pixel_values + + # process and get masks + masks = self.decode_mask(data_dict['fast_video_masks'], image_size=(ori_height, ori_width)) + + if masks is None: + return self.__getitem__(random.randint(0, self.real_len())) + + data_dict['fast_exists'] = masks.to(dtype=torch.int).sum(dim=(-2, -1)).ge(self.exist_thr).unsqueeze(-1) + + + del data_dict['fast_video_masks'] + data_dict['type'] = 'video' + return data_dict + + def visualization_debug(self, data_dict): + save_folder = os.path.join(self.save_folder, 'sample_{}'.format(self.cur_number)) + if not os.path.exists(save_folder): + os.mkdir(save_folder) + self.cur_number += 1 + + # images + + show_images = [] + + pixel_values = data_dict['pixel_values'] + save_folder_image = os.path.join(save_folder, 'image') + if not os.path.exists(save_folder_image): + os.mkdir(save_folder_image) + for i_image, image_pixel_value in enumerate(pixel_values): + # print(image_pixel_value.shape) + image_pixel_value[0] = image_pixel_value[0] * 0.2686 + image_pixel_value[1] = image_pixel_value[1] * 0.2613 + image_pixel_value[2] = image_pixel_value[2] * 0.2757 + image_pixel_value[0] = image_pixel_value[0] + 0.4814 + image_pixel_value[1] = image_pixel_value[1] + 0.4578 + image_pixel_value[2] = image_pixel_value[2] + 0.4082 + 
image_pixel_value = image_pixel_value * 255 + image_pixel_value = image_pixel_value.permute(1, 2, 0) + image_pixel_value = image_pixel_value.to(torch.uint8).numpy() + # print(os.path.join(save_folder_image, '{}.jpg'.format(i_image))) + # print(image_pixel_value.shape) + show_images.append(image_pixel_value) + cv2.imwrite(os.path.join(save_folder_image, '{}.jpg'.format(i_image)), image_pixel_value) + + # text + input_text = self.tokenizer.decode(data_dict['input_ids'], skip_special_tokens=False) + with open(os.path.join(save_folder, 'text.json'), 'w') as f: + json.dump([input_text], f) + + # masks + save_folder_mask = os.path.join(save_folder, 'mask') + if not os.path.exists(save_folder_mask): + os.mkdir(save_folder_mask) + n_frames = len(pixel_values) + masks = data_dict['masks'] + _, h, w = masks.shape + masks = masks.reshape(-1, n_frames, h, w) + for i_obj, obj_masks in enumerate(masks): + save_folder_mask_obj_folder = os.path.join(save_folder_mask, 'obj_{}'.format(i_obj)) + if not os.path.exists(save_folder_mask_obj_folder): + os.mkdir(save_folder_mask_obj_folder) + for i_frame, f_mask in enumerate(obj_masks): + f_mask = f_mask.numpy() + f_mask = f_mask * 255 + f_mask = np.stack([f_mask * 1, f_mask * 0, f_mask * 0], axis=2) + f_mask = show_images[i_frame] * 0.3 + 0.7 * f_mask + f_mask = f_mask.astype(np.uint8) + cv2.imwrite(os.path.join(save_folder_mask_obj_folder, '{}.png'.format(i_frame)), f_mask) + return diff --git a/projects/llava_sam2/datasets/RefCOCO_Dataset.py b/projects/llava_sam2/datasets/RefCOCO_Dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..986f8680feb957239d65795f88a9d2ab99fa0a53 --- /dev/null +++ b/projects/llava_sam2/datasets/RefCOCO_Dataset.py @@ -0,0 +1,338 @@ +import copy +import random +import glob +import json +import logging +import os +from typing import Literal + +import torch + +from mmengine import print_log +from mmengine.config import Config, ConfigDict +from PIL import Image +from torch.utils.data import Dataset +import numpy as np +import torch.nn.functional as F +import torchvision.transforms as T +from torchvision.transforms.functional import InterpolationMode +from pycocotools.coco import COCO +from pycocotools import mask as mask_utils + +from xtuner.registry import BUILDER +from xtuner.utils import IGNORE_INDEX +from xtuner.dataset.utils import encode_fn +from xtuner.dataset.map_fns import llava_map_fn + +from projects.glamm.datasets.utils.utils import expand2square + +from projects.glamm.datasets.utils.utils import SEG_QUESTIONS, ANSWER_LIST +from projects.glamm.utils import DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN + +from third_parts.mmdet.datasets.refcoco import RefCocoDataset + +from .utils import dynamic_preprocess + + +class ReferSegmDataset(RefCocoDataset): + os.environ['TOKENIZERS_PARALLELISM'] = 'true' + IMG_CONTEXT_TOKEN = '' + IMG_START_TOKEN = '' + IMG_END_TOKEN = '' + + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + + def __init__(self, + data_root, + ann_file=None, + split_file=None, + special_tokens=None, + prompt_template=None, + extra_image_processor=None, + data_prefix=dict(img_path='train2014/'), + tokenizer=None, + max_length=2048, + num_classes_per_sample=3, + single_image_mode=False, + arch_type: Literal['intern_vl', 'qwen'] = 'intern_vl', + preprocessor=None, + **kwargs): + super().__init__( + data_root=data_root, + data_prefix=data_prefix, + pipeline=None, + ann_file=ann_file, + split_file=split_file, + **kwargs, + ) + self.begin_str = 
f'{DEFAULT_IMAGE_TOKEN}\n' + if extra_image_processor is not None: + self.extra_image_processor = BUILDER.build(extra_image_processor) + + self.arch_type = arch_type + if self.arch_type == 'qwen': + self.IMG_CONTEXT_TOKEN = '<|image_pad|>' + self.IMG_START_TOKEN = '<|vision_start|>' + self.IMG_END_TOKEN = '<|vision_end|>' + elif self.arch_type == 'llava': + self.IMG_CONTEXT_TOKEN = '' + self.IMG_START_TOKEN = '' + self.IMG_END_TOKEN = '' + + self.tokenizer = BUILDER.build(tokenizer) + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.image_folder = data_root + self.template = prompt_template + self.max_length = max_length + if self.arch_type == 'intern_vl': + # self._system = '你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。' + self._system = '' + self.template['INSTRUCTION'] = '<|user|>\n{input}<|end|><|assistant|>\n' + elif self.arch_type == 'qwen': + self._system = '' + elif self.arch_type == 'llava': + self._system = '' + + self.num_classes_per_sample = num_classes_per_sample + self.min_dynamic_patch = 1 + self.max_dynamic_patch = 12 + self.downsample_ratio = 0.5 + if self.arch_type == 'llava': + self.downsample_ratio = 1 + self.image_size = 448 + if self.arch_type == 'llava': + self.image_size = 336 + self.use_thumbnail = True + patch_size = 14 + self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2)) + + if preprocessor is None: + self.transformer = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD) + ]) + self.preprocessor = None + else: + self.transformer = None + self.preprocessor = BUILDER.build(preprocessor) + self.arch_type = arch_type + self.single_image_mode = single_image_mode + self._max_refetch = 1000 + + print("Image RES dataset, include {} items.".format(len(self))) + + @property + def modality_length(self): + import pickle + length_list = [] + for idx in range(len(self)): + length_list.append(100) + return length_list + + def _parse_annotations(self, ann_info): + image_path = ann_info['img_path'] + image = Image.open(image_path).convert('RGB') + width, height = image.size + + masks, phrases = [], [] + instances, text = ann_info['instances'], ann_info['text'] + # index = np.random.choice(range(len(instances)), min( + # len(instances), self.num_classes_per_sample)) + index = np.random.choice(range(len(instances)), self.num_classes_per_sample, replace=True) + for idx in index: + inst = instances[idx] + phrase = text[idx].lower() + if '.' 
== phrase[-1]: + phrase = phrase[:-1] + phrases.append(phrase) + binary_mask = np.zeros((height, width), dtype=np.uint8) + for seg in inst["mask"]: + rles = mask_utils.frPyObjects([seg], height, width) + m = mask_utils.decode(rles) + m = m.astype(np.uint8) + binary_mask += m.squeeze() + masks.append(binary_mask) + + conversation = [] + for i, phrase in enumerate(phrases): + question = random.choice(SEG_QUESTIONS).format(class_name=phrase) + if i == 0: + question = self.begin_str + question + conversation.append({'from': 'human', 'value': question}) + conversation.append({'from': 'gpt', 'value': random.choice(ANSWER_LIST)}) + masks = torch.stack([torch.from_numpy(mask) for mask in masks], dim=0) + + ann_info.update({ + 'masks': masks, + 'conversations': conversation, + 'image': image_path + }) + return ann_info + + def prepare_data(self, index): + data_dict = super().prepare_data(index) + data_dict = self._parse_annotations(data_dict) + if data_dict is None: + return None + + out_data_dict = {} + if 'masks' in data_dict: + out_data_dict['masks'] = data_dict['masks'] + + if data_dict.get('image', None) is not None: + image_file = data_dict['image'] + try: + image = Image.open(image_file).convert('RGB') + except Exception as e: + print(f'Error: {e}', flush=True) + print_log(f'Error: {e}', logger='current') + return None + if hasattr(self, 'extra_image_processor'): + g_image = np.array(image) # for grounding + g_image = self.extra_image_processor.apply_image(g_image) + g_pixel_values = torch.from_numpy(g_image).permute(2, 0, 1).contiguous() + out_data_dict['g_pixel_values'] = g_pixel_values + + if self.single_image_mode: + images = [image] + else: + images = dynamic_preprocess(image, self.min_dynamic_patch, + self.max_dynamic_patch, + self.image_size, self.use_thumbnail) + if self.preprocessor is not None: + if self.arch_type == 'qwen': + _data_dict = self.preprocessor(images, do_resize=True) + _data_dict['pixel_values'] = torch.tensor(_data_dict['pixel_values'], dtype=torch.float) + _data_dict['image_grid_thw'] = torch.tensor(_data_dict['image_grid_thw'], dtype=torch.int) + num_image_tokens = int(_data_dict['image_grid_thw'][0].prod() * (self.downsample_ratio ** 2)) + elif self.arch_type == 'llava': + _data_dict = self.preprocessor(images, do_resize=True, size=(self.image_size, self.image_size)) + _data_dict['pixel_values'] = np.stack(_data_dict['pixel_values'], axis=0) + _data_dict['pixel_values'] = torch.tensor(_data_dict['pixel_values'], dtype=torch.float) + num_image_tokens = _data_dict['pixel_values'].shape[0] * self.patch_token + else: + raise NotImplementedError + out_data_dict.update(_data_dict) + else: + pixel_values = [self.transformer(image) for image in images] + pixel_values = torch.stack(pixel_values) + out_data_dict['pixel_values'] = pixel_values + + num_image_tokens = pixel_values.shape[0] * self.patch_token + image_token_str = f'{self.IMG_START_TOKEN}' \ + f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \ + f'{self.IMG_END_TOKEN}' + token_dict = self.get_inputid_labels(data_dict['conversations'], image_token_str) + out_data_dict.update(token_dict) + else: + token_dict = self.get_inputid_labels(data_dict['conversations'], None) + out_data_dict.update(token_dict) + out_data_dict['pixel_values'] = torch.zeros(1, 3, self.image_size, self.image_size) + return out_data_dict + + def get_inputid_labels(self, conversations, image_token_str) -> dict: + input = '' + out_conversation = [] + while conversations and conversations[0]['from'] == 'gpt': + # Skip the first one if it is from gpt 
+ conversations = conversations[1:] + for msg in conversations: + if msg['from'] == 'human': + if image_token_str is None and '' in msg['value']: + msg['value'] = msg['value'].replace('', '') + if '' in msg['value']: + msg['value'] = msg['value'].replace('', image_token_str).strip() + input += msg['value'].strip() + elif msg['from'] == 'gpt': + out_conversation.append({ + 'input': input, + 'output': msg['value'].strip() + }) + input = '' + else: + raise NotImplementedError + + input_ids, labels = [], [] + for i, single_turn_conversation in enumerate(out_conversation): + input = single_turn_conversation.get('input', '') + if input is None: + input = '' + input_text = self.template.INSTRUCTION.format( + input=input, round=i + 1) + + if i == 0: + if self._system != '' and self._system is not None: + system = self.template.SYSTEM.format(system=self._system) + input_text = system + input_text + input_encode = self.tokenizer.encode( + input_text, add_special_tokens=True) + else: + input_encode = self.tokenizer.encode( + input_text, add_special_tokens=False) + input_ids += input_encode + labels += [IGNORE_INDEX] * len(input_encode) + + output_text = single_turn_conversation.get('output', '') + if self.template.get('SUFFIX', None): + output_text += self.template.SUFFIX + output_encode = self.tokenizer.encode( + output_text, add_special_tokens=False) + input_ids += output_encode + labels += copy.deepcopy(output_encode) + + if len(input_ids) > self.max_length: + input_ids = input_ids[:self.max_length] + labels = labels[:self.max_length] + # print('len_ids: ', len(input_ids)) + return {'input_ids': input_ids, 'labels': labels} + + def __getitem__(self, index): + for _ in range(self._max_refetch + 1): + data = self.prepare_data(index) + # Broken images may cause the returned data to be None + if data is None: + index = self._rand_another() + continue + return data + + +if __name__ == '__main__': + from transformers import CLIPImageProcessor, AutoTokenizer + from third_parts.segment_anything.utils.transforms import ResizeLongestSide + + pretrained_model = 'MBZUAI/GLaMM-GranD-Pretrained' + llm_name_or_path = 'lmsys/vicuna-7b-v1.5' + + tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path) + image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path='openai/clip-vit-large-patch14-336') + extra_image_processor = dict( + type=ResizeLongestSide, + target_length=1024, + ) + from xtuner.utils.templates import PROMPT_TEMPLATE + + prompt_template = PROMPT_TEMPLATE.vicuna + from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory, template_map_fn + from projects.glamm.datasets.collate_fns.glamm_collate_fn import glamm_collate_fn + + dataset = ReferSegmDataset( + tokenizer=tokenizer, + special_tokens=['[SEG]'], + extra_image_processor=extra_image_processor, + prompt_template=prompt_template, + data_root='data/coco/', + data_prefix=dict(img_path='train2014/'), + ann_file='refcoco+/instances.json', + split_file='refcoco+/refs(unc).p', + ) + for i in range(1000): + dataset[i] \ No newline at end of file diff --git a/projects/llava_sam2/datasets/RefYoutubeVOS_Dataset.py b/projects/llava_sam2/datasets/RefYoutubeVOS_Dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3d45b9b3efd3f52de9915ee2270831a6a175b520 --- /dev/null +++ b/projects/llava_sam2/datasets/RefYoutubeVOS_Dataset.py @@ -0,0 +1,47 @@ +from .ReVOS_Dataset import VideoReVOSDataset +import json +import pickle + +class 
VideoRefYoutubeVOSDataset(VideoReVOSDataset): + + def json_file_preprocess(self, expression_file, mask_file): + # prepare expression annotation files + with open(expression_file, 'r') as f: + expression_datas = json.load(f)['videos'] + + metas = [] + anno_count = 0 # serve as anno_id + vid2metaid = {} + for vid_name in expression_datas: + vid_express_data = expression_datas[vid_name] + + vid_frames = sorted(vid_express_data['frames']) + vid_len = len(vid_frames) + + exp_id_list = sorted(list(vid_express_data['expressions'].keys())) + for exp_id in exp_id_list: + exp_dict = vid_express_data['expressions'][exp_id] + meta = {} + meta['video'] = vid_name + meta['exp'] = exp_dict['exp'] # str + meta['mask_anno_id'] = [str(anno_count), ] + + if 'obj_id' in exp_dict.keys(): + meta['obj_id'] = exp_dict['obj_id'] + else: + meta['obj_id'] = [0, ] # Ref-Youtube-VOS only has one object per expression + meta['anno_id'] = [str(anno_count), ] + anno_count += 1 + meta['frames'] = vid_frames + meta['exp_id'] = exp_id + + meta['length'] = vid_len + metas.append(meta) + if vid_name not in vid2metaid.keys(): + vid2metaid[vid_name] = [] + vid2metaid[vid_name].append(len(metas) - 1) + + # process mask annotation files + with open(mask_file, 'rb') as f: + mask_dict = pickle.load(f) + return vid2metaid, metas, mask_dict diff --git a/projects/llava_sam2/datasets/__init__.py b/projects/llava_sam2/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8333d84a3f0dfd9bfe1f086a25f26f72c15aa095 --- /dev/null +++ b/projects/llava_sam2/datasets/__init__.py @@ -0,0 +1,15 @@ +from .collect_fns import video_lisa_collate_fn +from .MeVIS_Dataset import VideoMeVISDataset +from .ReVOS_Dataset import VideoReVOSDataset +from .RefYoutubeVOS_Dataset import VideoRefYoutubeVOSDataset +from .encode_fn import video_lisa_encode_fn +from .RefCOCO_Dataset import ReferSegmDataset +from .ReSAM2_Dataset import VideoSAM2Dataset +from .vqa_dataset import LLaVADataset, InfinityMMDataset + +from .GCG_Dataset import GranDfGCGDataset, FlickrGCGDataset, OpenPsgGCGDataset, RefCOCOgGCGDataset +from .Grand_Dataset import GranDDataset + +from .Osprey_Dataset import OspreyDataset, OspreyDescriptionDataset, OspreyShortDescriptionDataset + +from .ChatUniVi_Dataset import VideoChatUniViDataset diff --git a/projects/llava_sam2/datasets/collect_fns.py b/projects/llava_sam2/datasets/collect_fns.py new file mode 100644 index 0000000000000000000000000000000000000000..2d5c6622ce1687101da11c781d94f3bf20383a3a --- /dev/null +++ b/projects/llava_sam2/datasets/collect_fns.py @@ -0,0 +1,206 @@ +from typing import Dict, Sequence + +import numpy as np +import torch +from torch.nn.utils.rnn import pad_sequence + +from xtuner.parallel.sequence import (get_sequence_parallel_world_size, + pad_for_sequence_parallel) +from xtuner.utils import DEFAULT_PAD_TOKEN_INDEX, IGNORE_INDEX + + +def video_lisa_collate_fn(instances: Sequence[Dict], + pad_index: int = DEFAULT_PAD_TOKEN_INDEX, + return_hf_format: bool = False, + use_varlen_attn: bool = False): + seq_parallel_world_size = get_sequence_parallel_world_size() + + input_ids, labels = [], [] + has_image = any(inst.get('pixel_values') is not None for inst in instances) + has_pe = any(inst.get('image_grid_thw', None) is not None for inst in instances) + has_fast_image = any(inst.get('fast_pixel_values', None) is not None for inst in instances) + has_grounding_image = any(inst.get('g_pixel_values') is not None for inst in instances) + has_mask = any(inst.get('masks') is not None for inst in 
instances) + has_bboxes = any(inst.get('bboxes') is not None for inst in instances) + has_points = any(inst.get('points') is not None for inst in instances) + has_fast_exists = any(inst.get('fast_exists') is not None for inst in instances) + + has_vp = any(inst.get('vp_overall_mask') is not None for inst in instances) + has_prompt_mask = any(inst.get('prompt_masks') is not None for inst in instances) + + if use_varlen_attn: + position_ids, cumulative_len = [], [] + assert len(instances) == 1, ( + f'If utilizing varlen attention, the batch size should be' + f' set to 1, but got {len(instances)}') + assert not has_image, 'Currently, it is not configured to ' + 'accommodate the use of varlen Attention in multimodal training' + + if has_image: + pixel_values = [] + frames_per_batch = [] + image_grid_thw = [] + if has_grounding_image: + grounding_pixel_values = [] + if has_mask: + object_masks = [] + if has_bboxes: + object_bboxes = [] + if has_points: + prompt_points = [] + if has_fast_image: + fast_pixel_values = [] + if has_fast_exists: + fast_exists = [] + if has_vp: + vp_overall_mask = [] + else: + vp_overall_mask = None + + if has_prompt_mask: + prompt_masks = [] + else: + prompt_masks = None + + for example in instances: + input_ids.append(torch.LongTensor(example['input_ids'])) + labels.append(torch.LongTensor(example['labels'])) + if use_varlen_attn: + cumulative_len.append(torch.IntTensor(example['cumulative_len'])) + position_ids.append(torch.LongTensor(example['position_ids'])) + + if has_image: + pixel_values.append(example['pixel_values']) + if has_pe: + image_grid_thw.append(example['image_grid_thw']) + if has_vp: + if 'vp_overall_mask' in example.keys() and example['vp_overall_mask'] is not None: + vp_overall_mask.append(example['vp_overall_mask']) + else: + vp_overall_mask.append(torch.Tensor([False] * len(pixel_values[-1]))) + if has_fast_image: + if 'fast_pixel_values' in example.keys() and example['fast_pixel_values'] is not None: + fast_pixel_values.append(example['fast_pixel_values']) + if has_fast_exists: + if 'fast_exists' in example.keys() and example['fast_exists'] is not None: + fast_exists.append(example['fast_exists']) + if has_grounding_image and 'g_pixel_values' in example.keys(): + if isinstance(example['g_pixel_values'], list): + grounding_pixel_values += example['g_pixel_values'] + frames_per_batch.append(len(example['g_pixel_values'])) + else: + grounding_pixel_values.append(example['g_pixel_values']) + frames_per_batch.append(1) + + if has_mask: + if 'masks' in example.keys() and example['masks'] is not None: + if isinstance(example['masks'], list): + if isinstance(example['masks'][0], np.ndarray): + _masks = np.stack(example['masks'], axis=0) + _masks = torch.from_numpy(_masks) + object_masks.append(_masks) + else: + object_masks.append(torch.stack(example['masks'], dim=0)) + else: + object_masks.append(example['masks']) + if has_bboxes: + if 'bboxes' in example.keys() and example['bboxes'] is not None: + object_bboxes.append(example['bboxes']) + if has_points: + if 'points' in example.keys() and example['points'] is not None: + prompt_points.append(example['points']) + + if has_prompt_mask: + if 'prompt_masks' in example.keys(): + prompt_masks.append(example['prompt_masks']) + + ori_length = [len(ids) for ids in input_ids] + if len(instances) > 1: + input_ids = pad_sequence( + input_ids, batch_first=True, padding_value=pad_index) + labels = pad_sequence( + labels, batch_first=True, padding_value=IGNORE_INDEX) + else: + input_ids = torch.stack(input_ids) + 
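# --- Illustrative sketch, not part of the original diff ---
# A self-contained example of the padding logic used by video_lisa_collate_fn above:
# right-pad `input_ids` with the pad token, pad `labels` with IGNORE_INDEX, and build the
# attention mask from the original lengths (the pad id may coincide with the eos id, so the
# mask cannot be derived from token values alone). The sample tensors below are hypothetical.
def _collate_padding_sketch():
    import torch
    from torch.nn.utils.rnn import pad_sequence

    IGNORE_INDEX = -100
    pad_index = 0

    # Two samples of different lengths, as produced by the encode functions.
    input_ids = [torch.LongTensor([1, 5, 6, 2]), torch.LongTensor([1, 7, 2])]
    labels = [torch.LongTensor([-100, -100, 6, 2]), torch.LongTensor([-100, 7, 2])]
    ori_length = [len(ids) for ids in input_ids]

    input_ids = pad_sequence(input_ids, batch_first=True, padding_value=pad_index)
    labels = pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)

    attention_mask = torch.zeros_like(input_ids).bool()
    for i, length in enumerate(ori_length):
        attention_mask[i, :length] = True
    # input_ids/labels have shape (2, 4); the mask keeps only the 3 real tokens of sample 2.
    return input_ids, labels, attention_mask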
labels = torch.stack(labels) + + if use_varlen_attn: + assert input_ids.size(1) % seq_parallel_world_size == 0 + attention_mask = None + position_ids = torch.stack(position_ids, dim=0) + else: + # Some tokenizers have the same eos token and pad token, so input_ids + # cannot be masked directly based on the pad token id. + attention_mask = torch.zeros_like(input_ids).bool() + for i, length in enumerate(ori_length): + attention_mask[i, :length] = True + + bs, seq_len = input_ids.shape + position_ids = torch.arange(seq_len).unsqueeze(0).long().repeat(bs, 1) + + if seq_parallel_world_size > 1: + input_ids = pad_for_sequence_parallel(input_ids, pad_index) + labels = pad_for_sequence_parallel(labels, IGNORE_INDEX) + position_ids = pad_for_sequence_parallel(position_ids, 0) + if attention_mask is not None: + attention_mask = pad_for_sequence_parallel(attention_mask, 0) + + if use_varlen_attn: + max_seqlen = ( + cumulative_len[0][1:] - # noqa: W504 + cumulative_len[0][:-1]).max().item() + data_dict = { + 'input_ids': input_ids, + 'cumulative_len': cumulative_len, + 'position_ids': position_ids, + 'labels': labels, + 'max_seqlen': max_seqlen + } + else: + data_dict = { + 'input_ids': input_ids, + 'attention_mask': attention_mask, + 'position_ids': position_ids, + 'labels': labels + } + + if has_image: + if all(x.shape == pixel_values[0].shape for x in pixel_values): + pixel_values = torch.stack(pixel_values, dim=0) + data_dict['frames_per_batch'] = frames_per_batch + data_dict['pixel_values'] = pixel_values + if has_pe: + data_dict['image_grid_thw'] = image_grid_thw + + if has_fast_image: + if all(x.shape == fast_pixel_values[0].shape for x in fast_pixel_values): + fast_pixel_values = torch.stack(fast_pixel_values, dim=0) + data_dict['fast_pixel_values'] = fast_pixel_values + + if has_fast_exists: + data_dict['fast_exists'] = fast_exists + + if has_vp: + data_dict['vp_overall_mask'] = torch.cat(vp_overall_mask, dim=0) + + if has_prompt_mask: + data_dict['prompt_masks'] = prompt_masks + + if has_grounding_image: + # if all(x.shape == grounding_pixel_values[0].shape for x in grounding_pixel_values): + # grounding_pixel_values = torch.stack(grounding_pixel_values, dim=0) + data_dict['g_pixel_values'] = grounding_pixel_values + + if has_mask: + data_dict['masks'] = object_masks + + if has_bboxes: + data_dict['bboxes'] = object_bboxes + + if has_points: + data_dict['points'] = prompt_points + + if return_hf_format: + return data_dict + else: + return {'data': data_dict, 'data_samples': None} \ No newline at end of file diff --git a/projects/llava_sam2/datasets/encode_fn.py b/projects/llava_sam2/datasets/encode_fn.py new file mode 100644 index 0000000000000000000000000000000000000000..9bae51a427af41558ca03c810a69ffec62c6f7e3 --- /dev/null +++ b/projects/llava_sam2/datasets/encode_fn.py @@ -0,0 +1,144 @@ +import copy +from xtuner.dataset.utils import get_bos_eos_token_ids +from xtuner.utils import DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX + +def video_lisa_encode_fn( + example, + tokenizer, + max_length, + input_ids_with_output=True, + **kwargs +): + """We only support the following three scenarios: + + 1. Incremental pretraining dataset. + example['conversation'] = [ + { + 'input': '', + 'output': '### Human: Can you write xxx' + } + ] + + 2. Single-turn conversation dataset. + example['conversation'] = [ + { + 'input': 'Give three tips for staying healthy.', + 'output': '1.Eat a balanced diet xxx' + } + ] + + 3. Multi-turn conversation dataset. 
+ example['conversation'] = [ + { + 'input': 'Give three tips for staying healthy.', + 'output': '1.Eat a balanced diet xxx' + }, + { + 'input': 'Please expand on the second point.', + 'output': 'Here is an expanded explanation of the xxx' + } + ] + """ + bos_token_id, eos_token_id = get_bos_eos_token_ids(tokenizer) + is_multi_turn_conversation = len(example['conversation']) > 1 + if is_multi_turn_conversation: + assert input_ids_with_output + + input_ids, labels = [], [] + next_needs_bos_token = True + for single_turn_conversation in example['conversation']: + input = single_turn_conversation['input'] + input_encode = tokenizer.encode(input, add_special_tokens=False) + if next_needs_bos_token: + input_ids += bos_token_id + labels += [IGNORE_INDEX] * len(bos_token_id) + input_ids += input_encode + labels += [IGNORE_INDEX] * len(input_encode) + if input_ids_with_output: + # Add output + output_with_loss = single_turn_conversation.get( + 'output_with_loss', True) + output = single_turn_conversation['output'] + output_encode = tokenizer.encode(output, add_special_tokens=False) + input_ids += output_encode + if output_with_loss: + labels += copy.deepcopy(output_encode) + else: + labels += [IGNORE_INDEX] * len(output_encode) + # Add EOS_TOKEN (with loss) + if single_turn_conversation.get('need_eos_token', True): + next_needs_bos_token = True + input_ids += eos_token_id + if output_with_loss: + labels += copy.deepcopy(eos_token_id) + else: + labels += [IGNORE_INDEX] * len(eos_token_id) + else: + next_needs_bos_token = False + # Add SEP (without loss) + sep = single_turn_conversation.get('sep', '') + if sep != '': + sep_encode = tokenizer.encode(sep, add_special_tokens=False) + input_ids += sep_encode + labels += [IGNORE_INDEX] * len(sep_encode) + + if len(input_ids) > max_length: + input_ids = input_ids[:max_length] + labels = labels[:max_length] + return {'input_ids': input_ids, 'labels': labels} + + +def video_lisa_encode_multi_conv_fn( + example, + tokenizer, + max_length, + input_ids_with_output=True +): + """We only support the following three scenarios: + + 1. Incremental pretraining dataset. + example['conversation'] = [ + { + 'input': '', + 'output': '### Human: Can you write xxx' + } + ] + + 2. Single-turn conversation dataset. + example['conversation'] = [ + { + 'input': 'Give three tips for staying healthy.', + 'output': '1.Eat a balanced diet xxx' + } + ] + + 3. Multi-turn conversation dataset. 
+ example['conversation'] = [ + { + 'input': 'Give three tips for staying healthy.', + 'output': '1.Eat a balanced diet xxx' + }, + { + 'input': 'Please expand on the second point.', + 'output': 'Here is an expanded explanation of the xxx' + } + ] + """ + bos_token_id, eos_token_id = get_bos_eos_token_ids(tokenizer) + assert not input_ids_with_output + input_id_list = [] + for conv in example['conversation']: + input_ids = [] + next_needs_bos_token = True + for single_turn_conversation in conv: + input = single_turn_conversation['input'] + input_encode = tokenizer.encode(input, add_special_tokens=False) + if next_needs_bos_token: + input_ids += bos_token_id + input_ids += input_encode + + if len(input_ids) > max_length: + input_ids = input_ids[:max_length] + + input_id_list.append(input_ids) + return {'input_ids': input_id_list} diff --git a/projects/llava_sam2/datasets/gcg_process.py b/projects/llava_sam2/datasets/gcg_process.py new file mode 100644 index 0000000000000000000000000000000000000000..e6257600af2bab37aa61b9ce7dc36022a241b28d --- /dev/null +++ b/projects/llava_sam2/datasets/gcg_process.py @@ -0,0 +1,297 @@ +import numpy as np +import random +from xtuner.utils import DEFAULT_IMAGE_TOKEN + +GCG_QUESTIONS = [ + DEFAULT_IMAGE_TOKEN + 'Could you please give me a brief description of the image? Please respond with interleaved segmentation masks for the corresponding parts of the answer.', + DEFAULT_IMAGE_TOKEN + 'Can you provide a brief description of the this image? Please output with interleaved segmentation masks for the corresponding phrases.', + DEFAULT_IMAGE_TOKEN + 'Please briefly describe the contents of the image. Please respond with interleaved segmentation masks for the corresponding parts of the answer.', + DEFAULT_IMAGE_TOKEN + 'Could you give a brief explanation of what can be found within this picture? Please output with interleaved segmentation masks for the corresponding phrases.', + DEFAULT_IMAGE_TOKEN + 'Could you give me an brief explanation of this picture? Please respond with interleaved segmentation masks for the corresponding phrases.', + DEFAULT_IMAGE_TOKEN + 'Could you provide me with a briefly analysis of this photo? 
Please output with interleaved segmentation masks for the corresponding parts of the answer.', +] + +def refcocog_parse_annotations(example): + # example {'id': str, 'refs': [{"setence", 'bbox', 'segmentation'},], 'img_file_name': str, 'caption': str} + annotations = {'labels': [], 'caption': [], 'masks': [], 'tokens_positive': [], + 'file_name': example['img_file_name'], 'image': example['img_file_name']} + + orig_caption = example['caption'].strip('"').strip() + annotations['caption'] = orig_caption.lower() + + for detail in example['refs']: + phrase = detail['sentence'] + if phrase.lower() in annotations['caption']: + annotations['labels'].append(phrase) + index = annotations['caption'].find(phrase) + end_index = index + len(phrase) if index != -1 else -1 + annotations['tokens_positive'].append([index, end_index]) + # still polygon or rle + annotations['masks'].append(detail["segmentation"]) + + # Sort tokens_positive and corresponding lists + tokens_positive = annotations['tokens_positive'] + sorted_indices = sorted(range(len(tokens_positive)), key=lambda i: tokens_positive[i][0]) + annotations['tokens_positive'] = [tokens_positive[i] for i in sorted_indices] + annotations['masks'] = [annotations['masks'][i] for i in sorted_indices] + annotations['labels'] = [annotations['labels'][i] for i in sorted_indices] + + # Trimming overlapping intervals + for i in range(len(tokens_positive)): + for j in range(i + 1, len(tokens_positive)): + # If there is overlap + if tokens_positive[i][1] >= tokens_positive[j][0]: + # Modify the end index of phrase i to be one less than the start index of phrase j + tokens_positive[i][1] = tokens_positive[j][0] - 1 + # Modify the phrases to reflect the change in indices + annotations['labels'][i] = orig_caption[tokens_positive[i][0]:tokens_positive[i][1] + 1] + break # Exit inner loop since i was modified + + return annotations + +def refcocog_conversation(caption, tokens_positive): + # insert
<p> </p>
and [seg] to caption and select a question + question = random.choice(GCG_QUESTIONS).strip() + + # Prepare caption with tags + def tag_caption(caption, tokens): + for start, end in sorted(tokens, key=lambda x: x[0], reverse=True): + caption = f"{caption[:start]}
<p> {caption[start:end]} </p>
[SEG]{caption[end:]}" + return caption + + detailed_answer = tag_caption(caption, tokens_positive) + + conversations = [{'from': 'human', 'value': question}, {'from': 'gpt', 'value': detailed_answer}] + return conversations + +def refcocog_preprocess(example): + data_labels = example['labels'] + masks = example['masks'] + caption = example['caption'] + tokens_positive = example['tokens_positive'] + + # Function to sort elements based on the start index of each phrase + def sort_by_start_index(items, order): + return [items[i] for i in order] + + # Sort phrases based on their appearance in the sentence + phrase_order = sorted(range(len(tokens_positive)), key=lambda x: tokens_positive[x][0]) + masks = sort_by_start_index(masks, phrase_order) + data_labels = sort_by_start_index(data_labels, phrase_order) + tokens_positive = sort_by_start_index(tokens_positive, phrase_order) + + conversations = refcocog_conversation(caption, tokens_positive) + example['conversations'] = conversations + example['labels'] = data_labels + example['masks'] = masks + example['tokens_positive'] = tokens_positive + + return example + +def glamm_refcocog_map_fn(example): + # example {'id': str, 'refs': [{"setence", 'bbox', 'segmentation'},], 'img_file_name': str, 'caption': str} + + example = refcocog_parse_annotations(example) + # example 'labels': [], 'caption': str, 'masks': [], 'tokens_positive': [], 'file_name': image_file + + example = refcocog_preprocess(example) + + # do llava preprocess + messages = example['conversations'] + input = '' + conversation = [] + while messages and messages[0]['from'] == 'gpt': + # Skip the first one if it is from gpt + messages = messages[1:] + for msg in messages: + if msg['from'] == 'human': + if DEFAULT_IMAGE_TOKEN in msg['value']: + msg['value'] = msg['value'].replace(DEFAULT_IMAGE_TOKEN, + '').strip() + msg['value'] = DEFAULT_IMAGE_TOKEN + '\n' + msg['value'] + msg['value'] = msg['value'].strip() + input += msg['value'] + + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + example.update({'conversation': conversation}) + return example + +def grandf_parse_annotations(example): + image_path = example['file_name'] + annotations = { + 'labels': [], 'caption': [], 'masks': [], + 'tokens_positive': [], 'file_name': image_path, + 'image': image_path} + annotations['caption'] = example['caption'].strip('"').strip() + + for word, grounding in example["groundings"].items(): + if grounding is None: + continue + annotations['labels'].append(word) + annotations['tokens_positive'].append(grounding["token_positives"]) + annotations['masks'].append(grounding["rle_masks"]) + + return annotations + +def grandf_conversation(caption, tokens_positive): + question = random.choice(GCG_QUESTIONS).strip() + + # Prepare caption with tags + def tag_caption(caption, tokens): + for start, end in sorted(tokens, key=lambda x: x[0], reverse=True): + caption = f"{caption[:start]}
<p> {caption[start:end]} </p>
[SEG]{caption[end:]}" + return caption + + detailed_answer = tag_caption(caption, tokens_positive) + + conversations = [{'from': 'human', 'value': question}, {'from': 'gpt', 'value': detailed_answer}] + return conversations +def grandf_preprocess(example): + data_labels = example['labels'] + masks = example['masks'] + caption = example['caption'] + tokens_positive = example['tokens_positive'] + + # Function to sort elements based on the start index of each phrase + def sort_by_start_index(items, order): + return [items[i] for i in order] + + # Sort phrases based on their appearance in the sentence + phrase_order = sorted(range(len(tokens_positive)), key=lambda x: tokens_positive[x][0]) + masks = sort_by_start_index(masks, phrase_order) + data_labels = sort_by_start_index(data_labels, phrase_order) + tokens_positive = sort_by_start_index(tokens_positive, phrase_order) + + conversations = grandf_conversation(caption, tokens_positive) + example['conversations'] = conversations + example['labels'] = data_labels + example['masks'] = masks + example['tokens_positive'] = tokens_positive + return example + +def glamm_granf_map_fn(example): + # example {'file_name': str, "height": int, "width": int, "image_id": str, caption: "str", + # "groundings": {ground_words: {'token_positives', 'rle_masks', }}} + example = grandf_parse_annotations(example) + # example 'labels': [], 'caption': str, 'masks': [], 'tokens_positive': [], 'file_name': image_file + + example = grandf_preprocess(example) + + # do llava preprocess + messages = example['conversations'] + input = '' + conversation = [] + while messages and messages[0]['from'] == 'gpt': + # Skip the first one if it is from gpt + messages = messages[1:] + for msg in messages: + if msg['from'] == 'human': + if DEFAULT_IMAGE_TOKEN in msg['value']: + msg['value'] = msg['value'].replace(DEFAULT_IMAGE_TOKEN, + '').strip() + msg['value'] = DEFAULT_IMAGE_TOKEN + '\n' + msg['value'] + msg['value'] = msg['value'].strip() + input += msg['value'] + + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + example.update({'conversation': conversation}) + return example + +glamm_openpsg_map_fn = glamm_granf_map_fn + +def flickr_parse_annotations(example): + annotations = {'bboxes': [], 'labels': [], 'bboxes_ignore': [], 'caption': example['caption'], 'masks': [], + 'tokens_positive': [], 'image': example['file_name']} + ann_info = example["ann_info"] + for ann in ann_info: + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + inter_w = max(0, min(x1 + w, example['width']) - max(x1, 0)) + inter_h = max(0, min(y1 + h, example['height']) - max(y1, 0)) + if inter_w * inter_h == 0 or ann['area'] <= 0 or w < 1 or h < 1: + continue + bbox = [x1, y1, x1 + w, y1 + h] + annotations['bboxes'].append(bbox) + tokens_positive = ann['tokens_positive'] + gt_label = [example['caption'][span[0]:span[1]] for span in tokens_positive] + annotations['labels'].append(gt_label[0]) + annotations['tokens_positive'].append(tokens_positive[0]) + + rle = ann['sam_mask'] + annotations['masks'].append(rle) + + # Convert bounding boxes to numpy arrays + annotations['bboxes'] = np.array(annotations['bboxes'], dtype=np.float32) if annotations[ + 'bboxes'] else np.zeros((0, 4), dtype=np.float32) + annotations['bboxes_ignore'] = np.array(annotations['bboxes_ignore'], dtype=np.float32) if annotations[ + 'bboxes_ignore'] else np.zeros((0, 4), dtype=np.float32) + return annotations + +def 
flickr_preprocess(example): + data_labels = example['labels'] + masks = example['masks'] + caption = example['caption'] + tokens_positive = example['tokens_positive'] + + # Function to sort elements based on the start index of each phrase + def sort_by_start_index(items, order): + return [items[i] for i in order] + + # Sort phrases based on their appearance in the sentence + phrase_order = sorted(range(len(tokens_positive)), key=lambda x: tokens_positive[x][0]) + masks = sort_by_start_index(masks, phrase_order) + data_labels = sort_by_start_index(data_labels, phrase_order) + tokens_positive = sort_by_start_index(tokens_positive, phrase_order) + + conversations = grandf_conversation(caption, tokens_positive) + example['conversations'] = conversations + example['labels'] = data_labels + example['masks'] = masks + example['tokens_positive'] = tokens_positive + return example + +def glamm_flickr_map_fn(example): + # example {'file_name': str, "height": int, "width": int, "image_id": str, caption: "str", + # "groundings": {ground_words: {'token_positives', 'rle_masks', }}} + + example = flickr_parse_annotations(example) + + example = flickr_preprocess(example) + + # do llava preprocess + messages = example['conversations'] + input = '' + conversation = [] + while messages and messages[0]['from'] == 'gpt': + # Skip the first one if it is from gpt + messages = messages[1:] + for msg in messages: + if msg['from'] == 'human': + if DEFAULT_IMAGE_TOKEN in msg['value']: + msg['value'] = msg['value'].replace(DEFAULT_IMAGE_TOKEN, + '').strip() + msg['value'] = DEFAULT_IMAGE_TOKEN + '\n' + msg['value'] + msg['value'] = msg['value'].strip() + input += msg['value'] + + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + example.update({'conversation': conversation}) + return example + + + + + diff --git a/projects/llava_sam2/datasets/grand_process.py b/projects/llava_sam2/datasets/grand_process.py new file mode 100644 index 0000000000000000000000000000000000000000..a97e625a1e1fdc819881acb19617290be1e191c5 --- /dev/null +++ b/projects/llava_sam2/datasets/grand_process.py @@ -0,0 +1,110 @@ +import numpy as np +import random +from xtuner.utils import DEFAULT_IMAGE_TOKEN + +GCG_QUESTIONS = [ + DEFAULT_IMAGE_TOKEN + 'Could you please give me a brief description of the image? Please respond with interleaved segmentation masks for the corresponding parts of the answer.', + DEFAULT_IMAGE_TOKEN + 'Can you provide a brief description of the this image? Please output with interleaved segmentation masks for the corresponding phrases.', + DEFAULT_IMAGE_TOKEN + 'Please briefly describe the contents of the image. Please respond with interleaved segmentation masks for the corresponding parts of the answer.', + DEFAULT_IMAGE_TOKEN + 'Could you give a brief explanation of what can be found within this picture? Please output with interleaved segmentation masks for the corresponding phrases.', + DEFAULT_IMAGE_TOKEN + 'Could you give me an brief explanation of this picture? Please respond with interleaved segmentation masks for the corresponding phrases.', + DEFAULT_IMAGE_TOKEN + 'Could you provide me with a briefly analysis of this photo? 
Please output with interleaved segmentation masks for the corresponding parts of the answer.', +] + +def grand_parse_annotations(example): + annotations = { + 'caption': [], 'masks': [], + 'tokens_positive': [], 'labels': []} + annotations['caption'] = example['dense_caption']['caption'].strip('"').strip() + object_infos = example['dense_caption']['details'] + + all_seg_objects_dict = {} + for seg_object_dict in example["objects"]: + all_seg_objects_dict[seg_object_dict['id']] = seg_object_dict + for seg_object_dict in example["floating_objects"]: + all_seg_objects_dict[seg_object_dict['id']] = seg_object_dict + + for object_info in object_infos: + ids = object_info["ids"] + if object_info["tokens_positive"] is None: + continue + annotations['labels'].append(object_info["phrase"]) + annotations['tokens_positive'].append(object_info["tokens_positive"]) + _masks = [] + for _id in ids: + _masks.append(all_seg_objects_dict[_id]['segmentation']) + annotations['masks'].append(_masks) + return annotations + +def grand_conversation(caption, tokens_positive): + question = random.choice(GCG_QUESTIONS).strip() + + # Prepare caption with tags + def tag_caption(caption, tokens): + for start, end in sorted(tokens, key=lambda x: x[0], reverse=True): + caption = f"{caption[:start]}
<p> {caption[start:end]} </p>
[SEG]{caption[end:]}" + return caption + + detailed_answer = tag_caption(caption, tokens_positive) + + conversations = [{'from': 'human', 'value': question}, {'from': 'gpt', 'value': detailed_answer}] + return conversations + +def grand_preprocess(example): + data_labels = example['labels'] + masks = example['masks'] + caption = example['caption'] + tokens_positive = example['tokens_positive'] + + # Function to sort elements based on the start index of each phrase + def sort_by_start_index(items, order): + return [items[i] for i in order] + + # Sort phrases based on their appearance in the sentence + phrase_order = sorted(range(len(tokens_positive)), key=lambda x: tokens_positive[x][0]) + masks = sort_by_start_index(masks, phrase_order) + data_labels = sort_by_start_index(data_labels, phrase_order) + tokens_positive = sort_by_start_index(tokens_positive, phrase_order) + + conversations = grand_conversation(caption, tokens_positive) + example['conversations'] = conversations + example['labels'] = data_labels + example['masks'] = masks + example['tokens_positive'] = tokens_positive + return example + +def glamm_grand_map_fn(example): + # example {'file_name': str, "height": int, "width": int, "image_id": str, caption: "str", + # "groundings": {ground_words: {'token_positives', 'rle_masks', }}} + example = grand_parse_annotations(example) + # example 'labels': [], 'caption': str, 'masks': [], 'tokens_positive': [], 'file_name': image_file + + example = grand_preprocess(example) + + # do llava preprocess + messages = example['conversations'] + input = '' + conversation = [] + while messages and messages[0]['from'] == 'gpt': + # Skip the first one if it is from gpt + messages = messages[1:] + for msg in messages: + if msg['from'] == 'human': + if DEFAULT_IMAGE_TOKEN in msg['value']: + msg['value'] = msg['value'].replace(DEFAULT_IMAGE_TOKEN, + '').strip() + msg['value'] = DEFAULT_IMAGE_TOKEN + '\n' + msg['value'] + msg['value'] = msg['value'].strip() + input += msg['value'] + + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + example.update({'conversation': conversation}) + return example + + + + diff --git a/projects/llava_sam2/datasets/utils.py b/projects/llava_sam2/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6b7aec3dea24fb236462c088a082f2c89d57835f --- /dev/null +++ b/projects/llava_sam2/datasets/utils.py @@ -0,0 +1,58 @@ + +def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, + image_size): + best_ratio_diff = float('inf') + best_ratio = (1, 1) + area = width * height + for ratio in target_ratios: + target_aspect_ratio = ratio[0] / ratio[1] + ratio_diff = abs(aspect_ratio - target_aspect_ratio) + if ratio_diff < best_ratio_diff: + best_ratio_diff = ratio_diff + best_ratio = ratio + elif ratio_diff == best_ratio_diff: + if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]: + best_ratio = ratio + return best_ratio + +def dynamic_preprocess(image, + min_num=1, + max_num=6, + image_size=448, + use_thumbnail=False): + orig_width, orig_height = image.size + aspect_ratio = orig_width / orig_height + + # calculate the existing image aspect ratio + target_ratios = {(i, j) + for n in range(min_num, max_num + 1) + for i in range(1, n + 1) for j in range(1, n + 1) + if i * j <= max_num and i * j >= min_num} + target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1]) + + # find the closest aspect ratio to the target + 
target_aspect_ratio = find_closest_aspect_ratio(aspect_ratio, + target_ratios, orig_width, + orig_height, image_size) + + # calculate the target width and height + target_width = image_size * target_aspect_ratio[0] + target_height = image_size * target_aspect_ratio[1] + blocks = target_aspect_ratio[0] * target_aspect_ratio[1] + + # resize the image + resized_img = image.resize((target_width, target_height)) + processed_images = [] + for i in range(blocks): + box = ((i % (target_width // image_size)) * image_size, + (i // (target_width // image_size)) * image_size, + ((i % (target_width // image_size)) + 1) * image_size, + ((i // (target_width // image_size)) + 1) * image_size) + # split the image + split_img = resized_img.crop(box) + processed_images.append(split_img) + assert len(processed_images) == blocks + if use_thumbnail and len(processed_images) != 1: + thumbnail_img = image.resize((image_size, image_size)) + processed_images.append(thumbnail_img) + return processed_images \ No newline at end of file diff --git a/projects/llava_sam2/datasets/vqa_dataset.py b/projects/llava_sam2/datasets/vqa_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..1c88e56704ded24f7d087a153c4d173a897df056 --- /dev/null +++ b/projects/llava_sam2/datasets/vqa_dataset.py @@ -0,0 +1,509 @@ +import copy +import random +import glob +import json +import logging +import os +from typing import Literal + +import torch + +from mmengine import print_log +from mmengine.config import Config, ConfigDict +from PIL import Image +from torch.utils.data import Dataset +import numpy as np +import torch.nn.functional as F +import torchvision.transforms as T +from torchvision.transforms.functional import InterpolationMode +from pycocotools.coco import COCO +from pycocotools import mask as mask_utils + +from xtuner.registry import BUILDER +from xtuner.utils import IGNORE_INDEX +from xtuner.dataset.utils import encode_fn +from xtuner.dataset.map_fns import llava_map_fn + +from projects.glamm.datasets.utils.utils import expand2square + +from projects.glamm.datasets.utils.utils import SEG_QUESTIONS, ANSWER_LIST +from projects.glamm.utils import DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN + +from .utils import dynamic_preprocess + + +class InfinityMMDataset(Dataset): + os.environ['TOKENIZERS_PARALLELISM'] = 'true' + IMG_CONTEXT_TOKEN = '' + IMG_START_TOKEN = '' + IMG_END_TOKEN = '' + + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + + def __init__(self, + tokenizer, + data_path, + prompt_template, + special_tokens=None, + max_length=8192, + offline_save_path='./work_dirs/infinityMM.json', + ): + self.offline_save_path = offline_save_path + self.tokenizer = BUILDER.build(tokenizer) + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + self._system = '' + + self.template = prompt_template + self.max_length = max_length + + self.min_dynamic_patch = 1 + self.max_dynamic_patch = 12 + self.downsample_ratio = 0.5 + self.image_size = 448 + self.use_thumbnail = True + patch_size = 14 + self.patch_token = int( + (self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2)) + + self.transformer = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') + if img.mode != 'RGB' else img), + T.Resize((self.image_size, self.image_size), + interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD) + ]) + + self.data = self._load_annotations(data_path) + 
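# --- Illustrative sketch, not part of the original diff ---
# Rough usage example for `dynamic_preprocess` (defined in utils.py above) with the settings
# these datasets use (image_size=448, patch_size=14, downsample_ratio=0.5, use_thumbnail=True).
# The import path mirrors this repository layout and the input image is hypothetical.
def _dynamic_tiling_sketch():
    from PIL import Image
    from projects.llava_sam2.datasets.utils import dynamic_preprocess

    image = Image.new('RGB', (896, 448))  # a 2:1 example image
    tiles = dynamic_preprocess(image, min_num=1, max_num=12,
                               image_size=448, use_thumbnail=True)
    # A 2:1 input maps to a 2x1 grid of 448x448 crops plus one thumbnail, i.e. 3 tiles.
    patch_token = int((448 // 14) ** 2 * (0.5 ** 2))  # 256 context tokens per tile
    num_image_tokens = len(tiles) * patch_token       # 3 * 256 = 768 for this example
    return tiles, num_image_tokens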
self._max_refetch = 1000 + + def _load_annotations(self, data_path): + if os.path.exists(self.offline_save_path): + with open(self.offline_save_path, 'r') as f: + ret = json.load(f) + print(f"Load InfinityMM file list from {self.offline_save_path}, {len(ret)} items !!!") + return ret + sub_folders = [] + for sub_folder in os.listdir(data_path): + if '.' not in sub_folder: + # a folder + if "LVIS_111k" in sub_folder: + # special case, have subsub folder + subsub_folders = os.listdir(os.path.join(data_path, sub_folder)) + for subsub_folder in subsub_folders: + sub_folders.append(os.path.join(data_path, sub_folder, subsub_folder)) + else: + sub_folders.append(os.path.join(data_path, sub_folder)) + + all_jsons = [] + for sub_folder in sub_folders: + print(f"Processing {sub_folder} !!!") + _files = os.listdir(sub_folder) + _num = 0 + for _file in _files: + if '.json' in _file: + _json_path = os.path.join(sub_folder, _file) + _num += 1 + all_jsons.append(os.path.join(sub_folder, _file)) + print(f"Finished {sub_folder} has {_num} items.") + + with open(self.offline_save_path, 'w') as f: + json.dump(all_jsons, f) + + return all_jsons + + def __getitem__(self, index): + for _ in range(self._max_refetch + 1): + data = self.prepare_data(index) + # Broken images may cause the returned data to be None + if data is None: + index = self._rand_another() + continue + return data + + def __len__(self): + return len(self.data) + + @property + def modality_length(self): + self.group_length = [] + for data_dict in self.data: + self.group_length.append(100) + return self.group_length + + @property + def length(self): + group_length = np.array(self.group_length) + group_length = np.abs(group_length).tolist() + return group_length + + def prepare_data(self, index): + data_path = self.data[index] + + with open(data_path, 'r') as f: + data_dict = json.load(f) + if 'image' in data_dict.keys(): + data_dict['image'] = data_path.replace('.json', '.jpg') + + if data_dict is None: + return None + + out_data_dict = {} + + if data_dict.get('image', None) is not None: + image_file = data_dict['image'] + try: + image = Image.open(image_file).convert('RGB') + except Exception as e: + print(f'Error: {e}', flush=True) + print_log(f'Error: {e}', logger='current') + return None + + images = dynamic_preprocess(image, self.min_dynamic_patch, + self.max_dynamic_patch, + self.image_size, self.use_thumbnail) + pixel_values = [self.transformer(image) for image in images] + pixel_values = torch.stack(pixel_values) + out_data_dict['pixel_values'] = pixel_values + + num_image_tokens = pixel_values.shape[0] * self.patch_token + image_token_str = f'{self.IMG_START_TOKEN}' \ + f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \ + f'{self.IMG_END_TOKEN}' + token_dict = self.get_inputid_labels( + data_dict['conversations'], image_token_str) + out_data_dict.update(token_dict) + else: + token_dict = self.get_inputid_labels( + data_dict['conversations'], None) + out_data_dict.update(token_dict) + out_data_dict['pixel_values'] = torch.zeros( + 1, 3, self.image_size, self.image_size) + return out_data_dict + + def _rand_another(self) -> int: + return np.random.randint(0, len(self.data)) + + def get_inputid_labels(self, conversations, image_token_str) -> dict: + input = '' + out_conversation = [] + while conversations and conversations[0]['from'] == 'gpt': + # Skip the first one if it is from gpt + conversations = conversations[1:] + for i, msg in enumerate(conversations): + if msg['from'] == 'human': + + # change to 1 image + if '' in msg['value']: + 
msg['value'] = msg['value'].replace('\n', '').replace('', '') + if i == 0: + msg['value'] = "\n" + msg['value'] + + if image_token_str is None and '' in msg['value']: + msg['value'] = msg['value'].replace('', '') + if '' in msg['value']: + msg['value'] = msg['value'].replace('', image_token_str).strip() + input += msg['value'].strip() + elif msg['from'] == 'gpt': + out_conversation.append({ + 'input': input, + 'output': msg['value'].strip() + }) + input = '' + else: + raise NotImplementedError + + input_ids, labels = [], [] + for i, single_turn_conversation in enumerate(out_conversation): + input = single_turn_conversation.get('input', '') + if input is None: + input = '' + input_text = self.template.INSTRUCTION.format( + input=input, round=i + 1) + + if i == 0: + if self._system != '' and self._system is not None: + system = self.template.SYSTEM.format(system=self._system) + input_text = system + input_text + input_encode = self.tokenizer.encode( + input_text, add_special_tokens=True) + else: + input_encode = self.tokenizer.encode( + input_text, add_special_tokens=False) + input_ids += input_encode + labels += [IGNORE_INDEX] * len(input_encode) + + output_text = single_turn_conversation.get('output', '') + if self.template.get('SUFFIX', None): + output_text += self.template.SUFFIX + output_encode = self.tokenizer.encode( + output_text, add_special_tokens=False) + input_ids += output_encode + labels += copy.deepcopy(output_encode) + + if len(input_ids) > self.max_length: + input_ids = input_ids[:self.max_length] + labels = labels[:self.max_length] + print_log( + f'Warning: input_ids length({len(input_ids)}) ' + f'is longer than max_length, cut to {self.max_length}', + logger='current') + return {'input_ids': input_ids, 'labels': labels} + + +class LLaVADataset(Dataset): + os.environ['TOKENIZERS_PARALLELISM'] = 'true' + IMG_CONTEXT_TOKEN = '' + IMG_START_TOKEN = '' + IMG_END_TOKEN = '' + + IMAGENET_MEAN = (0.485, 0.456, 0.406) + IMAGENET_STD = (0.229, 0.224, 0.225) + + def __init__(self, + tokenizer, + data_path, + prompt_template, + special_tokens=None, + image_folder=None, + max_length=8192, + arch_type: Literal['intern_vl', 'qwen'] = 'intern_vl', + preprocessor=None, + skip_pure_text=False, + ): + + self.tokenizer = BUILDER.build(tokenizer) + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + self.image_folder = image_folder + self.template = prompt_template + self.max_length = max_length + + self._system = '' + + self.arch_type = arch_type + self.min_dynamic_patch = 1 + self.max_dynamic_patch = 12 + self.downsample_ratio = 0.5 + if self.arch_type == 'llava': + self.downsample_ratio = 1 + self.image_size = 448 + if self.arch_type == 'llava': + self.image_size = 336 + self.use_thumbnail = True + patch_size = 14 + self.patch_token = int( + (self.image_size // patch_size)**2 * (self.downsample_ratio**2)) + + + if self.arch_type == 'qwen': + self.IMG_CONTEXT_TOKEN = '<|image_pad|>' + self.IMG_START_TOKEN = '<|vision_start|>' + self.IMG_END_TOKEN = '<|vision_end|>' + elif self.arch_type == 'llava': + self.IMG_CONTEXT_TOKEN = '' + self.IMG_START_TOKEN = '' + self.IMG_END_TOKEN = '' + + if preprocessor is None: + self.transformer = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD) + ]) + self.preprocessor = None + else: + self.transformer = None + 
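# --- Illustrative sketch, not part of the original diff ---
# How a single image placeholder in a user turn is expanded into the string the LLM sees:
# a start tag, one context token per visual patch left after downsampling, and an end tag.
# The token literals below follow common InternVL-style conventions and are assumptions here,
# as is the '<image>' placeholder string.
def _image_token_expansion_sketch():
    IMG_START_TOKEN = '<img>'
    IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>'
    IMG_END_TOKEN = '</img>'

    patch_token = int((448 // 14) ** 2 * (0.5 ** 2))  # 256 context tokens per 448x448 tile
    num_image_tokens = 1 * patch_token                # e.g. a single tile / single image

    image_token_str = (f'{IMG_START_TOKEN}'
                       f'{IMG_CONTEXT_TOKEN * num_image_tokens}'
                       f'{IMG_END_TOKEN}')
    user_turn = '<image>\nPlease describe the image.'
    return user_turn.replace('<image>', image_token_str).strip()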
self.preprocessor = BUILDER.build(preprocessor) + + self.data = self._load_annotations(data_path, image_folder) + self._max_refetch = 1000 + + self.skip_pure_text = skip_pure_text + + def _load_annotations(self, data_path, image_folder=None): + data = json.load(open(data_path)) + return data + + def __getitem__(self, index): + for _ in range(self._max_refetch + 1): + data = self.prepare_data(index) + # Broken images may cause the returned data to be None + if data is None: + index = self._rand_another() + continue + return data + + def __len__(self): + return len(self.data) + + @property + def modality_length(self): + self.group_length = [] + for data_dict in self.data: + self.group_length.append(100) + return self.group_length + + @property + def length(self): + group_length = np.array(self.group_length) + group_length = np.abs(group_length).tolist() + return group_length + + def prepare_data(self, index): + data_dict: dict = self.data[index] + + if data_dict is None: + return None + + out_data_dict = {} + + if self.skip_pure_text and data_dict.get('image', None) is None: + return None + + if data_dict.get('image', None) is not None: + image_file = os.path.join(self.image_folder, data_dict['image']) + try: + image = Image.open(image_file).convert('RGB') + except Exception as e: + print(f'Error: {e}', flush=True) + print_log(f'Error: {e}', logger='current') + return None + if self.preprocessor is not None: + # images = dynamic_preprocess(image, self.min_dynamic_patch, + # self.max_dynamic_patch, + # self.image_size, self.use_thumbnail) + images = [image] + if self.arch_type == 'qwen': + _data_dict = self.preprocessor(images, do_resize=True) + _data_dict['pixel_values'] = torch.tensor(_data_dict['pixel_values'], dtype=torch.float) + _data_dict['image_grid_thw'] = torch.tensor(_data_dict['image_grid_thw'], dtype=torch.int) + num_image_tokens = int(_data_dict['image_grid_thw'][0].prod() * (self.downsample_ratio ** 2)) + elif self.arch_type == 'llava': + _data_dict = self.preprocessor(images, do_resize=True, size=(self.image_size, self.image_size)) + _data_dict['pixel_values'] = np.stack(_data_dict['pixel_values'], axis=0) + _data_dict['pixel_values'] = torch.tensor(_data_dict['pixel_values'], dtype=torch.float) + num_image_tokens = _data_dict['pixel_values'].shape[0] * self.patch_token + else: + raise NotImplementedError + out_data_dict.update(_data_dict) + else: + images = dynamic_preprocess(image, self.min_dynamic_patch, + self.max_dynamic_patch, + self.image_size, self.use_thumbnail) + pixel_values = [self.transformer(image) for image in images] + pixel_values = torch.stack(pixel_values) + out_data_dict['pixel_values'] = pixel_values + + num_image_tokens = pixel_values.shape[0] * self.patch_token + image_token_str = f'{self.IMG_START_TOKEN}' \ + f'{self.IMG_CONTEXT_TOKEN * num_image_tokens}' \ + f'{self.IMG_END_TOKEN}' + token_dict = self.get_inputid_labels( + data_dict['conversations'], image_token_str) + out_data_dict.update(token_dict) + else: + token_dict = self.get_inputid_labels( + data_dict['conversations'], None) + out_data_dict.update(token_dict) + out_data_dict['pixel_values'] = torch.zeros( + 1, 3, self.image_size, self.image_size) + return out_data_dict + + def _rand_another(self) -> int: + return np.random.randint(0, len(self.data)) + + def get_inputid_labels(self, conversations, image_token_str) -> dict: + input = '' + out_conversation = [] + while conversations and conversations[0]['from'] == 'gpt': + # Skip the first one if it is from gpt + conversations = conversations[1:] 
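# --- Illustrative sketch, not part of the original diff ---
# The LLaVA-style record this method consumes, and the shape of what it returns. The sample
# below is hypothetical; prompt tokens (system text, user turns, template boilerplate) are
# labelled with IGNORE_INDEX so that only assistant tokens contribute to the loss.
def _conversation_record_sketch():
    record = {
        'image': 'train2017/000000000139.jpg',  # hypothetical relative path
        'conversations': [
            {'from': 'human', 'value': '<image>\nWhat is in the picture?'},
            {'from': 'gpt', 'value': 'A cat sitting on a sofa.'},
            {'from': 'human', 'value': 'What colour is the cat?'},
            {'from': 'gpt', 'value': 'It is black and white.'},
        ],
    }
    # get_inputid_labels(record['conversations'], image_token_str) returns
    # {'input_ids': [...], 'labels': [...]} of equal length, truncated to max_length,
    # with every prompt token labelled -100.
    return record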
+ for msg in conversations: + if msg['from'] == 'human': + if image_token_str is None and '' in msg['value']: + msg['value'] = msg['value'].replace('', '') + if '' in msg['value']: + msg['value'] = msg['value'].replace('', image_token_str).strip() + input += msg['value'].strip() + elif msg['from'] == 'gpt': + out_conversation.append({ + 'input': input, + 'output': msg['value'].strip() + }) + input = '' + else: + raise NotImplementedError + + input_ids, labels = [], [] + for i, single_turn_conversation in enumerate(out_conversation): + input = single_turn_conversation.get('input', '') + if input is None: + input = '' + input_text = self.template.INSTRUCTION.format( + input=input, round=i + 1) + + if i == 0: + if self._system != '' and self._system is not None: + system = self.template.SYSTEM.format(system=self._system) + input_text = system + input_text + input_encode = self.tokenizer.encode( + input_text, add_special_tokens=True) + else: + input_encode = self.tokenizer.encode( + input_text, add_special_tokens=False) + input_ids += input_encode + labels += [IGNORE_INDEX] * len(input_encode) + + output_text = single_turn_conversation.get('output', '') + if self.template.get('SUFFIX', None): + output_text += self.template.SUFFIX + output_encode = self.tokenizer.encode( + output_text, add_special_tokens=False) + input_ids += output_encode + labels += copy.deepcopy(output_encode) + + if len(input_ids) > self.max_length: + input_ids = input_ids[:self.max_length] + labels = labels[:self.max_length] + print_log( + f'Warning: input_ids length({len(input_ids)}) ' + f'is longer than max_length, cut to {self.max_length}', + logger='current') + return {'input_ids': input_ids, 'labels': labels} + + +if __name__ == '__main__': + from transformers import CLIPImageProcessor, AutoTokenizer + from third_parts.segment_anything.utils.transforms import ResizeLongestSide + pretrained_model = 'MBZUAI/GLaMM-GranD-Pretrained' + llm_name_or_path = 'lmsys/vicuna-7b-v1.5' + + tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path) + image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path='openai/clip-vit-large-patch14-336') + extra_image_processor = dict( + type=ResizeLongestSide, + target_length=1024, + ) + from xtuner.utils.templates import PROMPT_TEMPLATE + prompt_template = PROMPT_TEMPLATE.vicuna + from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory, template_map_fn + from projects.glamm.datasets.collate_fns.glamm_collate_fn import glamm_collate_fn + + dataset = LLaVADataset( + tokenizer=tokenizer, + data_path='data/llava_data/LLaVA-Instruct-150K/llava_instruct_150k.json', + prompt_template=prompt_template, + special_tokens=['[SEG]'], + image_folder='data/coco/train2017/', + ) + for i in range(1000): + dataset[i] diff --git a/projects/llava_sam2/deepspeed_zero2_sam2.json b/projects/llava_sam2/deepspeed_zero2_sam2.json new file mode 100644 index 0000000000000000000000000000000000000000..ce917a4eef85d2acfb4dca1b249a3ca641d08807 --- /dev/null +++ b/projects/llava_sam2/deepspeed_zero2_sam2.json @@ -0,0 +1,24 @@ +{ + "gradient_accumulation_steps": "auto", + "train_micro_batch_size_per_gpu": "auto", + "gradient_clipping": "auto", + "zero_allow_untested_optimizer": true, + "zero_force_ds_cpu_optimizer": false, + "zero_optimization": { + "stage": 2, + "overlap_comm": true, + "allgather_bucket_size": 5368709120, + "reduce_bucket_size": 5368709120, + "reduce_scatter": true, + "sub_group_size": 1e9, + 
"contiguous_gradients": true, + "allgather_partitions": true + }, + "fp16": { + "enabled": false, + "initial_scale_power": 16 + }, + "bf16": { + "enabled": true + } +} diff --git a/projects/llava_sam2/gradio/app.py b/projects/llava_sam2/gradio/app.py new file mode 100644 index 0000000000000000000000000000000000000000..1b46f77e2ec9f52e529bf8c031c2eb02594f8773 --- /dev/null +++ b/projects/llava_sam2/gradio/app.py @@ -0,0 +1,151 @@ +import gradio as gr +import sys +from projects.llava_sam2.gradio.app_utils import\ + process_markdown, show_mask_pred, description, preprocess_video,\ + show_mask_pred_video, image2video_and_save + +import torch +from transformers import (AutoModel, AutoModelForCausalLM, AutoTokenizer, + BitsAndBytesConfig, CLIPImageProcessor, + CLIPVisionModel, GenerationConfig) +import argparse +import os + +TORCH_DTYPE_MAP = dict( + fp16=torch.float16, bf16=torch.bfloat16, fp32=torch.float32, auto='auto') + +def parse_args(args): + parser = argparse.ArgumentParser(description="Sa2VA Demo") + parser.add_argument('hf_path', help='Sa2VA hf path.') + return parser.parse_args(args) + +def inference(image, video, follow_up, input_str): + input_image = image + if image is not None and (video is not None and os.path.exists(video)): + return image, video, "Error: Please only input a image or a video !!!" + if image is None and (video is None or not os.path.exists(video)) and not follow_up: + return image, video, "Error: Please input a image or a video !!!" + + if not follow_up: + # reset + print('Log: History responses have been removed!') + global_infos.n_turn = 0 + global_infos.inputs = '' + text = input_str + + image = input_image + global_infos.image_for_show = image + global_infos.image = image + video = video + global_infos.video = video + + if image is not None: + global_infos.input_type = "image" + else: + global_infos.input_type = "video" + + else: + text = input_str + image = global_infos.image + video = global_infos.video + + input_type = global_infos.input_type + if input_type == "video": + video = preprocess_video(video, global_infos.inputs+input_str) + + past_text = global_infos.inputs + + if past_text == "" and "" not in text: + text = "" + text + if input_type == "image": + input_dict = { + 'image': image, + 'text': text, + 'past_text': past_text, + 'mask_prompts': None, + 'tokenizer': tokenizer, + } + else: + input_dict = { + 'video': video, + 'text': text, + 'past_text': past_text, + 'mask_prompts': None, + 'tokenizer': tokenizer, + } + + return_dict = sa2va_model.predict_forward(**input_dict) + global_infos.inputs = return_dict["past_text"] + print(return_dict['past_text']) + if 'prediction_masks' in return_dict.keys() and return_dict['prediction_masks'] and len( + return_dict['prediction_masks']) != 0: + if input_type == "image": + image_mask_show, selected_colors = show_mask_pred(global_infos.image_for_show, return_dict['prediction_masks'],) + video_mask_show = global_infos.video + else: + image_mask_show = None + video_mask_show, selected_colors = show_mask_pred_video(video, return_dict['prediction_masks'],) + video_mask_show = image2video_and_save(video_mask_show, save_path="./ret_video.mp4") + else: + image_mask_show = global_infos.image_for_show + video_mask_show = global_infos.video + selected_colors = [] + + predict = return_dict['prediction'].strip() + global_infos.n_turn += 1 + + predict = process_markdown(predict, selected_colors) + return image_mask_show, video_mask_show, predict + +def init_models(args): + model_path = args.hf_path + model = 
AutoModel.from_pretrained( + model_path, + torch_dtype=torch.bfloat16, + low_cpu_mem_usage=True, + use_flash_attn=True, + trust_remote_code=True, + ).eval().cuda() + + tokenizer = AutoTokenizer.from_pretrained( + model_path, + trust_remote_code=True, + ) + return model, tokenizer + +class global_infos: + inputs = '' + n_turn = 0 + image_width = 0 + image_height = 0 + + image_for_show = None + image = None + video = None + + input_type = "image" # "image" or "video" + +if __name__ == "__main__": + # get parse args and set models + args = parse_args(sys.argv[1:]) + + sa2va_model, tokenizer = \ + init_models(args) + + demo = gr.Interface( + inference, + inputs=[ + gr.Image(type="pil", label="Upload Image", height=360), + gr.Video(sources=["upload", "webcam"], label="Upload mp4 video", height=360), + gr.Checkbox(label="Follow up Question"), + gr.Textbox(lines=1, placeholder=None, label="Text Instruction"),], + outputs=[ + gr.Image(type="pil", label="Output Image"), + gr.Video(label="Output Video", show_download_button=True, format='mp4'), + gr.Markdown()], + theme=gr.themes.Soft(), allow_flagging="auto", description=description, + title='Sa2VA' + ) + + demo.queue() + demo.launch(share=True) \ No newline at end of file diff --git a/projects/llava_sam2/gradio/app_utils.py b/projects/llava_sam2/gradio/app_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..beb682bb8c08f071228d46ae8aa91907bbce506a --- /dev/null +++ b/projects/llava_sam2/gradio/app_utils.py @@ -0,0 +1,293 @@ +import numpy as np +from PIL import Image +import cv2 + +markdown_default = """ + + +Sa2VA +""" + +description = """ +**Usage** :
+ (1) For **Grounded Caption Generation** Interleaved Segmentation, input prompt like: *"Could you provide me with a detailed analysis of this photo? Please output with interleaved segmentation masks for the corresponding parts of the answer."* <br>
+ (2) For **Segmentation Output**, input prompt like: *"Can you please segment xxx in the given image"* <br>
+ (3) For **Image Captioning** VQA, input prompt like: *"Could you please give me a detailed description of the image?"* <br>
+ (4) For **Image Conversation**, input arbitrary text instruction. <br>
+""" + +ONE_THIRD = 1.0/3.0 +ONE_SIXTH = 1.0/6.0 +TWO_THIRD = 2.0/3.0 + +def desaturate(rgb, factor=0.65): + """ + Desaturate an RGB color by a given factor. + + :param rgb: A tuple of (r, g, b) where each value is in [0, 255]. + :param factor: The factor by which to reduce the saturation. + 0 means completely desaturated, 1 means original color. + :return: A tuple of desaturated (r, g, b) values in [0, 255]. + """ + r, g, b = [x / 255.0 for x in rgb] + h, l, s = rgb_to_hls(r, g, b) + l = factor + new_r, new_g, new_b = hls_to_rgb(h, l, s) + return (int(new_r * 255), int(new_g * 255), int(new_b * 255)) + +def rgb_to_hls(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + sumc = (maxc+minc) + rangec = (maxc-minc) + l = sumc/2.0 + if minc == maxc: + return 0.0, l, 0.0 + if l <= 0.5: + s = rangec / sumc + else: + s = rangec / (2.0-sumc) + rc = (maxc-r) / rangec + gc = (maxc-g) / rangec + bc = (maxc-b) / rangec + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, l, s + +def hls_to_rgb(h, l, s): + if s == 0.0: + return l, l, l + if l <= 0.5: + m2 = l * (1.0+s) + else: + m2 = l+s-(l*s) + m1 = 2.0*l - m2 + return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) + +def _v(m1, m2, hue): + hue = hue % 1.0 + if hue < ONE_SIXTH: + return m1 + (m2-m1)*hue*6.0 + if hue < 0.5: + return m2 + if hue < TWO_THIRD: + return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 + return m1 + +def process_markdown(output_str, colors): + output_str = output_str.replace("\n", "").replace(" ", " ").replace("", "")\ + .replace("<|im_end|>", '').replace("<|end|>", "") + output_str = output_str.split("ASSISTANT: ")[-1] + + # markdown_out = output_str.replace('[SEG]', '') + markdown_out = output_str + markdown_out = markdown_out.replace( + "
<p>", "<span style='color:rgb[COLOR];'>" + ) + markdown_out = markdown_out.replace("
</p>", "</span>") + + for color in colors: + markdown_out = markdown_out.replace("[COLOR]", str(desaturate(tuple(color))), 1) + + markdown_out = f""" + {markdown_out} + """ + markdown_out = markdown_default + "
<br>
" + markdown_out + return markdown_out + +def show_mask_pred(image, masks): + masks = [mask[:1] for mask in masks] + masks = np.concatenate(masks, axis=0) # (n, h, w) + + selected_colors = [] + + colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), + (255, 255, 0), (255, 0, 255), (0, 255, 255), + (128, 128, 255), [255, 192, 203], # Pink + [165, 42, 42], # Brown + [255, 165, 0], # Orange + [128, 0, 128], # Purple + [0, 0, 128], # Navy + [128, 0, 0], # Maroon + [128, 128, 0], # Olive + [70, 130, 180], # Steel Blue + [173, 216, 230], # Light Blue + [255, 192, 0], # Gold + [255, 165, 165], # Light Salmon + [255, 20, 147], # Deep Pink + ] + + _mask_image = np.zeros((masks.shape[1], masks.shape[2], 3), dtype=np.uint8) + + for i, mask in enumerate(masks): + color = colors[i % len(colors)] + selected_colors.append(color) + _mask_image[:, :, 0] = _mask_image[:, :, 0] + mask.astype(np.uint8) * color[0] + _mask_image[:, :, 1] = _mask_image[:, :, 1] + mask.astype(np.uint8) * color[1] + _mask_image[:, :, 2] = _mask_image[:, :, 2] + mask.astype(np.uint8) * color[2] + + + image = np.array(image) + image = image * 0.5 + _mask_image * 0.5 + image = image.astype(np.uint8) + return image, selected_colors + +def show_mask_pred_video(video, masks): + ret_video = [] + selected_colors = [] + colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), + (255, 255, 0), (255, 0, 255), (0, 255, 255), + (128, 128, 255), [255, 192, 203], # Pink + [165, 42, 42], # Brown + [255, 165, 0], # Orange + [128, 0, 128], # Purple + [0, 0, 128], # Navy + [128, 0, 0], # Maroon + [128, 128, 0], # Olive + [70, 130, 180], # Steel Blue + [173, 216, 230], # Light Blue + [255, 192, 0], # Gold + [255, 165, 165], # Light Salmon + [255, 20, 147], # Deep Pink + ] + for i_frame in range(len(video)): + frame_masks = [mask[i_frame:i_frame+1] for mask in masks] + frame_masks = np.concatenate(frame_masks, axis=0) + _mask_image = np.zeros((frame_masks.shape[1], frame_masks.shape[2], 3), dtype=np.uint8) + + for i, mask in enumerate(frame_masks): + if i_frame == 0: + color = colors[i % len(colors)] + selected_colors.append(color) + else: + color = selected_colors[i] + _mask_image[:, :, 0] = _mask_image[:, :, 0] + mask.astype(np.uint8) * color[0] + _mask_image[:, :, 1] = _mask_image[:, :, 1] + mask.astype(np.uint8) * color[1] + _mask_image[:, :, 2] = _mask_image[:, :, 2] + mask.astype(np.uint8) * color[2] + + image = np.array(video[i_frame]) + image = image * 0.5 + _mask_image * 0.5 + image = image.astype(np.uint8) + ret_video.append(image) + return ret_video, selected_colors + +def parse_visual_prompts(points): + ret = {'points': [], 'boxes': []} + for item in points: + if item[2] == 1.0: + ret['points'].append([item[0], item[1]]) + elif item[2] == 2.0 or item[2] == 3.0: + ret['boxes'].append([item[0], item[1], item[3], item[4]]) + else: + raise NotImplementedError + return ret + +def get_video_frames(video_path): + cap = cv2.VideoCapture(video_path) + + if not cap.isOpened(): + print("Error: Cannot open video file.") + return + + frames = [] + + frame_id = 0 + while True: + ret, frame = cap.read() + + if not ret: + break + + frames.append(frame) + + frame_id += 1 + + cap.release() + return frames + +def get_frames_from_video(video_path, n_frames=5, sample_type="uniform"): + frames = get_video_frames(video_path) + if sample_type == "uniform": + stride = len(frames) / (n_frames + 1e-4) + ret = [] + for i in range(n_frames): + idx = int(i * stride) + frame = frames[idx] + frame = frame[:, :, ::-1] + frame_image = Image.fromarray(frame).convert('RGB') + 
ret.append(frame_image) + else: + ret = [] + for frame in frames[:500]: + frame = frame[:, :, ::-1] + frame_image = Image.fromarray(frame).convert('RGB') + ret.append(frame_image) + return ret + +def preprocess_video(video_path, text): + if "Segment" in text or "segment" in text: + sample_type = 'begin' + else: + sample_type = 'uniform' + return get_frames_from_video(video_path, sample_type=sample_type) + +def image2video_and_save(frames, save_path): + success = frames_to_video(frames, save_path) + return save_path + + +def frames_to_video( + frames, + output_path: str, + fps: int = 24, +) -> bool: + try: + frames = [frame[:, :, ::-1] for frame in frames] + # Use provided frame size or get from first frame + height, width = frames[0].shape[:2] + + # Initialize video writer + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + out = cv2.VideoWriter(output_path, fourcc, fps, (width, height)) + + # Process each frame + for frame in frames: + out.write(frame) + + # Release video writer + out.release() + print(f"Video saved successfully to {output_path}") + return True + + except Exception as e: + print(f"Error converting frames to video: {str(e)}") + return False \ No newline at end of file diff --git a/projects/llava_sam2/models/__init__.py b/projects/llava_sam2/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1d428ea8f053e389304eb3b2b85e593de350f83b --- /dev/null +++ b/projects/llava_sam2/models/__init__.py @@ -0,0 +1,3 @@ +from .llava_sam2 import VideoLLaVASAMModel, VideoLLaVASAMModel_zero3 +from .sam2 import SAM2 +from .sam2_train import SAM2TrainRunner diff --git a/projects/llava_sam2/models/extension/__init__.py b/projects/llava_sam2/models/extension/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a5a69d58ba75749633d98e11a6792654958c9e0c --- /dev/null +++ b/projects/llava_sam2/models/extension/__init__.py @@ -0,0 +1 @@ +from .sam2_base import SAM2Base diff --git a/projects/llava_sam2/models/extension/sam2_base.py b/projects/llava_sam2/models/extension/sam2_base.py new file mode 100644 index 0000000000000000000000000000000000000000..4942bf42ea8c8a8bae0e4047a076b5c9272b4ddd --- /dev/null +++ b/projects/llava_sam2/models/extension/sam2_base.py @@ -0,0 +1,281 @@ +import torch +import torch.nn.functional as F + +from third_parts.sam2.modeling.sam2_base import SAM2Base as _SAM2Base +from third_parts.sam2.modeling.sam2_base import NO_OBJ_SCORE + + +class SAM2Base(_SAM2Base): + + def track_step( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + point_inputs, + mask_inputs, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + # Whether to run the memory encoder on the predicted masks. Sometimes we might want + # to skip the memory encoder with `run_mem_encoder=False`. For example, + # in demo we might call `track_step` multiple times for each user click, + # and only encode the memory when the user finalizes their clicks. And in ablation + # settings like SAM training on static images, we don't need the memory encoder. + run_mem_encoder=True, + # The previously predicted SAM mask logits (which can be fed together with new clicks in demo). 
+ prev_sam_mask_logits=None, + ## Extension: LLM prompt + language_embd=None, + ): + current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs} + # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW + if len(current_vision_feats) > 1: + high_res_features = [ + x.permute(1, 2, 0).view(x.size(1), x.size(2), *s) + for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1]) + ] + else: + high_res_features = None + if mask_inputs is not None and self.use_mask_input_as_output_without_sam: + # When use_mask_input_as_output_without_sam=True, we directly output the mask input + # (see it as a GT mask) without using a SAM prompt encoder + mask decoder. + pix_feat = current_vision_feats[-1].permute(1, 2, 0) + pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) + sam_outputs = self._use_mask_as_output( + pix_feat, high_res_features, mask_inputs + ) + else: + # fused the visual feature with previous memory features in the memory bank + pix_feat_with_mem = self._prepare_memory_conditioned_features( + frame_idx=frame_idx, + is_init_cond_frame=is_init_cond_frame, + current_vision_feats=current_vision_feats[-1:], + current_vision_pos_embeds=current_vision_pos_embeds[-1:], + feat_sizes=feat_sizes[-1:], + output_dict=output_dict, + num_frames=num_frames, + track_in_reverse=track_in_reverse, + ) + # apply SAM-style segmentation head + # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder, + # e.g. in demo where such logits come from earlier interaction instead of correction sampling + # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead) + if prev_sam_mask_logits is not None: + assert point_inputs is not None and mask_inputs is None + mask_inputs = prev_sam_mask_logits + multimask_output = self._use_multimask(is_init_cond_frame, point_inputs) + sam_outputs = self._forward_sam_heads( + backbone_features=pix_feat_with_mem, + point_inputs=point_inputs, + mask_inputs=mask_inputs, + high_res_features=high_res_features, + multimask_output=multimask_output, + # Inject language Embed if possible + language_embd=language_embd, + ) + ( + _, + _, + _, + low_res_masks, + high_res_masks, + obj_ptr, + _, + ) = sam_outputs + + current_out["pred_masks"] = low_res_masks + current_out["pred_masks_high_res"] = high_res_masks + current_out["obj_ptr"] = obj_ptr + + # Finally run the memory encoder on the predicted mask to encode + # it into a new memory feature (that can be used in future frames) + if run_mem_encoder and self.num_maskmem > 0: + high_res_masks_for_mem_enc = high_res_masks + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks_for_mem_enc, + is_mask_from_pts=(point_inputs is not None), + ) + current_out["maskmem_features"] = maskmem_features + current_out["maskmem_pos_enc"] = maskmem_pos_enc + else: + current_out["maskmem_features"] = None + current_out["maskmem_pos_enc"] = None + + return current_out + + + def _forward_sam_heads( + self, + backbone_features, + point_inputs=None, + mask_inputs=None, + high_res_features=None, + multimask_output=False, + ## Extension: LLM prompt + language_embd=None, + ): + """ + Forward SAM prompt encoders and mask heads. 
+ + Inputs: + - backbone_features: image features of [B, C, H, W] shape + - point_inputs: a dictionary with "point_coords" and "point_labels", where + 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the + absolute pixel-unit coordinate in (x, y) format of the P input points + 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means + positive clicks, 0 means negative clicks, and -1 means padding + - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the + same spatial size as the image. + - high_res_features: either 1) None or 2) or a list of length 2 containing + two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively, + which will be used as high-resolution feature maps for SAM decoder. + - multimask_output: if it's True, we output 3 candidate masks and their 3 + corresponding IoU estimates, and if it's False, we output only 1 mask and + its corresponding IoU estimate. + + Outputs: + - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if + `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM + output mask logits (before sigmoid) for the low-resolution masks, with 4x + the resolution (1/4 stride) of the input backbone_features. + - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3 + if `multimask_output=True` and M = 1 if `multimask_output=False`), + upsampled from the low-resolution masks, with shape size as the image + (stride is 1 pixel). + - ious, [B, M] shape, where (where M = 3 if `multimask_output=True` and M = 1 + if `multimask_output=False`), the estimated IoU of each output mask. + - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`. + If `multimask_output=True`, it's the mask with the highest IoU estimate. + If `multimask_output=False`, it's the same as `low_res_multimasks`. + - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`. + If `multimask_output=True`, it's the mask with the highest IoU estimate. + If `multimask_output=False`, it's the same as `high_res_multimasks`. + - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted + based on the output token from the SAM mask decoder. 
+ """ + B = backbone_features.size(0) + device = backbone_features.device + assert backbone_features.size(1) == self.sam_prompt_embed_dim + assert backbone_features.size(2) == self.sam_image_embedding_size + assert backbone_features.size(3) == self.sam_image_embedding_size + + # a) Handle point prompts + if point_inputs is not None: + sam_point_coords = point_inputs["point_coords"] + sam_point_labels = point_inputs["point_labels"] + assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B + else: + # If no points are provide, pad with an empty point (with label -1) + sam_point_coords = torch.zeros(B, 1, 2, device=device) + sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device) + + # b) Handle mask prompts + if mask_inputs is not None: + # If mask_inputs is provided, downsize it into low-res mask input if needed + # and feed it as a dense mask prompt into the SAM mask encoder + assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1) + if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size: + sam_mask_prompt = F.interpolate( + mask_inputs.float(), + size=self.sam_prompt_encoder.mask_input_size, + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + else: + sam_mask_prompt = mask_inputs + else: + # Otherwise, simply feed None (and SAM's prompt encoder will add + # a learned `no_mask_embed` to indicate no mask input in this case). + sam_mask_prompt = None + + sparse_embeddings, dense_embeddings = self.sam_prompt_encoder( + points=(sam_point_coords, sam_point_labels), + boxes=None, + masks=sam_mask_prompt, + ) + + ## Extension: LLM prompt + if language_embd is not None: + # B N C + assert sparse_embeddings.size(0) == language_embd.size(0) + assert sparse_embeddings.size(2) == language_embd.size(2) + sparse_embeddings = torch.cat([sparse_embeddings, language_embd], dim=1) + + ( + low_res_multimasks, + ious, + sam_output_tokens, + object_score_logits, + ) = self.sam_mask_decoder( + image_embeddings=backbone_features, + image_pe=self.sam_prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=False, # the image is already batched + high_res_features=high_res_features, + ) + if self.pred_obj_scores: + is_obj_appearing = object_score_logits > 0 + + # Mask used for spatial memories is always a *hard* choice between obj and no obj, + # consistent with the actual mask prediction + # print('Do torch.where !!!') + # low_res_multimasks = torch.where( + # is_obj_appearing[:, None, None], + # low_res_multimasks, + # NO_OBJ_SCORE, + # ) + + # convert masks from possibly bfloat16 (or float16) to float32 + # (older PyTorch versions before 2.1 don't support `interpolate` on bf16) + low_res_multimasks = low_res_multimasks.float() + high_res_multimasks = F.interpolate( + low_res_multimasks, + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + + sam_output_token = sam_output_tokens[:, 0] + if multimask_output: + # take the best mask prediction (with the highest IoU estimation) + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + else: + low_res_masks, high_res_masks = 
low_res_multimasks, high_res_multimasks + + # Extract object pointer from the SAM output token (with occlusion handling) + obj_ptr = self.obj_ptr_proj(sam_output_token) + if self.pred_obj_scores: + # Allow *soft* no obj ptr, unlike for masks + if self.soft_no_obj_ptr: + # Only hard possible with gt + assert not self.teacher_force_obj_scores_for_mem + lambda_is_obj_appearing = object_score_logits.sigmoid() + else: + lambda_is_obj_appearing = is_obj_appearing.float() + + if self.fixed_no_obj_ptr: + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_multimasks, + high_res_multimasks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) diff --git a/projects/llava_sam2/models/internvl.py b/projects/llava_sam2/models/internvl.py new file mode 100644 index 0000000000000000000000000000000000000000..d54f0b24d0917f0c3d697f90d2a5b8644879f18c --- /dev/null +++ b/projects/llava_sam2/models/internvl.py @@ -0,0 +1,548 @@ +import torch +from xtuner.model import InternVL_V1_5 +from typing import List, Optional, Tuple, Union +from transformers.modeling_outputs import CausalLMOutputWithPast + +from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM, + LlamaTokenizer) +import torch.nn as nn + +from mmengine import print_log +from torch.nn import CrossEntropyLoss +from transformers import (AutoConfig, AutoModel, AutoTokenizer, + BitsAndBytesConfig) +from xtuner.model.utils import (find_all_linear_names, get_peft_model_state_dict, + guess_load_checkpoint, make_inputs_require_grad) +import os + +def get_rank_and_world_size(): + rank = int(os.environ.get('RANK', 0)) + world_size = int(os.environ.get('WORLD_SIZE', 1)) + return rank, world_size + +# This function is used to split large model +def split_model(model_name): + import math + device_map = {} + num_gpus = torch.cuda.device_count() + rank, world_size = get_rank_and_world_size() + num_gpus = num_gpus // world_size + + num_layers = {'InternVL2-8B': 32, 'InternVL2-26B': 48, + 'InternVL2-40B': 60, 'InternVL2-Llama3-76B': 80}[model_name] + # Since the first GPU will be used for ViT, treat it as 0.8 GPU. 
+ num_layers_per_gpu = math.ceil(num_layers / (num_gpus - 0.2)) + num_layers_per_gpu = [num_layers_per_gpu] * num_gpus + num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.8) + layer_cnt = 0 + for i, num_layer in enumerate(num_layers_per_gpu): + for j in range(num_layer): + device_map[f'language_model.model.layers.{layer_cnt}'] = rank + world_size * i + layer_cnt += 1 + device_map['vision_model'] = rank + device_map['mlp1'] = rank + device_map['language_model.model.tok_embeddings'] = rank + device_map['language_model.model.embed_tokens'] = rank + device_map['language_model.output'] = rank + device_map['language_model.model.norm'] = rank + device_map['language_model.lm_head'] = rank + device_map[f'language_model.model.layers.{num_layers - 1}'] = rank + return device_map + +class InternVL_Slowfast(InternVL_V1_5): + + def __init__(self, + model_path, + freeze_llm=False, + freeze_visual_encoder=False, + llm_lora=None, + visual_encoder_lora=None, + quantization_vit=False, + quantization_llm=False, + pretrained_pth=None, + special_tokens=None, + model_split=False, + ): + print_log('Start to load InternVL_V1_5 model.', logger='current') + super(InternVL_V1_5, self).__init__() + self.freeze_llm = freeze_llm + self.freeze_visual_encoder = freeze_visual_encoder + self.use_llm_lora = llm_lora is not None + self.use_visual_encoder_lora = visual_encoder_lora is not None + self.quantization_vit = quantization_vit + self.quantization_llm = quantization_llm + if quantization_vit: + assert visual_encoder_lora is not None + if quantization_llm: + assert quantization_llm and llm_lora is not None + + config = AutoConfig.from_pretrained(model_path, trust_remote_code=True) + if config.llm_config.model_type == 'internlm2': + config.llm_config.attn_implementation = 'flash_attention_2' + else: + config.llm_config._attn_implementation = 'flash_attention_2' + + if quantization_vit is False and quantization_llm is False: + quantization = None + else: + llm_int8_skip_modules = ['mlp1'] + if quantization_llm and not quantization_vit: + llm_int8_skip_modules.append('vision_model') + + if quantization_vit and not quantization_llm: + llm_int8_skip_modules.append('language_model') + + quantization_config = dict( + type=BitsAndBytesConfig, + llm_int8_skip_modules=llm_int8_skip_modules, + load_in_4bit=True, + load_in_8bit=False, + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=torch.float16, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type='nf4') + quantization_clazz = quantization_config.pop('type') + quantization = quantization_clazz(**quantization_config) + + if model_split: + # print("\n\nDone Model Split !!!!!!!!!!!\n\n") + device_map = split_model("InternVL2-26B") + # print(device_map) + self.device = 'cuda' + self.model = AutoModel.from_pretrained( + model_path, + torch_dtype=torch.bfloat16, + trust_remote_code=True, + device_map=device_map).eval() + + else: + self.model = AutoModel.from_pretrained( + model_path, + torch_dtype=torch.bfloat16, + quantization_config=quantization, + config=config, + trust_remote_code=True) + + tokenizer = AutoTokenizer.from_pretrained( + model_path, trust_remote_code=True) + self.tokenizer = tokenizer + + if special_tokens is not None: + self._add_special_tokens(special_tokens) + + img_context_token_id = tokenizer.convert_tokens_to_ids('') + self.model.img_context_token_id = img_context_token_id + + if self.freeze_llm: + self.model.language_model.requires_grad_(False) + if self.freeze_visual_encoder: + 
self.model.vision_model.requires_grad_(False) + + if hasattr(self.model.language_model, 'enable_input_require_grads'): + self.model.language_model.enable_input_require_grads() + else: + self.model.language_model.get_input_embeddings( + ).register_forward_hook(make_inputs_require_grad) + + self.gradient_checkpointing_enable() + + if self.use_llm_lora: + self._prepare_llm_for_lora(llm_lora) + + if self.use_visual_encoder_lora: + self._prepare_visual_encoder_for_lora(visual_encoder_lora) + + if pretrained_pth is not None: + pretrained_state_dict = guess_load_checkpoint(pretrained_pth) + + self.load_state_dict(pretrained_state_dict, strict=False) + print(f'Load pretrained weight from {pretrained_pth}') + + self._count = 0 + print_log(self, logger='current') + print_log('InternVL_V1_5 construction is complete', logger='current') + + self.transfer_to_hf = False + + def _add_special_tokens(self, special_tokens): + num_new_tokens = self.tokenizer.add_tokens( + special_tokens, special_tokens=True) + + if num_new_tokens > 0: + self.model.language_model.resize_token_embeddings(len(self.tokenizer)) + + def _post_init(self, fast_pool_size=4, fast_pool=True): + if fast_pool: + self.fast_pool = nn.AdaptiveAvgPool2d((fast_pool_size, fast_pool_size)) + return + + def forward(self, data, data_samples=None, mode='loss', fast_token_idx=None): + if 'fast_pixel_values' in data.keys(): + assert fast_token_idx is not None + fast_pixel_values = data['fast_pixel_values'] + if type(fast_pixel_values) is list or fast_pixel_values.ndim == 5: + if type(fast_pixel_values) is list: + fast_pixel_values = [ + x.unsqueeze(0) if x.ndim == 3 else x for x in fast_pixel_values + ] + # b*n, c, h, w + fast_concat_images = torch.cat( + [image.to(self.model.vision_model.dtype) for image in fast_pixel_values], dim=0) + else: + raise NotImplementedError() + else: + fast_pixel_values = None + fast_concat_images = None + + pixel_values = data['pixel_values'] + + if type(pixel_values) is list or pixel_values.ndim == 5: + if type(pixel_values) is list: + pixel_values = [ + x.unsqueeze(0) if x.ndim == 3 else x for x in pixel_values + ] + # b*n, c, h, w + concat_images = torch.cat( + [image.to(self.model.vision_model.dtype) for image in pixel_values], dim=0) + else: + raise NotImplementedError() + + input_ids = data['input_ids'] + position_ids = data['position_ids'] + attention_mask = data['attention_mask'] + # sum is 0 are text + image_flags = torch.sum(concat_images, dim=(1, 2, 3)) != 0 + image_flags = image_flags.long() + + labels = data['labels'] + use_cache = False + + if 'vp_overall_mask' not in data.keys(): + vp_overall_mask = None + else: + vp_overall_mask = data['vp_overall_mask'] + + if 'prompt_masks' in data.keys(): + prompt_masks = data['prompt_masks'] + else: + prompt_masks = None + + outputs = self._llm_forward( + input_ids=input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + image_flags=image_flags, + pixel_values=concat_images, + labels=labels, + use_cache=use_cache, + output_hidden_states=True, + fast_pixel_values=fast_concat_images, + fast_token_idx=fast_token_idx, + vp_overall_mask=vp_overall_mask, + prompt_masks=prompt_masks, + ) + + return outputs + + def _llm_forward( + self, + pixel_values: torch.FloatTensor, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + image_flags: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + labels: Optional[torch.LongTensor] = 
None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + fast_pixel_values=None, + fast_token_idx=None, + vp_overall_mask=None, + prompt_masks=None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + return_dict = return_dict if return_dict is not None \ + else self.model.config.use_return_dict + + image_flags = image_flags.squeeze(-1) + # We only added the clone code here to avoid the error. + input_embeds = self.model.language_model.get_input_embeddings()( + input_ids).clone() + + if fast_pixel_values is not None: + n_fast_images = fast_pixel_values.shape[0] + whole_pixel_values = torch.cat([fast_pixel_values, pixel_values], dim=0) + vit_embeds = self.model.extract_feature(whole_pixel_values) + vit_embeds = vit_embeds.to(input_embeds.dtype) # FIXME: why vit_embeds is float16? + fast_vit_embeds = vit_embeds[:n_fast_images] # (n_fast_images, hw, c) + _size = int(fast_vit_embeds.shape[1] ** 0.5) + fast_vit_embeds = fast_vit_embeds.reshape(fast_vit_embeds.shape[0], _size, _size, fast_vit_embeds.shape[-1]) + # pooling + fast_vit_embeds = fast_vit_embeds.permute(0, 3, 1, 2) # (n_fast_images, c, h, w) + fast_vit_embeds = self.fast_pool(fast_vit_embeds).flatten(2) # (n_fast_images, c, hw) + fast_vit_embeds = fast_vit_embeds.permute(0, 2, 1) + vit_embeds = vit_embeds[n_fast_images:] + else: + vit_embeds = self.model.extract_feature(pixel_values) + vit_embeds = vit_embeds.to(input_embeds.dtype) # FIXME: why vit_embeds is float16? + fast_vit_embeds = None + + vit_embeds = vit_embeds[image_flags == 1] + vit_batch_size = pixel_values.shape[0] + + B, N, C = input_embeds.shape + input_embeds = input_embeds.reshape(B * N, C) + + self._count += 1 + + if vp_overall_mask is not None and prompt_masks is not None: + vp_embeds = [] + vp_overall_mask = vp_overall_mask.to(vit_embeds.device).bool() + prompt_masks = [item.to(vit_embeds.device).bool() for item in prompt_masks] + + vp_overall_mask = vp_overall_mask[image_flags == 1] + overall_tile_vit_embeds = vit_embeds[vp_overall_mask] # (n_img, hw, c) + + i_vp_img = 0 + for i_img in range(len(vit_embeds)): + vp_embeds.append(vit_embeds[i_img].reshape(-1, C)) + if vp_overall_mask[i_img]: + tile_vit_embeds = overall_tile_vit_embeds[i_vp_img].reshape(-1, C) # (hw, C) + objects_prompt_masks = prompt_masks[i_vp_img] + n_obj = len(objects_prompt_masks) + tile_vit_embeds = tile_vit_embeds.unsqueeze(0).repeat(n_obj, 1, 1) + objects_prompt_masks = objects_prompt_masks.reshape(n_obj, -1) + vp_embeds.append(tile_vit_embeds[objects_prompt_masks]) + i_vp_img += 1 + vp_embeds = torch.cat(vp_embeds, dim=0) + else: + vp_embeds = None + + input_ids = input_ids.reshape(B * N) + selected = (input_ids == self.model.img_context_token_id) + + if vp_embeds is None: + try: + input_embeds[selected] = vit_embeds.reshape(-1, C) + except Exception as e: + vit_embeds = vit_embeds.reshape(-1, C) + print(f'warning: {e}, input_embeds[selected].shape=' + f'{input_embeds[selected].shape}, ' + f'vit_embeds.shape={vit_embeds.shape}') + n_token = selected.sum() + if n_token > len(vit_embeds): + print(f"Wrong !!! 
{n_token} image tokens in text but only {len(vit_embeds)} vit embeds !!!") + expand_ratio = n_token // len(vit_embeds) + 1 + vit_embeds = torch.cat([vit_embeds] * expand_ratio, dim=0) + + input_embeds[selected] = vit_embeds[:n_token] + else: + try: + input_embeds[selected] = vp_embeds.reshape(-1, C) + except Exception as e: + vp_embeds = vp_embeds.reshape(-1, C) + print(f'warning: {e}, input_embeds[selected].shape=' + f'{input_embeds[selected].shape}, ' + f'vp_embeds.shape={vp_embeds.shape}') + n_token = selected.sum() + if n_token > len(vp_embeds): + print(f"Wrong !!! {n_token} image tokens in text but only {len(vp_embeds)} vit embeds !!!") + expand_ratio = n_token // len(vp_embeds) + 1 + vp_embeds = torch.cat([vp_embeds] * expand_ratio, dim=0) + + input_embeds[selected] = vp_embeds[:n_token] + + if fast_vit_embeds is not None: + selected = (input_ids == fast_token_idx) + selected_tot = selected.sum().item() + if selected_tot > fast_vit_embeds.shape[0] * fast_vit_embeds.shape[1]: + assert selected_tot % (fast_vit_embeds.shape[0] * fast_vit_embeds.shape[1]) == 0 + repeat_times = selected_tot / (fast_vit_embeds.shape[0] * fast_vit_embeds.shape[1]) + fast_vit_embeds = fast_vit_embeds.repeat(int(repeat_times), 1, 1) + try: + input_embeds[selected] = fast_vit_embeds.reshape(-1, C) + except Exception as e: + fast_vit_embeds = fast_vit_embeds.reshape(-1, C) + print(f'warning: {e}, input_embeds[fast_selected].shape=' + f'{input_embeds[selected].shape}, ' + f'fast_vit_embeds.shape={fast_vit_embeds.shape}') + n_token = selected.sum() + input_embeds[selected] = fast_vit_embeds[:n_token] + + input_embeds = input_embeds.reshape(B, N, C) + + outputs = self.model.language_model( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + logits = outputs.logits + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view( + -1, self.model.language_model.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits, ) + outputs[1:] + return (loss, ) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + @torch.no_grad() + def generate( + self, + pixel_values: Optional[torch.FloatTensor] = None, + input_ids: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + visual_features: Optional[torch.FloatTensor] = None, + generation_config: Optional[GenerationConfig] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + fast_token_idx=None, + fast_pixel_values=None, + prompt_masks=None, + vp_overall_mask=None, + **generate_kwargs, + ) -> torch.LongTensor: + device = self.model.device + assert self.model.img_context_token_id is not None + + if fast_pixel_values is not None: + assert fast_token_idx is not None + if type(fast_pixel_values) is list or fast_pixel_values.ndim == 5: + if 
type(fast_pixel_values) is list: + fast_pixel_values = [ + x.unsqueeze(0) if x.ndim == 3 else x for x in fast_pixel_values + ] + # b*n, c, h, w + fast_pixel_values = torch.cat( + [image.to(self.model.vision_model.dtype) for image in fast_pixel_values], dim=0) + + if pixel_values is not None: + if visual_features is not None: + vit_embeds = visual_features + else: + if type(pixel_values) is list or pixel_values.ndim == 5: + if type(pixel_values) is list: + pixel_values = [ + x.unsqueeze(0) if x.ndim == 3 else x for x in pixel_values + ] + # b*n, c, h, w + pixel_values = torch.cat( + [image.to(self.model.vision_model.dtype) for image in pixel_values], dim=0) + + if fast_pixel_values is not None: + n_fast_images = fast_pixel_values.shape[0] + whole_pixel_values = torch.cat([fast_pixel_values, pixel_values], dim=0) + vit_embeds = self.model.extract_feature(whole_pixel_values.to(device)) + # vit_embeds = vit_embeds.to(input_embeds.dtype) # FIXME: why vit_embeds is float16? + fast_vit_embeds = vit_embeds[:n_fast_images] # (n_fast_images, hw, c) + _size = int(fast_vit_embeds.shape[1] ** 0.5) + fast_vit_embeds = fast_vit_embeds.reshape(fast_vit_embeds.shape[0], _size, _size, + fast_vit_embeds.shape[-1]) + # pooling + fast_vit_embeds = fast_vit_embeds.permute(0, 3, 1, 2) # (n_fast_images, c, h, w) + fast_vit_embeds = self.fast_pool(fast_vit_embeds).flatten(2) # (n_fast_images, c, hw) + fast_vit_embeds = fast_vit_embeds.permute(0, 2, 1) + vit_embeds = vit_embeds[n_fast_images:] + else: + fast_vit_embeds = None + vit_embeds = self.model.extract_feature(pixel_values.to(device)) + image_flags = torch.sum(pixel_values, dim=(1, 2, 3)) != 0 + image_flags = image_flags.long() + vit_embeds = vit_embeds[image_flags == 1] + + input_embeds = self.model.language_model.get_input_embeddings()(input_ids.to(device)) + B, N, C = input_embeds.shape + input_embeds = input_embeds.reshape(B * N, C) + + if vp_overall_mask is not None and prompt_masks is not None: + vp_embeds = [] + vp_overall_mask = vp_overall_mask.to(vit_embeds.device).bool() + prompt_masks = [item.to(vit_embeds.device).bool() for item in prompt_masks] + + vp_overall_mask = vp_overall_mask[image_flags == 1] + overall_tile_vit_embeds = vit_embeds[vp_overall_mask] # (n_img, hw, c) + + i_vp_img = 0 + for i_img in range(len(vit_embeds)): + vp_embeds.append(vit_embeds[i_img].reshape(-1, C)) + if vp_overall_mask[i_img]: + tile_vit_embeds = overall_tile_vit_embeds[i_vp_img].reshape(-1, C) # (hw, C) + objects_prompt_masks = prompt_masks[i_vp_img] + n_obj = len(objects_prompt_masks) + tile_vit_embeds = tile_vit_embeds.unsqueeze(0).repeat(n_obj, 1, 1) + objects_prompt_masks = objects_prompt_masks.reshape(n_obj, -1) + vp_embeds.append(tile_vit_embeds[objects_prompt_masks]) + i_vp_img += 1 + vp_embeds = torch.cat(vp_embeds, dim=0) + else: + vp_embeds = None + + input_ids = input_ids.reshape(B * N) + selected = (input_ids == self.model.img_context_token_id) + assert selected.sum() != 0 + if vp_embeds is None: + input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device) + else: + if len(input_embeds[selected]) != len(vp_embeds.reshape(-1, C)): + print("Shape mismatch, selected is {}, vp embeds is {} !!!"\ + .format(len(input_embeds[selected]), len(vp_embeds.reshape(-1, C)))) + min_tokens = min(len(input_embeds[selected]), len(vp_embeds.reshape(-1, C))) + input_embeds[selected][:min_tokens] = vp_embeds.reshape(-1, C)[:min_tokens].to(input_embeds.device) + else: + input_embeds[selected] = vp_embeds.reshape(-1, C).to(input_embeds.device) + + if 
fast_vit_embeds is not None: + selected = (input_ids == fast_token_idx) + # FIXME, add repeat. + assert selected.sum() != 0 + input_embeds[selected] = fast_vit_embeds.reshape(-1, C).to(input_embeds.device) + + input_embeds = input_embeds.reshape(B, N, C) + else: + input_embeds = self.model.language_model.get_input_embeddings()(input_ids) + + outputs = self.model.language_model.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask.to(device), + generation_config=generation_config, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + use_cache=True, + **generate_kwargs, + ) + + return outputs + + def state_dict(self, *args, **kwargs): + if self.transfer_to_hf: + state_dict = super(InternVL_V1_5, self).state_dict(*args, **kwargs) + return state_dict + else: + return super().state_dict(*args, **kwargs) + + diff --git a/projects/llava_sam2/models/lisa.py b/projects/llava_sam2/models/lisa.py new file mode 100644 index 0000000000000000000000000000000000000000..df965afb84ee9e428886e4e1732bf2760975eaa0 --- /dev/null +++ b/projects/llava_sam2/models/lisa.py @@ -0,0 +1,242 @@ + +import torch +import torch.nn as nn + +from mmengine.model import BaseModel + +from xtuner.registry import BUILDER +from xtuner.model.utils import get_peft_model_state_dict + + +class LisaModel(BaseModel): + def __init__(self, + mllm, + tokenizer, + grounding_encoder, + loss_mask=None, + loss_dice=None,): + super(LisaModel, self).__init__() + self.mllm = BUILDER.build(mllm) + + if self.mllm.use_llm_lora: + self.mllm.model.language_model.base_model.model.lm_head.requires_grad_(True) + self.mllm.model.language_model.base_model.model.model.embed_tokens.requires_grad_(True) + + self.tokenizer = BUILDER.build(tokenizer) + self._add_special_tokens() + self.grounding_encoder = BUILDER.build(grounding_encoder) + self.grounding_encoder.requires_grad_(False) + self.grounding_encoder.mask_decoder.requires_grad_(True) + + in_dim = self.mllm.model.config.llm_config.hidden_size + out_dim = self.grounding_encoder.mask_decoder.transformer_dim + self.text_hidden_fcs = nn.Sequential( + nn.Linear(in_dim, in_dim), nn.ReLU(inplace=True), + nn.Linear(in_dim, out_dim), nn.Dropout(0.0) + ) + + self.loss_mask = BUILDER.build(loss_mask) + self.loss_dice = BUILDER.build(loss_dice) + + def _add_special_tokens(self): + special_tokens = ['[SEG]'] + num_new_tokens = self.tokenizer.add_tokens( + special_tokens, special_tokens=True) + if num_new_tokens > 0: + self.mllm.model.language_model.resize_token_embeddings(len(self.tokenizer)) + + self.seg_token_idx = self.tokenizer("[SEG]", add_special_tokens=False).input_ids[0] + + def _generate_and_postprocess_masks(self, pred_embeddings, image_embeddings, resize_list=None, orig_size_list=None): + pred_masks = [] + for i, pred_embedding in enumerate(pred_embeddings): + sparse_embeddings, dense_embeddings = self.grounding_encoder.prompt_encoder( + points=None, boxes=None, masks=None, text_embeds=pred_embedding.unsqueeze(1) + ) + sparse_embeddings = sparse_embeddings.to(pred_embedding.dtype) + low_res_masks, _ = self.grounding_encoder.mask_decoder( + image_embeddings=image_embeddings[i].unsqueeze(0), + image_pe=self.grounding_encoder.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, + multimask_output=False, ) + + pred_mask = self.grounding_encoder.postprocess_masks( + low_res_masks, input_size=resize_list[i], original_size=orig_size_list[i], ) + pred_masks.append(pred_mask[:, 0]) + return pred_masks + + def 
load_state_dict(self, state_dict, strict: bool = True, assign: bool = False): + return super().load_state_dict(state_dict, strict, assign) + + def state_dict(self, *args, **kwargs): + state_dict = super().state_dict(*args, **kwargs) + from collections import OrderedDict + + to_return = OrderedDict() + # Step 1. visual_encoder + if self.mllm.use_visual_encoder_lora: + to_return.update( + get_peft_model_state_dict( + self.mllm.model.vision_model, state_dict=state_dict)) + elif not self.mllm.freeze_visual_encoder: + to_return.update({ + k: v + for k, v in state_dict.items() if 'visual_encoder.' in k + }) + # Step 2. LLM + if self.mllm.use_llm_lora: + to_return.update( + get_peft_model_state_dict(self.mllm.model.language_model, state_dict=state_dict)) + elif not self.mllm.freeze_llm: + to_return.update( + {k: v + for k, v in state_dict.items() if 'llm.' in k}) + # Step 3. Projector + to_return.update( + {k: v + for k, v in state_dict.items() if 'mlp1.' in k}) + to_return.update( + {k: v + for k, v in state_dict.items() if 'grounding_encoder.mask_decoder.' in k}) + to_return.update( + {k: v + for k, v in state_dict.items() if 'text_hidden_fcs.' in k}) + to_return.update( + {k: v + for k, v in state_dict.items() if 'lm_head.weight' in k}) + to_return.update( + {k: v + for k, v in state_dict.items() if 'embed_tokens.weight' in k}) + return to_return + + def forward(self, data, data_samples=None, mode='loss'): + if mode == 'loss': + return self.compute_loss(data) + elif mode == 'predict': + return self.predict(data) + elif mode == 'tensor': + return self._forward(data) + else: + raise NotImplementedError + + def compute_loss(self,data, data_samples=None, mode='loss'): + g_pixel_values = data.pop('g_pixel_values', None) + gt_masks = data.pop('masks', None) + input_ids = data['input_ids'] + output = self.mllm(data, data_samples, mode) + if gt_masks is None: + g_pixel_values = [ + torch.randn(3, 512, 1024).to(output.hidden_states[-1]) + for _ in range(len(input_ids))] + ori_size_list = [(512, 1024) for _ in range(len(input_ids))] + seg_token_mask = torch.zeros_like(input_ids).bool() + seg_token_mask[:, -2] = True + else: + ori_size_list = [mask.shape[-2:] for mask in gt_masks] + seg_token_mask = input_ids == self.seg_token_idx + + resize_list = [pixel.shape[-2:] for pixel in g_pixel_values] + g_pixel_values = torch.stack([ + self.grounding_encoder.preprocess(pixel) for pixel in g_pixel_values + ]) + image_embeddings = self.grounding_encoder.image_encoder(g_pixel_values) + + seg_token_mask = seg_token_mask[:, 1:] + seg_token_mask = torch.cat([ + seg_token_mask, + seg_token_mask.new_zeros(seg_token_mask.shape[0], 1)], dim=-1) + + hidden_states = output.hidden_states + hidden_states = self.text_hidden_fcs(hidden_states[-1]) + pred_embeddings = hidden_states[seg_token_mask] + + seg_token_counts = seg_token_mask.int().sum(-1) + pred_embeddings_list = torch.split(pred_embeddings, seg_token_counts.tolist(), dim=0) + + pred_masks = self._generate_and_postprocess_masks( + pred_embeddings_list, image_embeddings, resize_list, ori_size_list) + + if gt_masks is None: + return { + 'loss_mask': pred_masks[0].sum() * 0.0, + 'loss_dice': pred_masks[0].sum() * 0.0, + 'llm_loss': output.loss, + } + bs = len(pred_masks) + loss_mask, loss_dice = 0, 0 + for i in range(bs): + pred_mask = pred_masks[i] + gt_mask = gt_masks[i] + + sam_loss_mask = self.loss_mask(pred_mask, gt_mask) + sam_loss_dice = self.loss_dice(pred_mask, gt_mask) + accuracy = torch.eq((pred_mask.sigmoid() > 0.5), gt_mask).to(pred_mask).mean() + loss_mask 
+= sam_loss_mask + loss_dice += sam_loss_dice + + loss_dict = { + 'loss_mask': loss_mask / bs, + 'loss_dice': loss_dice / bs, + 'llm_loss': output.loss, + } + return loss_dict + + def predict(self, data): + generation_config = dict(max_new_tokens=1024, do_sample=False) + eos_token_id = self.tokenizer.convert_tokens_to_ids('<|end|>') + generation_config['eos_token_id'] = eos_token_id + pixel_values = data.pop('pixel_values') + attention_mask = data.pop('attention_mask', None) + input_ids = data['input_ids'] + generate_output = self.mllm.generate( + pixel_values=pixel_values, + input_ids=input_ids, + attention_mask=attention_mask, + output_hidden_states=True, + return_dict_in_generate=True, + **generation_config, + ) + device = self.mllm.model.device + + hidden_states = generate_output.hidden_states + last_hidden_states = [item[-1] for item in hidden_states[1:]] # remove input_ids + last_hidden_states = torch.cat(last_hidden_states, dim=1) + last_hidden_states = last_hidden_states[0] # remove batch dim + output_ids = generate_output.sequences[0][:-1] # remove batch dim and eos token + output_text = self.tokenizer.decode(output_ids) + seg_mask = output_ids == self.seg_token_idx + if seg_mask.sum() == 0: + return dict( + pred_mask_logits=None, + output_text=output_text, + ) + seg_embeds = self.text_hidden_fcs(last_hidden_states[seg_mask]) + + g_pixel_values = data.pop('g_pixel_values', None) + gt_masks = data['masks'] + + ori_size_list = [mask.shape[-2:] for mask in gt_masks] + resize_list = [pixel.shape[-2:] for pixel in g_pixel_values] + g_pixel_values = torch.stack([ + self.grounding_encoder.preprocess(pixel.to(device)) for pixel in g_pixel_values + ]) + image_embeddings = self.grounding_encoder.image_encoder(g_pixel_values) + pred_masks = self._generate_and_postprocess_masks( + [seg_embeds], image_embeddings, resize_list, ori_size_list) + + return dict( + pred_mask_logits=pred_masks[0], # remove batch dim + output_text=output_text, + ) + + def gradient_checkpointing_enable(self): + self.activation_checkpointing_enable() + + def activation_checkpointing_enable(self): + self.mllm.model.language_model.gradient_checkpointing_enable() + + def gradient_checkpointing_disable(self): + self.activation_checkpointing_disable() + + def activation_checkpointing_disable(self): + self.mllm.model.language_model.gradient_checkpointing_disable() diff --git a/projects/llava_sam2/models/llava_sam2.py b/projects/llava_sam2/models/llava_sam2.py new file mode 100644 index 0000000000000000000000000000000000000000..2e27a32b837a236703dc90a647768b4658a62f6d --- /dev/null +++ b/projects/llava_sam2/models/llava_sam2.py @@ -0,0 +1,903 @@ +from typing import Literal + +import torch +import torch.nn as nn +import torch.nn.functional as F +from third_parts.mmdet.models.losses import CrossEntropyLoss + +from xtuner.registry import BUILDER +from xtuner.model.utils import get_peft_model_state_dict + +from .lisa import LisaModel + +from xtuner.utils import PROMPT_TEMPLATE +from xtuner.tools.utils import get_stop_criteria +from transformers import GenerationConfig +from projects.llava_sam2.models.preprocess.image_resize import DirectResize + +import numpy as np + +from .internvl import InternVL_Slowfast +from .utils import dynamic_preprocess + +import torchvision.transforms as T +from torchvision.transforms.functional import InterpolationMode + +from pycocotools import mask as _mask + +from types import MethodType + +from xtuner.model.utils import guess_load_checkpoint + +from mmcv.ops import point_sample +from 
third_parts.mmdet.models.utils import get_uncertain_point_coords_with_randomness + +class VideoLLaVASAMModel(LisaModel): + def __init__(self, + mllm, + tokenizer, + grounding_encoder, + loss_mask=None, + loss_dice=None, + torch_dtype=torch.bfloat16, + pretrained_pth=None, + frozen_sam2_decoder=True, + special_tokens=None, + loss_sample_points=False, + num_points=12544, + # for slow fast arch + fast_pool=False, + fast_pool_size=4, + use_fast_supervision=False, + # for inference + phi3=True, + template=None, + # for arch selection + arch_type:Literal['intern_vl', 'qwen', 'llava']='intern_vl', + # for inference large model + split_model=False, + # ext + preprocessor=None, + # bs + bs:int=0, + ): + super(LisaModel, self).__init__() + self.split_model = split_model + if split_model: + mllm.model_split = split_model + if special_tokens is None: + special_tokens = ['[SEG]'] + self.special_tokens = special_tokens + if 'special_tokens' not in mllm.keys(): + mllm.special_tokens = special_tokens + self.mllm = BUILDER.build(mllm) + self.arch_type = arch_type + + self.fast_pool = fast_pool + self.fast_pool_size = fast_pool_size + if hasattr(self.mllm, '_post_init'): + self.mllm._post_init( + fast_pool_size=self.fast_pool_size, + fast_pool=self.fast_pool + ) + else: + print("No _post_init() in mllm !!!") + + self.tokenizer = BUILDER.build(tokenizer) + self._add_special_tokens() + self.grounding_encoder = BUILDER.build(grounding_encoder) + self.grounding_encoder.requires_grad_(False) + if not frozen_sam2_decoder: + self.grounding_encoder.sam2_model.sam_mask_decoder.requires_grad_(True) + + if self.mllm.use_llm_lora: + if self.arch_type == 'intern_vl': + self.mllm.model.language_model.base_model.model.get_input_embeddings().requires_grad_(True) + self.mllm.model.language_model.base_model.model.get_output_embeddings().requires_grad_(True) + elif self.arch_type == 'qwen': + self.mllm.model.model.base_model.model.get_input_embeddings().requires_grad_(True) + self.mllm.model.get_output_embeddings().weight.requires_grad_(True) + elif self.arch_type == 'llava': + self.mllm.model.language_model.base_model.model.get_input_embeddings().requires_grad_(True) + self.mllm.model.language_model.base_model.model.get_output_embeddings().requires_grad_(True) + # self.mllm.model.language_model.base_model.model.lm_head.requires_grad_(True) + # self.mllm.model.language_model.base_model.model.model.embed_tokens.requires_grad_(True) + + if self.arch_type == 'intern_vl': + in_dim = self.mllm.model.config.llm_config.hidden_size + elif self.arch_type == 'qwen': + in_dim = self.mllm.model.config.hidden_size + elif self.arch_type == 'llava': + # for llava, the hidden size is in language model + in_dim = self.mllm.model.language_model.config.hidden_size + out_dim = self.grounding_encoder.hidden_dim + self.text_hidden_fcs = nn.Sequential( + nn.Linear(in_dim, in_dim), nn.ReLU(inplace=True), + nn.Linear(in_dim, out_dim), nn.Dropout(0.0) + ) + + if use_fast_supervision: + self.text_exist_fcs = nn.Sequential( + nn.Linear(in_dim, in_dim), nn.ReLU(inplace=True), + nn.Linear(in_dim, 1), nn.Dropout(0.0) + ) + + self.loss_mask = BUILDER.build(loss_mask) + self.loss_dice = BUILDER.build(loss_dice) + if use_fast_supervision: + self.loss_exists = BUILDER.build(dict( + type=CrossEntropyLoss, + use_sigmoid=True, + reduction='mean', + loss_weight=1.0) + ) + + self.torch_dtype = torch_dtype + + if pretrained_pth is not None: + pretrained_state_dict = guess_load_checkpoint(pretrained_pth) + self.load_state_dict(pretrained_state_dict, strict=False) + 
print(f'Load pretrained weight from {pretrained_pth}') + + self.loss_sample_points = loss_sample_points + self.num_points = num_points + self.oversample_ratio = 3.0 + self.importance_sample_ratio = 0.75 + + if fast_pool: + self.fast_token_idx = self.tokenizer("", add_special_tokens=False).input_ids[0] + else: + self.fast_token_idx = None + self.use_fast_supervision = use_fast_supervision + + self.phi3 = phi3 + self.template = template + + if preprocessor is None: + self.preprocessor = preprocessor + else: + self.preprocessor = BUILDER.build(preprocessor) + + self.bs = bs + + def _merge_lora(self): + # print('pre merge lora: ', self.mllm.model.language_model.base_model.model.get_input_embeddings().weight.shape) + try: + self.mllm.model.language_model = self.mllm.model.language_model.merge_and_unload() + except: + print("Skip language model, no LoRA in it !!!") + try: + self.mllm.model.vision_model = self.mllm.model.vision_model.merge_and_unload() + except: + print("Skip vision encoder, no LoRA in it !!!") + # print('after merge lora: ', self.mllm.model.language_model.get_input_embeddings().weight.shape) + return + + def all_state_dict(self, *args, **kwargs): + state_dict = super(LisaModel, self).state_dict(*args, **kwargs) + return state_dict + + def activation_checkpointing_disable(self): + if self.arch_type == 'qwen': + self.mllm.model.model.gradient_checkpointing_disable() + else: + self.mllm.model.language_model.gradient_checkpointing_disable() + + + def _add_special_tokens(self): + special_tokens = self.special_tokens + _num_new_tokens = self.tokenizer.add_tokens(special_tokens, special_tokens=True) + + # if not isinstance(self.mllm.model.language_model.get_output_embeddings(), nn.Linear): + # print("Change the lm_head to nn.Linear !!!") + # transposed = False + # old_lm_head = self.mllm.model.language_model.get_output_embeddings() + # old_num_tokens, old_lm_head_dim = ( + # old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() + # ) + # new_lm_head_shape = (old_lm_head_dim, len(tokenizer)) if not transposed else ( + # len(tokenizer), old_lm_head_dim) + # has_new_lm_head_bias = old_lm_head.bias is not None + # new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias).to(self.device) + # new_lm_head.weight = old_lm_head.weight + # new_lm_head.bias = old_lm_head.bias + # self.mllm.model.language_model.set_output_embeddings(new_lm_head) + + # this is already done in mllm + # if num_new_tokens > 0: + # self.mllm.model.language_model.resize_token_embeddings(len(self.tokenizer)) + + # assert isinstance(self.mllm, InternVL_Slowfast) + self.seg_token_idx = self.tokenizer("[SEG]", add_special_tokens=False).input_ids[0] + + def state_dict(self, *args, **kwargs): + state_dict = super(LisaModel, self).state_dict(*args, **kwargs) + from collections import OrderedDict + + to_return = OrderedDict() + # Step 1. visual_encoder + if self.mllm.use_visual_encoder_lora: + to_return.update( + get_peft_model_state_dict( + self.mllm.model.vision_model, state_dict=state_dict)) + raise NotImplementedError + elif not self.mllm.freeze_visual_encoder: + to_return.update({ + k: v + for k, v in state_dict.items() if 'visual_encoder.' in k + }) + raise NotImplementedError + # Step 2. 
LLM + if self.mllm.use_llm_lora: + if self.arch_type == 'intern_vl': + to_return.update( + get_peft_model_state_dict(self.mllm.model.language_model, state_dict=state_dict) + ) + elif self.arch_type == 'qwen': + to_return.update( + get_peft_model_state_dict(self.mllm.model.model, state_dict=state_dict) + ) + elif self.arch_type == 'llava': + to_return.update( + get_peft_model_state_dict(self.mllm.model.language_model, state_dict=state_dict) + ) + elif not self.mllm.freeze_llm: + to_return.update( + {k: v + for k, v in state_dict.items() if 'llm.' in k}) + raise NotImplementedError + # Step 3. Projector + to_return.update( + {k: v + for k, v in state_dict.items() if 'mlp1.' in k}) + to_return.update( + {k: v + for k, v in state_dict.items() if 'model.multi_modal_projector.' in k}) + + # Step 4. mask decoder of grounding model (SAM/SAM2) + to_return.update( + {k: v + for k, v in state_dict.items() if 'mask_decoder' in k}) + + # Step 5. others (fcs) + to_return.update( + {k: v + for k, v in state_dict.items() if 'text_hidden_fcs.' in k}) + to_return.update( + {k: v + for k, v in state_dict.items() if 'text_exist_fcs.' in k} + ) + to_return.update( + {k: v + for k, v in state_dict.items() if 'lm_head.weight' in k or 'output' in k and 'sam2_model' not in k}) + to_return.update( + {k: v + for k, v in state_dict.items() if 'embed_tokens.weight' in k or 'tok_embeddings' in k}) + return to_return + + def check_obj_number(self, pred_embeddings_list_video, gt_masks_video, fix_number=5): + assert len(pred_embeddings_list_video) == len(gt_masks_video) + ret_pred_embeddings_list_video = [] + ret_gt_masks_video = [] + for pred_mebeds, gt_masks in zip(pred_embeddings_list_video, gt_masks_video): + # assert len(pred_mebeds) == len(gt_masks) + if len(pred_mebeds) != len(gt_masks): + min_num = min(len(pred_mebeds), len(gt_masks)) + pred_mebeds = pred_mebeds[:min_num] + gt_masks = gt_masks[:min_num] + if len(pred_mebeds) != fix_number: + if len(pred_mebeds) > fix_number: + _idxs = torch.randperm(pred_mebeds.shape[0]) + _idxs = _idxs[:fix_number] + pred_mebeds = pred_mebeds[_idxs] + gt_masks = gt_masks[_idxs] + else: + n_repeat = fix_number // len(pred_mebeds) + 1 + pred_mebeds = torch.cat([pred_mebeds] * n_repeat, dim=0)[:fix_number] + gt_masks = torch.cat([gt_masks] * n_repeat, dim=0)[:fix_number] + ret_pred_embeddings_list_video.append(pred_mebeds) + ret_gt_masks_video.append(gt_masks) + return ret_pred_embeddings_list_video, ret_gt_masks_video + + def _get_pesudo_data(self, dtype, device): + assert self.bs > 0 + g_pixel_values = torch.zeros((3, 1024, 1024), dtype=dtype, device=device) + g_pixel_values = [g_pixel_values] * self.bs + frames_per_batch = [1] * self.bs + gt_masks = torch.zeros((5, 256, 256), dtype=torch.uint8, device=device) + gt_masks = [gt_masks] * self.bs + return g_pixel_values, frames_per_batch, gt_masks + + def forward(self, data, data_samples=None, mode='loss'): + g_pixel_values = data.pop('g_pixel_values', None) + gt_masks = data.pop('masks', None) + frames_per_batch = data.pop('frames_per_batch', None) + input_ids = data['input_ids'] + fast_exists = data.pop('fast_exists', None) + # if self.arch_type == 'llava' and data.get('pixel_values', None) is not None: + # data['pixel_values'] = data['pixel_values'].to(self.torch_dtype) + if self.fast_pool: + output = self.mllm(data, data_samples, mode, fast_token_idx=self.fast_token_idx) + else: + output = self.mllm(data, data_samples, mode) + if gt_masks is None: + # require zero seg datas + seg_valid = False + g_pixel_values, frames_per_batch, 
gt_masks = self._get_pesudo_data( + dtype=self.torch_dtype, + device=input_ids.device, + ) + else: + seg_valid = True + + assert frames_per_batch, "Video Lisa require frames_per_batch !!!" + # print('frmaes_per_batch: ', frames_per_batch) + ori_size_list = [] + for i_bs, mask in enumerate(gt_masks): + mask_shape = mask.shape[-2:] + ori_size_list += [mask_shape] * frames_per_batch[i_bs] + + seg_token_mask = input_ids == self.seg_token_idx + + hidden_states = output.hidden_states + hidden_states = self.text_hidden_fcs(hidden_states[-1]) + + _zero = hidden_states.mean() * 0.0 + if seg_valid: + pred_embeddings = hidden_states[seg_token_mask] + _zero + else: + pred_embeddings = hidden_states[:, :5].flatten(0, 1) + _zero + + seg_token_counts = seg_token_mask.int().sum(-1) + if not seg_valid: + seg_token_counts += 5 + + pred_embeddings_list_ = torch.split(pred_embeddings, seg_token_counts.tolist(), dim=0) + pred_embeddings_list = [] + for item in pred_embeddings_list_: + if len(item) != 0: + pred_embeddings_list.append(item) + pred_embeddings_list_video, success = self.genetate_video_pred_embeddings( + pred_embeddings_list, frames_per_batch) + if not success: + raise NotImplementedError + + if self.use_fast_supervision and fast_exists is not None: + # gt_exists = [] + # for id_x, _fast_exists in enumerate(fast_exists): + # num_tot = _fast_exists.shape[0] + # num_conv = gt_masks[id_x].shape[0] // frames_per_batch[id_x] + # assert num_tot % num_conv == 0 + # gt_exists.append(_fast_exists.reshape(num_conv, num_tot // num_conv)) + fast_flag = input_ids == self.fast_token_idx + fast_tokens = output.hidden_states[-1][fast_flag] + exists_logit = self.text_exist_fcs(fast_tokens[self.fast_pool_size ** 2 - 1::self.fast_pool_size ** 2]) + gt_exists = torch.cat(fast_exists) + loss_exists = self.loss_exists(exists_logit, gt_exists) + else: + loss_exists = None + + gt_masks_video = self.process_video_gt_masks(gt_masks, frames_per_batch) + pred_embeddings_list_video, gt_masks_video = self.check_obj_number( + pred_embeddings_list_video, gt_masks_video + ) + g_pixel_values = torch.stack([ + self.grounding_encoder.preprocess_image(pixel) for pixel in g_pixel_values + ]) + num_objs = pred_embeddings_list_video[0].shape[0] + num_frames = len(pred_embeddings_list_video) + language_embeddings = torch.cat(pred_embeddings_list_video, dim=0)[:, None] + sam_states = self.grounding_encoder.get_sam2_embeddings(g_pixel_values, expand_size=num_objs) + pred_masks = self.grounding_encoder.inject_language_embd(sam_states, language_embeddings, nf_nobj=(num_frames, num_objs)) + + gt_masks = [F.interpolate(gt_mask.unsqueeze(0), size=pred_masks[0].shape[-2:], mode='nearest').squeeze(0) for gt_mask in gt_masks_video] + gt_masks = torch.cat(gt_masks, dim=0) + pred_masks = pred_masks.flatten(0, 1) + + loss_mask, loss_dice = 0, 0 + if len(pred_masks) != len(gt_masks): + # drop this data + print(f"Pred mask shape {pred_masks.shape} is not equal to gt_mask shape {gt_masks.shape} !!!") + min_num = min(len(pred_masks), len(gt_masks)) + pred_masks = pred_masks[:min_num] + gt_masks = gt_masks[:min_num] + seg_valid = False + + if self.loss_sample_points: + sampled_pred_mask, sampled_gt_mask = self.sample_points(pred_masks, gt_masks) + sam_loss_dice = self.loss_dice( + sampled_pred_mask, + sampled_gt_mask, avg_factor=(len(gt_masks) + 1e-4)) + sam_loss_mask = self.loss_mask( + sampled_pred_mask.reshape(-1), + sampled_gt_mask.reshape(-1), + avg_factor=(pred_masks.shape[0] * sampled_pred_mask.shape[1] + 1e-4)) + else: + sam_loss_mask = 
self.loss_mask(pred_masks, gt_masks) + sam_loss_dice = self.loss_dice(pred_masks, gt_masks) + loss_mask += sam_loss_mask + loss_dice += sam_loss_dice + + if not seg_valid: + _scale = 0.0 + else: + _scale = 1.0 + loss_mask = loss_mask * _scale + loss_dice = loss_dice * _scale + + loss_dict = { + 'loss_mask': loss_mask, + 'loss_dice': loss_dice, + 'llm_loss': output.loss, + } + if loss_exists is not None: + loss_dict['loss_exists'] = loss_exists + return loss_dict + + def sample_points(self, mask_pred, gt_masks): + gt_masks = gt_masks.unsqueeze(1) + gt_masks = gt_masks.to(mask_pred) + mask_pred = mask_pred.unsqueeze(1) + # (N, 1, h, w) + + with torch.no_grad(): + points_coords = get_uncertain_point_coords_with_randomness( + mask_pred.to(torch.float32), None, self.num_points, + self.oversample_ratio, self.importance_sample_ratio) + # shape (num_total_gts, h, w) -> (num_total_gts, num_points) + mask_point_targets = point_sample( + gt_masks.float(), points_coords).squeeze(1) + # shape (num_queries, h, w) -> (num_queries, num_points) + mask_point_preds = point_sample( + mask_pred.to(torch.float32), points_coords.to(torch.float32)).squeeze(1) + return mask_point_preds.to(mask_pred.dtype), mask_point_targets.to(mask_pred.dtype) + + def genetate_video_pred_embeddings(self, pred_embeddings_list, frames_per_batch): + if len(pred_embeddings_list) == len(frames_per_batch): + success = True + else: + success = False + print("len(pred_embeddings_list):{} is not equal to len(frames_per_batch):{} !!!".format(len(pred_embeddings_list), len(frames_per_batch))) + pred_embeddings_list_video = [] + for pred_embedding_batch, frame_nums in zip(pred_embeddings_list, frames_per_batch): + pred_embeddings_list_video += [pred_embedding_batch] * frame_nums + return pred_embeddings_list_video, success + + def process_video_gt_masks(self, gt_masks, frames_per_batch): + gt_masks_video = [] + + assert len(gt_masks) == len(frames_per_batch) + for gt_masks_batch, frames_num in zip(gt_masks, frames_per_batch): + N, H, W = gt_masks_batch.shape + assert N % frames_num == 0 + gt_masks_batch = gt_masks_batch.reshape( + N // frames_num, frames_num, H, W) + for i in range(frames_num): + gt_masks_video.append(gt_masks_batch[:, i]) + return gt_masks_video + + def preparing_for_generation(self, metainfo, **kwargs): + # set stop criteria and generation configs for model + assert hasattr(self, 'tokenizer'), "The Model does not have the tokenizer!!!" 
+ self.bot_name = 'BOT' + if 'template' in metainfo.keys(): + template = metainfo['template'] + else: + template = PROMPT_TEMPLATE['phi3_chat'] + if self.template is None: + self.template = template + stop_words = [] + stop_words += self.template.get('STOP_WORDS', []) + stop_criteria = get_stop_criteria( + tokenizer=self.tokenizer, stop_words=stop_words) + self.stop_criteria = stop_criteria + + default_generation_kwargs = dict( + max_new_tokens=512, + do_sample=False, + eos_token_id=self.tokenizer.eos_token_id, + pad_token_id=( + self.tokenizer.pad_token_id + if self.tokenizer.pad_token_id is not None + else self.tokenizer.eos_token_id + ), + ) + default_generation_kwargs.update(metainfo.get('generation_kwargs', {})) + self.gen_config = GenerationConfig(**default_generation_kwargs) + self.init_prediction_config = True + + self.mllm.to(self.torch_dtype) + self.text_hidden_fcs.to(self.torch_dtype) + # if getattr(self, 'text_exist_fcs', None) is not None: + # self.text_exist_fcs.to(self.torch_dtype) + + # for sam image processor + self.extra_image_processor = DirectResize(target_length=1024, ) + # for multi image process + self.min_dynamic_patch = 1 + if 'max_dynamic_patch' in metainfo.keys(): + self.max_dynamic_patch = metainfo['max_dynamic_patch'] + else: + self.max_dynamic_patch = 12 + self.downsample_ratio = 0.5 + self.image_size = 448 + self.use_thumbnail = True + patch_size = 14 + self.patch_size = patch_size + + self.patch_token = int((self.image_size // patch_size) ** 2 * (self.downsample_ratio ** 2)) + self.IMAGENET_MEAN = (0.485, 0.456, 0.406) + self.IMAGENET_STD = (0.229, 0.224, 0.225) + self.IMG_CONTEXT_TOKEN = '' + self.IMG_START_TOKEN = '' + self.IMG_END_TOKEN = '' + if self.arch_type == 'qwen': + self.IMG_CONTEXT_TOKEN = '<|image_pad|>' + self.IMG_START_TOKEN = '' + self.IMG_END_TOKEN = '' + + if self.preprocessor is None: + self.transformer = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((self.image_size, self.image_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=self.IMAGENET_MEAN, std=self.IMAGENET_STD) + ]) + self.preprocessor = None + else: + self.transformer = None + # self.preprocessor = BUILDER.build(self.preprocessor) + + self.VP_START_TOKEN = '' + self.VP_END_TOKEN = '' + + # change phi3 prepare for generation fuction + if self.phi3: + self.mllm.model.language_model.prepare_inputs_for_generation = MethodType(prepare_inputs_for_generation, self.mllm.model.language_model) + return + + def predict_video(self, pixel_values, text_prompts, **kwargs): + ori_h, ori_w = kwargs['ori_height'], kwargs['ori_width'] + + _input_ids = kwargs['input_ids'] + + g_pixel_values = kwargs.pop('g_pixel_values', None) + g_pixel_values = torch.stack([ + self.grounding_encoder.preprocess_image(pixel) for pixel in g_pixel_values + ]) + + fast_pixel_values = kwargs.pop('fast_pixel_values', None) + if fast_pixel_values is None: + fast_token_idx = None + else: + fast_token_idx = self.fast_token_idx + + predictions = [] + pred_masks = [] + is_exists_list = [] + for input_ids in _input_ids: + input_ids = torch.tensor(input_ids).unsqueeze(0) + attention_mask = torch.ones_like(input_ids, dtype=torch.bool) + pixel_values = pixel_values.to(dtype=self.torch_dtype) + if fast_pixel_values is not None: + fast_pixel_values = fast_pixel_values.to(dtype=self.torch_dtype) + mm_inputs = { + 'pixel_values': pixel_values, + 'input_ids': input_ids, + 'attention_mask': attention_mask, + 'position_ids': None, + 'past_key_values': 
None, + 'labels': None, + 'fast_pixel_values': fast_pixel_values, + 'fast_token_idx': fast_token_idx, + } + if kwargs.get('image_grid_thw', None) is not None: + mm_inputs['image_grid_thw'] = kwargs['image_grid_thw'] + + generate_output = self.mllm.generate( + **mm_inputs, + generation_config=self.gen_config, + streamer=None, + bos_token_id=self.tokenizer.bos_token_id, + stopping_criteria=self.stop_criteria, + output_hidden_states=True, + return_dict_in_generate=True + ) + + predict = self.tokenizer.decode(generate_output.sequences[0], skip_special_tokens=False).strip() + + # input_text = self.tokenizer.decode(mm_inputs['input_ids'][0], skip_special_tokens=False) + # print(input_text, generate_output.sequences[0], '\n', predict, self.tokenizer("[SEG]", add_special_tokens=False).input_ids[0]) + + predictions.append(predict) + + hidden_states = generate_output.hidden_states + last_hidden_states = [item[-1][0] for item in hidden_states] + last_hidden_states = torch.cat(last_hidden_states, dim=0) + seg_hidden_states = get_seg_hidden_states( + last_hidden_states, generate_output.sequences[0][:-1], + seg_id=self.seg_token_idx + ) + + if len(seg_hidden_states) == 0: + print("Warning, no [SEG] tokens !!!") + pred_masks.append(torch.zeros((g_pixel_values.shape[0], ori_h, ori_w), dtype=torch.int)) + continue + elif len(seg_hidden_states) > 1: + print("Warning, {} [SEG] tokens !!!".format(len(seg_hidden_states))) + seg_hidden_states = seg_hidden_states[:1] + seg_hidden_states = self.text_hidden_fcs(seg_hidden_states) + + seg_hidden_states = seg_hidden_states.to(dtype=torch.float32) + + sam_states = self.grounding_encoder.get_sam2_embeddings(g_pixel_values) + # TODO: change 5 + if len(pixel_values) < 5: + pred_mask = self.grounding_encoder.language_embd_inference(sam_states, [seg_hidden_states] * pixel_values.shape[0]) + else: + pred_mask = self.grounding_encoder.language_embd_inference(sam_states, [seg_hidden_states] * 5) + pred_mask = F.interpolate( + pred_mask, + size=(ori_h, ori_w), + mode='bilinear', + align_corners=False, + ) + pred_mask = pred_mask[:, 0] + pred_mask = pred_mask.sigmoid() > 0.5 + pred_mask = pred_mask.int() + # supervision + if self.use_fast_supervision and (input_ids == self.fast_token_idx).sum() > 0: + fast_flag = input_ids.squeeze(0) == self.fast_token_idx + len_out = generate_output.sequences[0][:-1].shape[0] + fast_tokens = last_hidden_states[:-len_out][fast_flag].to(dtype=torch.float32) + exists_logit = self.text_exist_fcs(fast_tokens[self.fast_pool_size ** 2 - 1::self.fast_pool_size ** 2]) + is_exists = exists_logit.squeeze(-1).sigmoid() > 0.5 + is_exists_list.append(is_exists) + not_exists = torch.logical_not(is_exists) + if torch.any(not_exists): + pred_mask[not_exists] = pred_mask[not_exists] * 0 + + pred_masks.append(pred_mask) + assert len(pred_masks) == len(text_prompts) + ret_dict = { + 'prediction': predictions, + 'prediction_masks': [mask_to_rle(_item.cpu().numpy()) for _item in pred_masks], + } + if 'id' in kwargs.keys(): + ret_dict['id'] = kwargs['id'] + + if len(is_exists_list) > 0: + ret_dict['is_exists'] = is_exists_list + return ret_dict + + +def get_seg_hidden_states(hidden_states, output_ids, seg_id): + seg_mask = output_ids == seg_id + n_out = len(seg_mask) + return hidden_states[-n_out:][seg_mask] + +def mask_to_rle(mask): + rle = [] + for m in mask: + rle.append(_mask.encode(np.asfortranarray(m.astype(np.uint8)))) + rle[-1]['counts'] = rle[-1]['counts'].decode() + return rle + +from transformers.cache_utils import Cache, DynamicCache + +def 
prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs +): + if past_key_values is not None: + if isinstance(past_key_values, Cache): + cache_length = past_key_values.get_seq_length() + past_length = past_key_values.seen_tokens + max_cache_length = past_key_values.get_max_length() + else: + cache_length = past_length = past_key_values[0][0].shape[2] + max_cache_length = None + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. + if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get('position_ids', None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1]:] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and (past_key_values is None or len(past_key_values)==0): + model_inputs = {'inputs_embeds': inputs_embeds} + else: + model_inputs = {'input_ids': input_ids} + + model_inputs.update( + { + 'position_ids': position_ids, + 'past_key_values': past_key_values, + 'use_cache': kwargs.get('use_cache'), + 'attention_mask': attention_mask, + } + ) + return model_inputs + + +class VideoLLaVASAMModel_zero3(VideoLLaVASAMModel): + def __init__(self, + mllm, + tokenizer, + grounding_encoder, + loss_mask=None, + loss_dice=None, + torch_dtype=torch.bfloat16, + pretrained_pth=None, + frozen_sam2_decoder=True, + special_tokens=['[SEG]', ], + loss_sample_points=False, + num_points=12544, + # for slow fast arch + fast_pool=False, + fast_pool_size=4, + arch_type='intern_vl', + # zero3 + bs=1, + ): + super(VideoLLaVASAMModel_zero3, self).__init__( + mllm=mllm, + tokenizer=tokenizer, + grounding_encoder=grounding_encoder, + loss_mask=loss_mask, + loss_dice=loss_dice, + torch_dtype=torch_dtype, + pretrained_pth=pretrained_pth, + frozen_sam2_decoder=frozen_sam2_decoder, + special_tokens=special_tokens, + loss_sample_points=loss_sample_points, + num_points=num_points, + # for slow fast arch + fast_pool=fast_pool, + fast_pool_size=fast_pool_size, + arch_type=arch_type, + ) + self.bs = bs + + def _get_pesudo_data(self, dtype, device): + g_pixel_values = torch.zeros((3, 1024, 1024), dtype=dtype, device=device) + g_pixel_values = [g_pixel_values] * self.bs + frames_per_batch = [1] * self.bs + gt_masks = torch.zeros((5, 256, 256), dtype=torch.uint8, device=device) + gt_masks = [gt_masks] * self.bs 
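+ # Pseudo data for ZeRO-3: when a batch carries no segmentation targets, `forward`
+ # still runs the SAM branch on these dummy inputs (one 1024x1024 image per sample
+ # and a 5x256x256 mask stack matching the five placeholder [SEG] embeddings taken
+ # from hidden_states[:, :5]), presumably so that every rank executes the same
+ # graph and the sharded collectives stay in sync. The resulting mask/dice losses
+ # are multiplied by `_scale = 0.0` later in `forward`, so the dummy targets never
+ # contribute to the loss value.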
+ return g_pixel_values, frames_per_batch, gt_masks + + def forward(self, data, data_samples=None, mode='loss'): + g_pixel_values = data.pop('g_pixel_values', None) + gt_masks = data.pop('masks', None) + frames_per_batch = data.pop('frames_per_batch', None) + input_ids = data['input_ids'] + if self.fast_pool: + output = self.mllm(data, data_samples, mode, fast_token_idx=self.fast_token_idx) + else: + output = self.mllm(data, data_samples, mode) + + if gt_masks is None: + # no seg data in this batch; fall back to pseudo data + seg_valid = False + g_pixel_values, frames_per_batch, gt_masks = self._get_pesudo_data( + dtype=self.torch_dtype, + device=input_ids.device, + ) + else: + seg_valid = True + + assert frames_per_batch, "Video Lisa requires frames_per_batch !!!" + # print('frames_per_batch: ', frames_per_batch) + ori_size_list = [] + for i_bs, mask in enumerate(gt_masks): + mask_shape = mask.shape[-2:] + ori_size_list += [mask_shape] * frames_per_batch[i_bs] + + seg_token_mask = input_ids == self.seg_token_idx + + hidden_states = output.hidden_states + hidden_states = self.text_hidden_fcs(hidden_states[-1]) + + _zero = hidden_states.mean() * 0.0 + if seg_valid: + pred_embeddings = hidden_states[seg_token_mask] + _zero + else: + pred_embeddings = hidden_states[:, :5].flatten(0, 1) + _zero + + seg_token_counts = seg_token_mask.int().sum(-1) + if not seg_valid: + seg_token_counts += 5 + + pred_embeddings_list_ = torch.split(pred_embeddings, seg_token_counts.tolist(), dim=0) + pred_embeddings_list = [] + for item in pred_embeddings_list_: + if len(item) != 0: + pred_embeddings_list.append(item) + pred_embeddings_list_video, success = self.genetate_video_pred_embeddings( + pred_embeddings_list, frames_per_batch) + if not success: + raise NotImplementedError + # return {'llm_loss': output.loss, 'loss_mask': output.loss * 0.0, 'loss_dice': output.loss * 0.0} + + gt_masks_video = self.process_video_gt_masks(gt_masks, frames_per_batch) + pred_embeddings_list_video, gt_masks_video = self.check_obj_number( + pred_embeddings_list_video, gt_masks_video + ) + g_pixel_values = torch.stack([ + self.grounding_encoder.preprocess_image(pixel) for pixel in g_pixel_values + ]) + # print(f"Done, {g_pixel_values.device} !!!\n\n") + num_objs = pred_embeddings_list_video[0].shape[0] + num_frames = len(pred_embeddings_list_video) + language_embeddings = torch.cat(pred_embeddings_list_video, dim=0)[:, None] + # print(f"Done, {g_pixel_values.device} !!!
{num_frames}---{num_objs}, {language_embeddings.shape}\n\n") + sam_states = self.grounding_encoder.get_sam2_embeddings(g_pixel_values, expand_size=num_objs) + pred_masks = self.grounding_encoder.inject_language_embd(sam_states, language_embeddings, nf_nobj=(num_frames, num_objs)) + + gt_masks = [F.interpolate(gt_mask.unsqueeze(0), size=pred_masks[0].shape[-2:], mode='nearest').squeeze(0) for gt_mask in gt_masks_video] + gt_masks = torch.cat(gt_masks, dim=0) + pred_masks = pred_masks.flatten(0, 1) + # pred_masks = torch.cat(pred_masks, dim=0) + + + bs = len(pred_masks) + loss_mask, loss_dice = 0, 0 + if len(pred_masks) != len(gt_masks): + # drop this data + print(f"Pred mask shape {pred_masks.shape} is not equal to gt_mask shape {gt_masks.shape} !!!") + min_num = min(len(pred_masks), len(gt_masks)) + pred_masks = pred_masks[:min_num] + gt_masks = gt_masks[:min_num] + seg_valid = False + + if self.loss_sample_points: + sampled_pred_mask, sampled_gt_mask = self.sample_points(pred_masks, gt_masks) + sam_loss_dice = self.loss_dice( + sampled_pred_mask, + sampled_gt_mask, avg_factor=(len(gt_masks) + 1e-4)) + sam_loss_mask = self.loss_mask( + sampled_pred_mask.reshape(-1), + sampled_gt_mask.reshape(-1), + avg_factor=(pred_masks.shape[0] * sampled_pred_mask.shape[1] + 1e-4)) + else: + sam_loss_mask = self.loss_mask(pred_masks, gt_masks) + sam_loss_dice = self.loss_dice(pred_masks, gt_masks) + loss_mask += sam_loss_mask + loss_dice += sam_loss_dice + + if not seg_valid: + _scale = 0.0 + else: + _scale = 1.0 + loss_mask = loss_mask * _scale + loss_dice = loss_dice * _scale + + loss_dict = { + 'loss_mask': loss_mask, + 'loss_dice': loss_dice, + 'llm_loss': output.loss, + } + return loss_dict diff --git a/projects/llava_sam2/models/predictor/__init__.py b/projects/llava_sam2/models/predictor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..21f3bb13f0b8da1b1166f6ff826f7e605bee981e --- /dev/null +++ b/projects/llava_sam2/models/predictor/__init__.py @@ -0,0 +1 @@ +from .sam2_predictor import SAM2VideoPredictor diff --git a/projects/llava_sam2/models/predictor/sam2_predictor.py b/projects/llava_sam2/models/predictor/sam2_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..6cb3c8ac1a7547509d95c9fc03d9c83e7534d624 --- /dev/null +++ b/projects/llava_sam2/models/predictor/sam2_predictor.py @@ -0,0 +1,708 @@ +from collections import OrderedDict + +import torch +from tqdm import tqdm + +from projects.llava_sam2.models.extension import SAM2Base +from third_parts.sam2.modeling.sam2_base import NO_OBJ_SCORE +from third_parts.sam2.utils.misc import fill_holes_in_mask_scores + + +def _obj_id_to_idx(inference_state, obj_id): + """Map client-side object id to model-side object index.""" + obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None) + if obj_idx is not None: + return obj_idx + + # This is a new object id not sent to the server before. We only allow adding + # new objects *before* the tracking starts. 
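+ # The mapping is client id -> contiguous model-side index: client ids can be
+ # arbitrary ints (e.g. `inject_language_embd` in sam2.py passes `obj_idx + 100`),
+ # while the indices 0..N-1 are what the predictor uses to slice the batched
+ # per-object tensors ("pred_masks", "obj_ptr", ...) further down.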
+ allow_new_object = not inference_state["tracking_has_started"] + if allow_new_object: + # get the next object slot + obj_idx = len(inference_state["obj_id_to_idx"]) + inference_state["obj_id_to_idx"][obj_id] = obj_idx + inference_state["obj_idx_to_id"][obj_idx] = obj_id + inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"]) + # set up input and output structures for this object + inference_state["point_inputs_per_obj"][obj_idx] = {} + inference_state["mask_inputs_per_obj"][obj_idx] = {} + inference_state["output_dict_per_obj"][obj_idx] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + inference_state["temp_output_dict_per_obj"][obj_idx] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + return obj_idx + else: + raise RuntimeError( + f"Cannot add new object id {obj_id} after tracking starts. " + f"All existing object ids: {inference_state['obj_ids']}. " + f"Please call 'reset_state' to restart from scratch." + ) + + +def _get_maskmem_pos_enc(inference_state, current_out): + """ + `maskmem_pos_enc` is the same across frames and objects, so we cache it as + a constant in the inference session to reduce session storage size. + """ + model_constants = inference_state["constants"] + # "out_maskmem_pos_enc" should be either a list of tensors or None + out_maskmem_pos_enc = current_out["maskmem_pos_enc"] + if out_maskmem_pos_enc is not None: + if "maskmem_pos_enc" not in model_constants: + assert isinstance(out_maskmem_pos_enc, list) + # only take the slice for one object, since it's same across objects + maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc] + model_constants["maskmem_pos_enc"] = maskmem_pos_enc + else: + maskmem_pos_enc = model_constants["maskmem_pos_enc"] + # expand the cached maskmem_pos_enc to the actual batch size + batch_size = out_maskmem_pos_enc[0].size(0) + expanded_maskmem_pos_enc = [ + x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc + ] + else: + expanded_maskmem_pos_enc = None + return expanded_maskmem_pos_enc + + +def _obj_idx_to_id(inference_state, obj_idx): + """Map model-side object index to client-side object id.""" + return inference_state["obj_idx_to_id"][obj_idx] + + +def _get_obj_num(inference_state): + """Get the total number of unique object ids received so far in this session.""" + return len(inference_state["obj_idx_to_id"]) + + +class SAM2VideoPredictor(SAM2Base): + """The predictor class to handle user interactions and manage inference states.""" + + def __init__( + self, + fill_hole_area=0, + # whether to apply non-overlapping constraints on the output object masks + non_overlap_masks=False, + # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks; + # note that this would only apply to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True) + clear_non_cond_mem_around_input=False, + # whether to also clear non-conditioning memory of the surrounding frames (only effective when `clear_non_cond_mem_around_input` is True). 
+ clear_non_cond_mem_for_multi_obj=False, + **kwargs, + ): + super().__init__(**kwargs) + self.fill_hole_area = fill_hole_area + self.non_overlap_masks = non_overlap_masks + self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input + self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj + + def _get_image_feature(self, inference_state, frame_idx, batch_size): + """Compute the image features on a given frame.""" + # Look up in the cache first + image, backbone_out = inference_state["cached_features"].get( + frame_idx, (None, None) + ) + if backbone_out is None: + # Cache miss -- we will run inference on a single image + image = inference_state["images"][frame_idx].cuda().float().unsqueeze(0) + backbone_out = self.forward_image(image) + # Cache the most recent frame's feature (for repeated interactions with + # a frame; we can use an LRU cache for more frames in the future). + inference_state["cached_features"] = {frame_idx: (image, backbone_out)} + + # expand the features to have the same dimension as the number of objects + expanded_image = image.expand(batch_size, -1, -1, -1) + expanded_backbone_out = { + "backbone_fpn": backbone_out["backbone_fpn"].copy(), + "vision_pos_enc": backbone_out["vision_pos_enc"].copy(), + } + for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]): + expanded_backbone_out["backbone_fpn"][i] = feat.expand( + batch_size, -1, -1, -1 + ) + for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]): + pos = pos.expand(batch_size, -1, -1, -1) + expanded_backbone_out["vision_pos_enc"][i] = pos + + features = self._prepare_backbone_features(expanded_backbone_out) + features = (expanded_image,) + features + return features + + + def _run_single_frame_inference( + self, + inference_state, + output_dict, + frame_idx, + batch_size, + is_init_cond_frame, + point_inputs, + mask_inputs, + reverse, + run_mem_encoder, + prev_sam_mask_logits=None, + ## Extension: LLM prompt + language_embd=None, + ): + """Run tracking on a single frame based on current inputs and previous memory.""" + # Retrieve correct image features + ( + _, + _, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + ) = self._get_image_feature(inference_state, frame_idx, batch_size) + + # point and mask should not appear as input simultaneously on the same frame + assert point_inputs is None or mask_inputs is None + current_out = self.track_step( + frame_idx=frame_idx, + is_init_cond_frame=is_init_cond_frame, + current_vision_feats=current_vision_feats, + current_vision_pos_embeds=current_vision_pos_embeds, + feat_sizes=feat_sizes, + point_inputs=point_inputs, + mask_inputs=mask_inputs, + output_dict=output_dict, + num_frames=inference_state["num_frames"], + track_in_reverse=reverse, + run_mem_encoder=run_mem_encoder, + prev_sam_mask_logits=prev_sam_mask_logits, + language_embd=language_embd, + ) + + # optionally offload the output to CPU memory to save GPU space + storage_device = inference_state["storage_device"] + maskmem_features = current_out["maskmem_features"] + if maskmem_features is not None: + maskmem_features = maskmem_features.to(torch.bfloat16) + maskmem_features = maskmem_features.to(storage_device, non_blocking=True) + pred_masks_gpu = current_out["pred_masks"] + # potentially fill holes in the predicted masks + if self.fill_hole_area > 0: + pred_masks_gpu = fill_holes_in_mask_scores( + pred_masks_gpu, self.fill_hole_area + ) + pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True) + # "maskmem_pos_enc" is the same across 
frames, so we only need to store one copy of it + maskmem_pos_enc = _get_maskmem_pos_enc(inference_state, current_out) + # object pointer is a small tensor, so we always keep it on GPU memory for fast access + obj_ptr = current_out["obj_ptr"] + # make a compact version of this frame's output to reduce the state size + compact_current_out = { + "maskmem_features": maskmem_features, + "maskmem_pos_enc": maskmem_pos_enc, + "pred_masks": pred_masks, + "obj_ptr": obj_ptr, + } + return compact_current_out, pred_masks_gpu + + + def _consolidate_temp_output_across_obj( + self, + inference_state, + frame_idx, + is_cond, + run_mem_encoder, + consolidate_at_video_res=False, + ): + """ + Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on + a frame into a single output for all objects, including + 1) fill any missing objects either from `output_dict_per_obj` (if they exist in + `output_dict_per_obj` for this frame) or leave them as placeholder values + (if they don't exist in `output_dict_per_obj` for this frame); + 2) if specified, rerun memory encoder after apply non-overlapping constraints + on the object scores. + """ + batch_size = _get_obj_num(inference_state) + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + # Optionally, we allow consolidating the temporary outputs at the original + # video resolution (to provide a better editing experience for mask prompts). + if consolidate_at_video_res: + assert not run_mem_encoder, "memory encoder cannot run at video resolution" + consolidated_H = inference_state["video_height"] + consolidated_W = inference_state["video_width"] + consolidated_mask_key = "pred_masks_video_res" + else: + consolidated_H = consolidated_W = self.image_size // 4 + consolidated_mask_key = "pred_masks" + + # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc" + # will be added when rerunning the memory encoder after applying non-overlapping + # constraints to object scores. Its "pred_masks" are prefilled with a large + # negative value (NO_OBJ_SCORE) to represent missing objects. + consolidated_out = { + "maskmem_features": None, + "maskmem_pos_enc": None, + consolidated_mask_key: torch.full( + size=(batch_size, 1, consolidated_H, consolidated_W), + fill_value=NO_OBJ_SCORE, + dtype=torch.float32, + device=inference_state["storage_device"], + ), + "obj_ptr": torch.full( + size=(batch_size, self.hidden_dim), + fill_value=NO_OBJ_SCORE, + dtype=torch.float32, + device=inference_state["device"], + ), + } + empty_mask_ptr = None + for obj_idx in range(batch_size): + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + out = obj_temp_output_dict[storage_key].get(frame_idx, None) + # If the object doesn't appear in "temp_output_dict_per_obj" on this frame, + # we fall back and look up its previous output in "output_dict_per_obj". + # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in + # "output_dict_per_obj" to find a previous output for this object. + if out is None: + out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None) + if out is None: + out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None) + # If the object doesn't appear in "output_dict_per_obj" either, we skip it + # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE + # placeholder above) and set its object pointer to be a dummy pointer. 
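+ # NO_OBJ_SCORE is a large negative logit, so any object left at this placeholder
+ # decodes to an empty mask once the scores are passed through sigmoid() > 0.5
+ # downstream (see `predict_video` above).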
+ if out is None: + # Fill in dummy object pointers for those objects without any inputs or + # tracking outcomes on this frame (only do it under `run_mem_encoder=True`, + # i.e. when we need to build the memory for tracking). + if run_mem_encoder: + if empty_mask_ptr is None: + empty_mask_ptr = self._get_empty_mask_ptr( + inference_state, frame_idx + ) + # fill object pointer with a dummy pointer (based on an empty mask) + consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr + continue + # Add the temporary object output mask to consolidated output mask + obj_mask = out["pred_masks"] + consolidated_pred_masks = consolidated_out[consolidated_mask_key] + if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]: + consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask + else: + # Resize first if temporary object mask has a different resolution + resized_obj_mask = torch.nn.functional.interpolate( + obj_mask, + size=consolidated_pred_masks.shape[-2:], + mode="bilinear", + align_corners=False, + ) + consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask + consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = out["obj_ptr"] + + # Optionally, apply non-overlapping constraints on the consolidated scores + # and rerun the memory encoder + if run_mem_encoder: + device = inference_state["device"] + high_res_masks = torch.nn.functional.interpolate( + consolidated_out["pred_masks"].to(device, non_blocking=True), + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + if self.non_overlap_masks_for_mem_enc: + high_res_masks = self._apply_non_overlapping_constraints(high_res_masks) + maskmem_features, maskmem_pos_enc = self._run_memory_encoder( + inference_state=inference_state, + frame_idx=frame_idx, + batch_size=batch_size, + high_res_masks=high_res_masks, + is_mask_from_pts=True, # these frames are what the user interacted with + ) + consolidated_out["maskmem_features"] = maskmem_features + consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc + + return consolidated_out + + + def _get_orig_video_res_output(self, inference_state, any_res_masks): + """ + Resize the object scores to the original video resolution (video_res_masks) + and apply non-overlapping constraints for final output. + """ + device = inference_state["device"] + video_H = inference_state["video_height"] + video_W = inference_state["video_width"] + any_res_masks = any_res_masks.to(device, non_blocking=True) + if any_res_masks.shape[-2:] == (video_H, video_W): + video_res_masks = any_res_masks + else: + video_res_masks = torch.nn.functional.interpolate( + any_res_masks, + size=(video_H, video_W), + mode="bilinear", + align_corners=False, + ) + if self.non_overlap_masks: + video_res_masks = self._apply_non_overlapping_constraints(video_res_masks) + return any_res_masks, video_res_masks + + def init_state( + self, + images + ): + """Initialize a inference state.""" + inference_state = {} + inference_state["images"] = images + inference_state["num_frames"] = len(images) + # whether to offload the video frames to CPU memory + # turning on this option saves the GPU memory with only a very small overhead + inference_state["offload_video_to_cpu"] = False + # whether to offload the inference state to CPU memory + # turning on this option saves the GPU memory at the cost of a lower tracking fps + # (e.g. 
in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object + # and from 24 to 21 when tracking two objects) + inference_state["offload_state_to_cpu"] = False + # the original video height and width, used for resizing final output scores + inference_state["video_height"] = self.image_size + inference_state["video_width"] = self.image_size + inference_state["device"] = torch.device("cuda") + inference_state["storage_device"] = torch.device("cuda") + # inputs on each frame + inference_state["point_inputs_per_obj"] = {} + inference_state["mask_inputs_per_obj"] = {} + # visual features on a small number of recently visited frames for quick interactions + inference_state["cached_features"] = {} + # values that don't change across frames (so we only need to hold one copy of them) + inference_state["constants"] = {} + # mapping between client-side object id and model-side object index + inference_state["obj_id_to_idx"] = OrderedDict() + inference_state["obj_idx_to_id"] = OrderedDict() + inference_state["obj_ids"] = [] + # A storage to hold the model's tracking results and states on each frame + inference_state["output_dict"] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + # Slice (view) of each object tracking results, sharing the same memory with "output_dict" + inference_state["output_dict_per_obj"] = {} + # A temporary storage to hold new outputs when user interact with a frame + # to add clicks or mask (it's merged into "output_dict" before propagation starts) + inference_state["temp_output_dict_per_obj"] = {} + # Frames that already holds consolidated outputs from click or mask inputs + # (we directly use their consolidated outputs during tracking) + inference_state["consolidated_frame_inds"] = { + "cond_frame_outputs": set(), # set containing frame indices + "non_cond_frame_outputs": set(), # set containing frame indices + } + # metadata for each tracking frame (e.g. which direction it's tracked) + inference_state["tracking_has_started"] = False + inference_state["frames_already_tracked"] = {} + return inference_state + + def add_language_embd( + self, + inference_state, + frame_idx, + obj_id, + language_embd, + inference=False, + ): + obj_idx = _obj_id_to_idx(inference_state, obj_id) + + is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"] + # whether to track in reverse time order + if is_init_cond_frame: + reverse = False + else: + reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"] + + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + # Add a frame to conditioning output if it's an initial conditioning frame or + # if the model sees all frames receiving clicks/mask as conditioning frames. + is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + + # Get any previously predicted mask logits on this object and feed it along with + # the new clicks into the SAM mask decoder. 
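+ # In this extension the [SEG] token embedding plays the role of the usual
+ # point/box/mask prompt: `_run_single_frame_inference` below is called with
+ # point_inputs=None and mask_inputs=None, and only `language_embd` is handed
+ # through to the SAM2 mask decoder.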
+ prev_sam_mask_logits = None + # lookup temporary output dict first, which contains the most recent output + # (if not found, then lookup conditioning and non-conditioning frame output) + prev_out = obj_temp_output_dict[storage_key].get(frame_idx) + if prev_out is None: + prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx) + if prev_out is None: + prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx) + + if prev_out is not None and prev_out["pred_masks"] is not None: + prev_sam_mask_logits = prev_out["pred_masks"].cuda(non_blocking=True) + # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues. + prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0) + + current_out, pred_mask_gpu = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=obj_output_dict, # run on the slice of a single object + frame_idx=frame_idx, + batch_size=1, # run on the slice of a single object + is_init_cond_frame=is_init_cond_frame, + point_inputs=None, + mask_inputs=None, + reverse=reverse, + # Skip the memory encoder when adding clicks or mask. We execute the memory encoder + # at the beginning of `propagate_in_video` (after user finalize their clicks). This + # allows us to enforce non-overlapping constraints on all objects before encoding + # them into memory. + run_mem_encoder=False, + prev_sam_mask_logits=prev_sam_mask_logits, + ## Extension: LLM prompt + language_embd=language_embd, + ) + # Add the output to the output dict (to be used as future memory) + obj_temp_output_dict[storage_key][frame_idx] = current_out + + # Resize the output mask to the original video resolution + obj_ids = inference_state["obj_ids"] + if inference: + _consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=False, + ) + # _, video_res_masks = self._get_orig_video_res_output( + # inference_state, consolidated_out["pred_masks_video_res"] + # ) + return frame_idx, obj_ids, pred_mask_gpu + + + def _clear_non_cond_mem_around_input(self, inference_state, frame_idx): + """ + Remove the non-conditioning memory around the input frame. When users provide + correction clicks, the surrounding frames' non-conditioning memories can still + contain outdated object appearance information and could confuse the model. + + This method clears those non-conditioning memories surrounding the interacted + frame to avoid giving the model both old and new information about the object. + """ + r = self.memory_temporal_stride_for_eval + frame_idx_begin = frame_idx - r * self.num_maskmem + frame_idx_end = frame_idx + r * self.num_maskmem + output_dict = inference_state["output_dict"] + non_cond_frame_outputs = output_dict["non_cond_frame_outputs"] + for t in range(frame_idx_begin, frame_idx_end + 1): + non_cond_frame_outputs.pop(t, None) + for obj_output_dict in inference_state["output_dict_per_obj"].values(): + obj_output_dict["non_cond_frame_outputs"].pop(t, None) + + def _run_memory_encoder( + self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts + ): + """ + Run the memory encoder on `high_res_masks`. This is usually after applying + non-overlapping constraints to object scores. Since their scores changed, their + memory also need to be computed again with the memory encoder. 
+ """ + # Retrieve correct image features + _, _, current_vision_feats, _, feat_sizes = self._get_image_feature( + inference_state, frame_idx, batch_size + ) + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks, + is_mask_from_pts=is_mask_from_pts, + ) + + # optionally offload the output to CPU memory to save GPU space + storage_device = inference_state["storage_device"] + maskmem_features = maskmem_features.to(torch.bfloat16) + maskmem_features = maskmem_features.to(storage_device, non_blocking=True) + # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it + maskmem_pos_enc = _get_maskmem_pos_enc( + inference_state, {"maskmem_pos_enc": maskmem_pos_enc} + ) + return maskmem_features, maskmem_pos_enc + + def _add_output_per_object( + self, inference_state, frame_idx, current_out, storage_key + ): + """ + Split a multi-object output into per-object output slices and add them into + `output_dict_per_obj`. The resulting slices share the same tensor storage. + """ + maskmem_features = current_out["maskmem_features"] + assert maskmem_features is None or isinstance(maskmem_features, torch.Tensor) + + maskmem_pos_enc = current_out["maskmem_pos_enc"] + assert maskmem_pos_enc is None or isinstance(maskmem_pos_enc, list) + + output_dict_per_obj = inference_state["output_dict_per_obj"] + for obj_idx, obj_output_dict in output_dict_per_obj.items(): + obj_slice = slice(obj_idx, obj_idx + 1) + obj_out = { + "maskmem_features": None, + "maskmem_pos_enc": None, + "pred_masks": current_out["pred_masks"][obj_slice], + "obj_ptr": current_out["obj_ptr"][obj_slice], + } + if maskmem_features is not None: + obj_out["maskmem_features"] = maskmem_features[obj_slice] + if maskmem_pos_enc is not None: + obj_out["maskmem_pos_enc"] = [x[obj_slice] for x in maskmem_pos_enc] + obj_output_dict[storage_key][frame_idx] = obj_out + + @torch.inference_mode() + def propagate_in_video_preflight(self, inference_state): + """Prepare inference_state and consolidate temporary outputs before tracking.""" + # Tracking has started and we don't allow adding new objects until session is reset. + inference_state["tracking_has_started"] = True + batch_size = _get_obj_num(inference_state) + + # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and + # add them into "output_dict". + temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"] + output_dict = inference_state["output_dict"] + # "consolidated_frame_inds" contains indices of those frames where consolidated + # temporary outputs have been added (either in this call or any previous calls + # to `propagate_in_video_preflight`). 
+ consolidated_frame_inds = inference_state["consolidated_frame_inds"] + for is_cond in [False, True]: + # Separately consolidate conditioning and non-conditioning temp outputs + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + # Find all the frames that contain temporary outputs for any objects + # (these should be the frames that have just received clicks or mask inputs + # via `add_new_points` or `add_new_mask`) + temp_frame_inds = set() + for obj_temp_output_dict in temp_output_dict_per_obj.values(): + temp_frame_inds.update(obj_temp_output_dict[storage_key].keys()) + consolidated_frame_inds[storage_key].update(temp_frame_inds) + # consolidate the temporary output across all objects on this frame + for frame_idx in temp_frame_inds: + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True + ) + # merge them into "output_dict" and also create per-object slices + output_dict[storage_key][frame_idx] = consolidated_out + self._add_output_per_object( + inference_state, frame_idx, consolidated_out, storage_key + ) + clear_non_cond_mem = self.clear_non_cond_mem_around_input and ( + self.clear_non_cond_mem_for_multi_obj or batch_size <= 1 + ) + if clear_non_cond_mem: + # clear non-conditioning memory of the surrounding frames + self._clear_non_cond_mem_around_input(inference_state, frame_idx) + + # clear temporary outputs in `temp_output_dict_per_obj` + for obj_temp_output_dict in temp_output_dict_per_obj.values(): + obj_temp_output_dict[storage_key].clear() + + # edge case: if an output is added to "cond_frame_outputs", we remove any prior + # output on the same frame in "non_cond_frame_outputs" + for frame_idx in output_dict["cond_frame_outputs"]: + output_dict["non_cond_frame_outputs"].pop(frame_idx, None) + for obj_output_dict in inference_state["output_dict_per_obj"].values(): + for frame_idx in obj_output_dict["cond_frame_outputs"]: + obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None) + for frame_idx in consolidated_frame_inds["cond_frame_outputs"]: + assert frame_idx in output_dict["cond_frame_outputs"] + consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx) + + # Make sure that the frame indices in "consolidated_frame_inds" are exactly those frames + # with either points or mask inputs (which should be true under a correct workflow).
+ all_consolidated_frame_inds = ( + consolidated_frame_inds["cond_frame_outputs"] + | consolidated_frame_inds["non_cond_frame_outputs"] + ) + input_frames_inds = set() + for point_inputs_per_frame in inference_state["point_inputs_per_obj"].values(): + input_frames_inds.update(point_inputs_per_frame.keys()) + for mask_inputs_per_frame in inference_state["mask_inputs_per_obj"].values(): + input_frames_inds.update(mask_inputs_per_frame.keys()) + + # with language embd as input, there may not be point or box + # assert all_consolidated_frame_inds == input_frames_inds + + @torch.inference_mode() + def propagate_in_video( + self, + inference_state, + start_frame_idx=None, + max_frame_num_to_track=None, + reverse=False, + ): + """Propagate the input points across frames to track in the entire video.""" + self.propagate_in_video_preflight(inference_state) + + output_dict = inference_state["output_dict"] + consolidated_frame_inds = inference_state["consolidated_frame_inds"] + obj_ids = inference_state["obj_ids"] + num_frames = inference_state["num_frames"] + batch_size = _get_obj_num(inference_state) + if len(output_dict["cond_frame_outputs"]) == 0: + raise RuntimeError("No points are provided; please add points first") + clear_non_cond_mem = self.clear_non_cond_mem_around_input and ( + self.clear_non_cond_mem_for_multi_obj or batch_size <= 1 + ) + + # set start index, end index, and processing order + if start_frame_idx is None: + # default: start from the earliest frame with input points + start_frame_idx = min(output_dict["cond_frame_outputs"]) + if max_frame_num_to_track is None: + # default: track all the frames in the video + max_frame_num_to_track = num_frames + if reverse: + end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0) + if start_frame_idx > 0: + processing_order = range(start_frame_idx, end_frame_idx - 1, -1) + else: + processing_order = [] # skip reverse tracking if starting from frame 0 + else: + end_frame_idx = min( + start_frame_idx + max_frame_num_to_track, num_frames - 1 + ) + processing_order = range(start_frame_idx, end_frame_idx + 1) + + for frame_idx in tqdm(processing_order, desc="propagate in video"): + # We skip those frames already in consolidated outputs (these are frames + # that received input clicks or mask). Note that we cannot directly run + # batched forward on them via `_run_single_frame_inference` because the + # number of clicks on each object might be different. + if frame_idx in consolidated_frame_inds["cond_frame_outputs"]: + storage_key = "cond_frame_outputs" + current_out = output_dict[storage_key][frame_idx] + pred_masks = current_out["pred_masks"] + if clear_non_cond_mem: + # clear non-conditioning memory of the surrounding frames + self._clear_non_cond_mem_around_input(inference_state, frame_idx) + elif frame_idx in consolidated_frame_inds["non_cond_frame_outputs"]: + storage_key = "non_cond_frame_outputs" + current_out = output_dict[storage_key][frame_idx] + pred_masks = current_out["pred_masks"] + else: + storage_key = "non_cond_frame_outputs" + current_out, pred_masks = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=output_dict, + frame_idx=frame_idx, + batch_size=batch_size, + is_init_cond_frame=False, + point_inputs=None, + mask_inputs=None, + reverse=reverse, + run_mem_encoder=True, + ) + output_dict[storage_key][frame_idx] = current_out + # Create slices of per-object outputs for subsequent interaction with each + # individual object after tracking. 
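+ # Note that `propagate_in_video` is a generator: each loop iteration below ends
+ # by yielding (frame_idx, obj_ids, video_res_masks), which
+ # `language_embd_inference` in sam2.py concatenates over frames into the final
+ # mask volume.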
+ self._add_output_per_object( + inference_state, frame_idx, current_out, storage_key + ) + inference_state["frames_already_tracked"][frame_idx] = {"reverse": reverse} + + # Resize the output mask to the original video resolution (we directly use + # the mask scores on GPU for output to avoid any CPU conversion in between) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, pred_masks + ) + yield frame_idx, obj_ids, video_res_masks diff --git a/projects/llava_sam2/models/preprocess/image_resize.py b/projects/llava_sam2/models/preprocess/image_resize.py new file mode 100644 index 0000000000000000000000000000000000000000..93880971a8e1ed0682afbd07a1f5c4e37ac8666a --- /dev/null +++ b/projects/llava_sam2/models/preprocess/image_resize.py @@ -0,0 +1,14 @@ +import numpy as np +from torchvision.transforms.functional import resize, to_pil_image # type: ignore + + +class DirectResize: + def __init__(self, target_length: int) -> None: + self.target_length = target_length + + def apply_image(self, image: np.ndarray) -> np.ndarray: + """ + Expects a numpy array with shape HxWxC in uint8 format. + """ + img = to_pil_image(image, mode='RGB') + return np.array(img.resize((self.target_length, self.target_length))) diff --git a/projects/llava_sam2/models/sam2.py b/projects/llava_sam2/models/sam2.py new file mode 100644 index 0000000000000000000000000000000000000000..a36e89f23caea8d10954884460995181ee9b4b09 --- /dev/null +++ b/projects/llava_sam2/models/sam2.py @@ -0,0 +1,122 @@ +import os.path + +import torch + +from hydra import compose +from hydra.utils import instantiate +from omegaconf import OmegaConf + +from mmengine.model import BaseModule + + +from vlm.utils import load_checkpoint_with_prefix, load_state_dict_to_model + +BASE_DIR = 'work_dirs/ckpt' + + +class SAM2(BaseModule): + def __init__( + self, + cfg_path: str = "sam2_hiera_l.yaml", + ckpt_path: str = "sam2_hiera_large.pt", + hydra_overrides_extra=None, + apply_postprocessing=True, + ): + super().__init__(init_cfg=None) + + import third_parts.sam2 # noqa: F401 + + if hydra_overrides_extra is None: + hydra_overrides_extra = [] + hydra_overrides = [ + ## Extension: LLM prompt + "++model._target_=projects.llava_sam2.models.predictor.SAM2VideoPredictor", + ] + + if apply_postprocessing: + hydra_overrides_extra = hydra_overrides_extra.copy() + hydra_overrides_extra += [ + # dynamically fall back to multi-mask if the single mask is not stable + "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true", + "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05", + "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98", + # the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly as what users see from clicking + # "++model.binarize_mask_from_pts_for_mem_enc=true", + # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution) + # "++model.fill_hole_area=8", + ] + hydra_overrides.extend(hydra_overrides_extra) + + # Read config and init model + cfg = compose(config_name=cfg_path, overrides=hydra_overrides) + OmegaConf.resolve(cfg) + sam2_model = instantiate(cfg.model, _recursive_=True) + state_dict = load_checkpoint_with_prefix(os.path.join(BASE_DIR, ckpt_path)) + load_state_dict_to_model(sam2_model, state_dict) + + self.sam2_model = sam2_model + + self.hidden_dim = self.sam2_model.hidden_dim + + self.img_mean = (0.485, 0.456, 0.406) + self.img_std = 
(0.229, 0.224, 0.225) + + def inject_language_embd(self, inference_state, language_embd): + num_frame = len(language_embd) + num_obj = len(language_embd[0]) + mask_out = [] + for frame_idx in range(num_frame): + frame_mask_out = [] + for obj_idx in range(num_obj): + _language_embd = language_embd[frame_idx][obj_idx][None][None] + _, _, out_mask_logits = self.sam2_model.add_language_embd(inference_state, frame_idx, obj_idx + 100, _language_embd) + frame_mask_out.append(out_mask_logits) + frame_mask_out = torch.cat(frame_mask_out, dim=1) + mask_out.append(frame_mask_out) + mask_out = torch.cat(mask_out, dim=0) + return mask_out + + + def language_embd_inference(self, inference_state, language_embd): + num_frame = len(language_embd) + num_obj = len(language_embd[0]) + mask_out = [] + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + for frame_idx in range(num_frame): + frame_mask_out = [] + + for obj_idx in range(num_obj): + _language_embd = language_embd[frame_idx][obj_idx][None][None] + _, _, out_mask_logits = self.sam2_model.add_language_embd( + inference_state, + frame_idx, + obj_idx + 100, + _language_embd, + inference=True, + ) + frame_mask_out.append(out_mask_logits) + frame_mask_out = torch.cat(frame_mask_out, dim=1) + mask_out.append(frame_mask_out) + + + mask_out = [] + for out_frame_idx, out_obj_ids, out_mask_logits in self.sam2_model.propagate_in_video(inference_state): + mask_out.append(out_mask_logits) + mask_out = torch.cat(mask_out, dim=0) + return mask_out + + def get_sam2_embeddings(self, images): + return self.sam2_model.init_state(images) + + def forward(self, batch): + raise NotImplementedError + + def preprocess_image(self, image: torch.Tensor, dtype=torch.float32) -> torch.Tensor: + image = image / 255. + + img_mean = torch.tensor(self.img_mean, dtype=dtype, device=image.device)[:, None, None] + img_std = torch.tensor(self.img_std, dtype=dtype, device=image.device)[:, None, None] + image -= img_mean + image /= img_std + + return image diff --git a/projects/llava_sam2/models/sam2_train.py b/projects/llava_sam2/models/sam2_train.py new file mode 100644 index 0000000000000000000000000000000000000000..48bf876eba699ee0a3cbe021b4be63c0de8ae22c --- /dev/null +++ b/projects/llava_sam2/models/sam2_train.py @@ -0,0 +1,128 @@ +import os.path + +import torch + +from hydra import compose +from hydra.utils import instantiate +from omegaconf import OmegaConf + +from mmengine.model import BaseModule + + +from vlm.utils import load_checkpoint_with_prefix, load_state_dict_to_model + +BASE_DIR = 'pretrained/' + + +class SAM2TrainRunner(BaseModule): + def __init__( + self, + cfg_path: str = "sam2_hiera_l.yaml", + ckpt_path: str = "sam2_hiera_large.pt", + hydra_overrides_extra=None, + apply_postprocessing=True, + ): + super().__init__(init_cfg=None) + + import third_parts.sam2 # noqa: F401 + + if hydra_overrides_extra is None: + hydra_overrides_extra = [] + hydra_overrides = [ + ## Extension: LLM prompt + "++model._target_=projects.llava_sam2.models.extension.SAM2Base", + ] + + if apply_postprocessing: + hydra_overrides_extra = hydra_overrides_extra.copy() + hydra_overrides_extra += [ + # dynamically fall back to multi-mask if the single mask is not stable + # "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true", + # "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05", + # "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98", + # the sigmoid mask logits on interacted frames with clicks in the memory 
encoder so that the encoded masks are exactly as what users see from clicking + # "++model.binarize_mask_from_pts_for_mem_enc=true", + # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution) + # "++model.fill_hole_area=8", + ] + hydra_overrides.extend(hydra_overrides_extra) + + # Read config and init model + cfg = compose(config_name=cfg_path, overrides=hydra_overrides) + OmegaConf.resolve(cfg) + sam2_model = instantiate(cfg.model, _recursive_=True) + state_dict = load_checkpoint_with_prefix(os.path.join(BASE_DIR, ckpt_path)) + load_state_dict_to_model(sam2_model, state_dict) + + self.sam2_model = sam2_model + + self.hidden_dim = self.sam2_model.hidden_dim + self.img_mean = (0.485, 0.456, 0.406) + self.img_std = (0.229, 0.224, 0.225) + + def preprocess_image(self, image: torch.Tensor) -> torch.Tensor: + image = image / 255. + img_mean = torch.tensor(self.img_mean, dtype=image.dtype, device=image.device)[:, None, None] + img_std = torch.tensor(self.img_std, dtype=image.dtype, device=image.device)[:, None, None] + image -= img_mean + image /= img_std + return image + + def inject_language_embd(self, sam_states, language_embd, nf_nobj=None): + high_res_features = [ + x.permute(1, 2, 0).view(x.size(1), x.size(2), *s) + for x, s in zip(sam_states['current_vision_feats'][:-1], sam_states['feat_sizes'][:-1]) + ] + + B = sam_states['current_vision_feats'][-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = sam_states['feat_sizes'][-1] + + if self.sam2_model.directly_add_no_mem_embed: + # directly add no-mem embedding (instead of using the transformer encoder) + pix_feat_with_mem = sam_states['current_vision_feats'][-1] + self.sam2_model.no_mem_embed + pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) + else: + raise NotImplementedError("directly add no memory embedding is not implemented") + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + _, _, _, low_res_masks, high_res_masks, obj_ptr, _, = self.sam2_model._forward_sam_heads( + backbone_features=pix_feat_with_mem, + point_inputs=None, + mask_inputs=None, + high_res_features=high_res_features, + multimask_output=self.sam2_model._use_multimask(is_init_cond_frame=True, point_inputs=None), + # Inject language Embed if possible + language_embd=language_embd, + ) + + if nf_nobj is not None: + pred_masks = low_res_masks.squeeze(1) + pred_masks = pred_masks.unflatten(0, nf_nobj) + else: + pred_masks = low_res_masks + return pred_masks + + def get_sam2_embeddings(self, images, expand_size=1): + # Step 1: inference the backbone with the images + with torch.autocast(device_type="cuda", dtype=torch.bfloat16): + feats = self.sam2_model.forward_image(images) + + if expand_size > 1: + # feats['vision_features'] = feats['vision_features'][:, None].expand(-1, expand_size, -1, -1, -1).flatten(0, 1) + for i, feat in enumerate(feats["backbone_fpn"]): + feats["backbone_fpn"][i] = feat[:, None].expand(-1, expand_size, -1, -1, -1).flatten(0, 1) + for i, pos in enumerate(feats["vision_pos_enc"]): + pos = pos[:, None].expand(-1, expand_size, -1, -1, -1).flatten(0, 1) + feats["vision_pos_enc"][i] = pos + + # Step 2: Process the features to output + _, current_vision_feats, current_vision_pos_embeds, feat_sizes = self.sam2_model._prepare_backbone_features(feats) + + return { + "current_vision_feats": current_vision_feats, + "current_vision_pos_embeds": current_vision_pos_embeds, + "feat_sizes": feat_sizes, + } + + def forward(self, batch): + raise 
NotImplementedError diff --git a/projects/llava_sam2/models/utils.py b/projects/llava_sam2/models/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6b7aec3dea24fb236462c088a082f2c89d57835f --- /dev/null +++ b/projects/llava_sam2/models/utils.py @@ -0,0 +1,58 @@ + +def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, + image_size): + best_ratio_diff = float('inf') + best_ratio = (1, 1) + area = width * height + for ratio in target_ratios: + target_aspect_ratio = ratio[0] / ratio[1] + ratio_diff = abs(aspect_ratio - target_aspect_ratio) + if ratio_diff < best_ratio_diff: + best_ratio_diff = ratio_diff + best_ratio = ratio + elif ratio_diff == best_ratio_diff: + if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]: + best_ratio = ratio + return best_ratio + +def dynamic_preprocess(image, + min_num=1, + max_num=6, + image_size=448, + use_thumbnail=False): + orig_width, orig_height = image.size + aspect_ratio = orig_width / orig_height + + # calculate the existing image aspect ratio + target_ratios = {(i, j) + for n in range(min_num, max_num + 1) + for i in range(1, n + 1) for j in range(1, n + 1) + if i * j <= max_num and i * j >= min_num} + target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1]) + + # find the closest aspect ratio to the target + target_aspect_ratio = find_closest_aspect_ratio(aspect_ratio, + target_ratios, orig_width, + orig_height, image_size) + + # calculate the target width and height + target_width = image_size * target_aspect_ratio[0] + target_height = image_size * target_aspect_ratio[1] + blocks = target_aspect_ratio[0] * target_aspect_ratio[1] + + # resize the image + resized_img = image.resize((target_width, target_height)) + processed_images = [] + for i in range(blocks): + box = ((i % (target_width // image_size)) * image_size, + (i // (target_width // image_size)) * image_size, + ((i % (target_width // image_size)) + 1) * image_size, + ((i // (target_width // image_size)) + 1) * image_size) + # split the image + split_img = resized_img.crop(box) + processed_images.append(split_img) + assert len(processed_images) == blocks + if use_thumbnail and len(processed_images) != 1: + thumbnail_img = image.resize((image_size, image_size)) + processed_images.append(thumbnail_img) + return processed_images \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..87dab577f18cd23558674df7b3d345cebb38a508 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,7 @@ +transformers==4.42.3 +xtuner[deepspeed]==0.1.23 +timm==1.0.9 +mmdet==3.3.0 +hydra-core==1.3.2 +ninja==1.11.1 +decord==0.6.0 \ No newline at end of file diff --git a/third_parts/__init__.py b/third_parts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..566cd7d3e4c1f1c8342541a267626788ec9d47c7 --- /dev/null +++ b/third_parts/__init__.py @@ -0,0 +1 @@ +from .video_io import VideoReader diff --git a/third_parts/mmdet/datasets/refcoco.py b/third_parts/mmdet/datasets/refcoco.py new file mode 100644 index 0000000000000000000000000000000000000000..bd3f1a7cdba051b0a79ae5cb122117d360f06380 --- /dev/null +++ b/third_parts/mmdet/datasets/refcoco.py @@ -0,0 +1,163 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
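For reference, the tiling helpers in projects/llava_sam2/models/utils.py above work as follows: find_closest_aspect_ratio picks the tile grid whose aspect ratio best matches the input (switching to a larger grid on ties only when the input has enough area), and dynamic_preprocess resizes the image to that grid and crops it into image_size x image_size patches, optionally appending a global thumbnail. A minimal sketch, assuming the repo root is on PYTHONPATH (the 1000x500 input is an arbitrary example, not taken from the repo):

from PIL import Image
from projects.llava_sam2.models.utils import dynamic_preprocess

img = Image.new('RGB', (1000, 500))  # aspect ratio 2.0
tiles = dynamic_preprocess(img, max_num=6, image_size=448, use_thumbnail=True)
# (2, 1) is the closest grid with at most 6 tiles, so the image is resized to
# 896x448 and split into two 448x448 crops, plus one 448x448 thumbnail.
assert len(tiles) == 3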
+import collections +import os.path as osp +import random +from typing import Dict, List + +import mmengine +from mmengine.dataset import BaseDataset + +# from mmdet.registry import DATASETS + + +# @DATASETS.register_module() +class RefCocoDataset(BaseDataset): + """RefCOCO dataset. + + The `Refcoco` and `Refcoco+` dataset is based on + `ReferItGame: Referring to Objects in Photographs of Natural Scenes + `_. + + The `Refcocog` dataset is based on + `Generation and Comprehension of Unambiguous Object Descriptions + `_. + + Args: + ann_file (str): Annotation file path. + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. Defaults to ''. + data_prefix (str): Prefix for training data. + split_file (str): Split file path. + split (str): Split name. Defaults to 'train'. + text_mode (str): Text mode. Defaults to 'random'. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def __init__(self, + data_root: str, + ann_file: str, + split_file: str, + data_prefix: Dict, + split: str = 'train', + text_mode: str = 'random', + **kwargs): + self.split_file = split_file + self.split = split + + assert text_mode in ['original', 'random', 'concat', 'select_first'] + self.text_mode = text_mode + super().__init__( + data_root=data_root, + data_prefix=data_prefix, + ann_file=ann_file, + **kwargs, + ) + + def _join_prefix(self): + if not mmengine.is_abs(self.split_file) and self.split_file: + self.split_file = osp.join(self.data_root, self.split_file) + + return super()._join_prefix() + + def _init_refs(self): + """Initialize the refs for RefCOCO.""" + anns, imgs = {}, {} + for ann in self.instances['annotations']: + anns[ann['id']] = ann + for img in self.instances['images']: + imgs[img['id']] = img + + refs, ref_to_ann = {}, {} + for ref in self.splits: + # ids + ref_id = ref['ref_id'] + ann_id = ref['ann_id'] + # add mapping related to ref + refs[ref_id] = ref + ref_to_ann[ref_id] = anns[ann_id] + + self.refs = refs + self.ref_to_ann = ref_to_ann + + def load_data_list(self) -> List[dict]: + """Load data list.""" + self.splits = mmengine.load(self.split_file, file_format='pkl') + self.instances = mmengine.load(self.ann_file, file_format='json') + self._init_refs() + img_prefix = self.data_prefix['img_path'] + + ref_ids = [ + ref['ref_id'] for ref in self.splits if ref['split'] == self.split + ] + full_anno = [] + for ref_id in ref_ids: + ref = self.refs[ref_id] + ann = self.ref_to_ann[ref_id] + ann.update(ref) + full_anno.append(ann) + + image_id_list = [] + final_anno = {} + for anno in full_anno: + image_id_list.append(anno['image_id']) + final_anno[anno['ann_id']] = anno + annotations = [value for key, value in final_anno.items()] + + coco_train_id = [] + image_annot = {} + for i in range(len(self.instances['images'])): + coco_train_id.append(self.instances['images'][i]['id']) + image_annot[self.instances['images'][i] + ['id']] = self.instances['images'][i] + + images = [] + for image_id in list(set(image_id_list)): + images += [image_annot[image_id]] + + data_list = [] + + grounding_dict = collections.defaultdict(list) + for anno in annotations: + image_id = int(anno['image_id']) + grounding_dict[image_id].append(anno) + + join_path = mmengine.fileio.get_file_backend(img_prefix).join_path + for image in images: + img_id = image['id'] + instances = [] + sentences = [] + for grounding_anno in grounding_dict[img_id]: + texts = [x['raw'].lower() for x in grounding_anno['sentences']] + # random select one text + if self.text_mode == 'random': + idx = 
random.randint(0, len(texts) - 1) + text = [texts[idx]] + # concat all texts + elif self.text_mode == 'concat': + text = [''.join(texts)] + # select the first text + elif self.text_mode == 'select_first': + text = [texts[0]] + # use all texts + elif self.text_mode == 'original': + text = texts + else: + raise ValueError(f'Invalid text mode "{self.text_mode}".') + ins = [{ + 'mask': grounding_anno['segmentation'], + 'ignore_flag': 0 + }] * len(text) + instances.extend(ins) + sentences.extend(text) + data_info = { + 'img_path': join_path(img_prefix, image['file_name']), + 'img_id': img_id, + 'instances': instances, + 'text': sentences + } + data_list.append(data_info) + + if len(data_list) == 0: + raise ValueError(f'No sample in split "{self.split}".') + + return data_list diff --git a/third_parts/mmdet/models/losses/__init__.py b/third_parts/mmdet/models/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1074fe5e88d3dfbcc12c061f817ad42286787434 --- /dev/null +++ b/third_parts/mmdet/models/losses/__init__.py @@ -0,0 +1,2 @@ +from .cross_entropy_loss import CrossEntropyLoss +from .dice_loss import DiceLoss diff --git a/third_parts/mmdet/models/losses/accuracy.py b/third_parts/mmdet/models/losses/accuracy.py new file mode 100644 index 0000000000000000000000000000000000000000..d68484e13965ced3bd6b104071d22657a9b3fde6 --- /dev/null +++ b/third_parts/mmdet/models/losses/accuracy.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + + +def accuracy(pred, target, topk=1, thresh=None): + """Calculate accuracy according to the prediction and target. + + Args: + pred (torch.Tensor): The model prediction, shape (N, num_class) + target (torch.Tensor): The target of each prediction, shape (N, ) + topk (int | tuple[int], optional): If the predictions in ``topk`` + matches the target, the predictions will be regarded as + correct ones. Defaults to 1. + thresh (float, optional): If not None, predictions with scores under + this threshold are considered incorrect. Default to None. + + Returns: + float | tuple[float]: If the input ``topk`` is a single integer, + the function will return a single float as accuracy. If + ``topk`` is a tuple containing multiple integers, the + function will return a tuple containing accuracies of + each ``topk`` number. + """ + assert isinstance(topk, (int, tuple)) + if isinstance(topk, int): + topk = (topk, ) + return_single = True + else: + return_single = False + + maxk = max(topk) + if pred.size(0) == 0: + accu = [pred.new_tensor(0.) for i in range(len(topk))] + return accu[0] if return_single else accu + assert pred.ndim == 2 and target.ndim == 1 + assert pred.size(0) == target.size(0) + assert maxk <= pred.size(1), \ + f'maxk {maxk} exceeds pred dimension {pred.size(1)}' + pred_value, pred_label = pred.topk(maxk, dim=1) + pred_label = pred_label.t() # transpose to shape (maxk, N) + correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) + if thresh is not None: + # Only prediction values larger than thresh are counted as correct + correct = correct & (pred_value > thresh).t() + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / pred.size(0))) + return res[0] if return_single else res + + +class Accuracy(nn.Module): + + def __init__(self, topk=(1, ), thresh=None): + """Module to calculate the accuracy. + + Args: + topk (tuple, optional): The criterion used to calculate the + accuracy. Defaults to (1,). 
+ thresh (float, optional): If not None, predictions with scores + under this threshold are considered incorrect. Default to None. + """ + super().__init__() + self.topk = topk + self.thresh = thresh + + def forward(self, pred, target): + """Forward function to calculate accuracy. + + Args: + pred (torch.Tensor): Prediction of models. + target (torch.Tensor): Target for each prediction. + + Returns: + tuple[float]: The accuracies under different topk criterions. + """ + return accuracy(pred, target, self.topk, self.thresh) diff --git a/third_parts/mmdet/models/losses/cross_entropy_loss.py b/third_parts/mmdet/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..86af0dadf9c14b7d2fabc323bb71906bb155e91a --- /dev/null +++ b/third_parts/mmdet/models/losses/cross_entropy_loss.py @@ -0,0 +1,401 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F + +# from mmdet.registry import MODELS +from .accuracy import accuracy +from .utils import weight_reduce_loss + + +def cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=-100, + avg_non_ignore=False): + """Calculate the CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + label (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str, optional): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (list[float], optional): The weight for each class. + ignore_index (int | None): The label index to be ignored. + If None, it will be set to default value. Default: -100. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. 
+ + Returns: + torch.Tensor: The calculated loss + """ + # The default value of ignore_index is the same as F.cross_entropy + ignore_index = -100 if ignore_index is None else ignore_index + # element-wise losses + loss = F.cross_entropy( + pred, + label, + weight=class_weight, + reduction='none', + ignore_index=ignore_index) + + # average loss over non-ignored elements + # pytorch's official cross_entropy average loss over non-ignored elements + # refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa + if (avg_factor is None) and avg_non_ignore and reduction == 'mean': + avg_factor = label.numel() - (label == ignore_index).sum().item() + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index): + """Expand onehot labels to match the size of prediction.""" + bin_labels = labels.new_full((labels.size(0), label_channels), 0) + valid_mask = (labels >= 0) & (labels != ignore_index) + inds = torch.nonzero( + valid_mask & (labels < label_channels), as_tuple=False) + + if inds.numel() > 0: + bin_labels[inds, labels[inds]] = 1 + + valid_mask = valid_mask.view(-1, 1).expand(labels.size(0), + label_channels).float() + if label_weights is None: + bin_label_weights = valid_mask + else: + bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels) + bin_label_weights *= valid_mask + + return bin_labels, bin_label_weights, valid_mask + + +def binary_cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=-100, + avg_non_ignore=False): + """Calculate the binary CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, 1) or (N, ). + When the shape of pred is (N, 1), label will be expanded to + one-hot format, and when the shape of pred is (N, ), label + will not be expanded to one-hot format. + label (torch.Tensor): The learning label of the prediction, + with shape (N, ). + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (list[float], optional): The weight for each class. + ignore_index (int | None): The label index to be ignored. + If None, it will be set to default value. Default: -100. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + + Returns: + torch.Tensor: The calculated loss. + """ + # The default value of ignore_index is the same as F.cross_entropy + ignore_index = -100 if ignore_index is None else ignore_index + + if pred.dim() != label.dim(): + label, weight, valid_mask = _expand_onehot_labels( + label, weight, pred.size(-1), ignore_index) + else: + # should mask out the ignored elements + valid_mask = ((label >= 0) & (label != ignore_index)).float() + if weight is not None: + # The inplace writing method will have a mismatched broadcast + # shape error if the weight and valid_mask dimensions + # are inconsistent such as (B,N,1) and (B,N,C). 
+ weight = weight * valid_mask + else: + weight = valid_mask + + # average loss over non-ignored elements + if (avg_factor is None) and avg_non_ignore and reduction == 'mean': + avg_factor = valid_mask.sum().item() + + # weighted element-wise losses + weight = weight.float() + loss = F.binary_cross_entropy_with_logits( + pred, label.float(), pos_weight=class_weight, reduction='none') + # do the reduction for the weighted loss + loss = weight_reduce_loss( + loss, weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def mask_cross_entropy(pred, + target, + label, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=None, + **kwargs): + """Calculate the CrossEntropy loss for masks. + + Args: + pred (torch.Tensor): The prediction with shape (N, C, *), C is the + number of classes. The trailing * indicates arbitrary shape. + target (torch.Tensor): The learning label of the prediction. + label (torch.Tensor): ``label`` indicates the class label of the mask + corresponding object. This will be used to select the mask in the + of the class which the object belongs to when the mask prediction + if not class-agnostic. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (list[float], optional): The weight for each class. + ignore_index (None): Placeholder, to be consistent with other loss. + Default: None. + + Returns: + torch.Tensor: The calculated loss + + Example: + >>> N, C = 3, 11 + >>> H, W = 2, 2 + >>> pred = torch.randn(N, C, H, W) * 1000 + >>> target = torch.rand(N, H, W) + >>> label = torch.randint(0, C, size=(N,)) + >>> reduction = 'mean' + >>> avg_factor = None + >>> class_weights = None + >>> loss = mask_cross_entropy(pred, target, label, reduction, + >>> avg_factor, class_weights) + >>> assert loss.shape == (1,) + """ + assert ignore_index is None, 'BCE loss does not support ignore_index' + # TODO: handle these two reserved arguments + assert reduction == 'mean' and avg_factor is None + num_rois = pred.size()[0] + inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) + pred_slice = pred[inds, label].squeeze(1) + return F.binary_cross_entropy_with_logits( + pred_slice, target, weight=class_weight, reduction='mean')[None] + + +# @MODELS.register_module() +class CrossEntropyLoss(nn.Module): + + def __init__(self, + use_sigmoid=False, + use_mask=False, + reduction='mean', + class_weight=None, + ignore_index=None, + loss_weight=1.0, + avg_non_ignore=False): + """CrossEntropyLoss. + + Args: + use_sigmoid (bool, optional): Whether the prediction uses sigmoid + of softmax. Defaults to False. + use_mask (bool, optional): Whether to use mask cross entropy loss. + Defaults to False. + reduction (str, optional): . Defaults to 'mean'. + Options are "none", "mean" and "sum". + class_weight (list[float], optional): Weight of each class. + Defaults to None. + ignore_index (int | None): The label index to be ignored. + Defaults to None. + loss_weight (float, optional): Weight of the loss. Defaults to 1.0. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. 
+ """ + super(CrossEntropyLoss, self).__init__() + assert (use_sigmoid is False) or (use_mask is False) + self.use_sigmoid = use_sigmoid + self.use_mask = use_mask + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = class_weight + self.ignore_index = ignore_index + self.avg_non_ignore = avg_non_ignore + if ((ignore_index is not None) and not self.avg_non_ignore + and self.reduction == 'mean'): + warnings.warn( + 'Default ``avg_non_ignore`` is False, if you would like to ' + 'ignore the certain label and average loss over non-ignore ' + 'labels, which is the same with PyTorch official ' + 'cross_entropy, set ``avg_non_ignore=True``.') + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_mask: + self.cls_criterion = mask_cross_entropy + else: + self.cls_criterion = cross_entropy + + def extra_repr(self): + """Extra repr.""" + s = f'avg_non_ignore={self.avg_non_ignore}' + return s + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + ignore_index=None, + **kwargs): + """Forward function. + + Args: + cls_score (torch.Tensor): The prediction. + label (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The method used to reduce the + loss. Options are "none", "mean" and "sum". + ignore_index (int | None): The label index to be ignored. + If not None, it will override the default value. Default: None. + Returns: + torch.Tensor: The calculated loss. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if ignore_index is None: + ignore_index = self.ignore_index + + if self.class_weight is not None: + class_weight = cls_score.new_tensor( + self.class_weight, device=cls_score.device) + else: + class_weight = None + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + weight, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + ignore_index=ignore_index, + avg_non_ignore=self.avg_non_ignore, + **kwargs) + return loss_cls + + +# @MODELS.register_module() +class CrossEntropyCustomLoss(CrossEntropyLoss): + + def __init__(self, + use_sigmoid=False, + use_mask=False, + reduction='mean', + num_classes=-1, + class_weight=None, + ignore_index=None, + loss_weight=1.0, + avg_non_ignore=False): + """CrossEntropyCustomLoss. + + Args: + use_sigmoid (bool, optional): Whether the prediction uses sigmoid + of softmax. Defaults to False. + use_mask (bool, optional): Whether to use mask cross entropy loss. + Defaults to False. + reduction (str, optional): . Defaults to 'mean'. + Options are "none", "mean" and "sum". + num_classes (int): Number of classes to classify. + class_weight (list[float], optional): Weight of each class. + Defaults to None. + ignore_index (int | None): The label index to be ignored. + Defaults to None. + loss_weight (float, optional): Weight of the loss. Defaults to 1.0. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. 
+ """ + super(CrossEntropyCustomLoss, self).__init__() + assert (use_sigmoid is False) or (use_mask is False) + self.use_sigmoid = use_sigmoid + self.use_mask = use_mask + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = class_weight + self.ignore_index = ignore_index + self.avg_non_ignore = avg_non_ignore + if ((ignore_index is not None) and not self.avg_non_ignore + and self.reduction == 'mean'): + warnings.warn( + 'Default ``avg_non_ignore`` is False, if you would like to ' + 'ignore the certain label and average loss over non-ignore ' + 'labels, which is the same with PyTorch official ' + 'cross_entropy, set ``avg_non_ignore=True``.') + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_mask: + self.cls_criterion = mask_cross_entropy + else: + self.cls_criterion = cross_entropy + + self.num_classes = num_classes + + assert self.num_classes != -1 + + # custom output channels of the classifier + self.custom_cls_channels = True + # custom activation of cls_score + self.custom_activation = True + # custom accuracy of the classsifier + self.custom_accuracy = True + + def get_cls_channels(self, num_classes): + assert num_classes == self.num_classes + if not self.use_sigmoid: + return num_classes + 1 + else: + return num_classes + + def get_activation(self, cls_score): + + fine_cls_score = cls_score[:, :self.num_classes] + + if not self.use_sigmoid: + bg_score = cls_score[:, [-1]] + new_score = torch.cat([fine_cls_score, bg_score], dim=-1) + scores = F.softmax(new_score, dim=-1) + else: + score_classes = fine_cls_score.sigmoid() + score_neg = 1 - score_classes.sum(dim=1, keepdim=True) + score_neg = score_neg.clamp(min=0, max=1) + scores = torch.cat([score_classes, score_neg], dim=1) + + return scores + + def get_accuracy(self, cls_score, labels): + + fine_cls_score = cls_score[:, :self.num_classes] + + pos_inds = labels < self.num_classes + acc_classes = accuracy(fine_cls_score[pos_inds], labels[pos_inds]) + acc = dict() + acc['acc_classes'] = acc_classes + return acc diff --git a/third_parts/mmdet/models/losses/dice_loss.py b/third_parts/mmdet/models/losses/dice_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..bb04b9eb28f1bbbe94ad74ed931f57d0face460a --- /dev/null +++ b/third_parts/mmdet/models/losses/dice_loss.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +# from mmdet.registry import MODELS +from .utils import weight_reduce_loss + + +def dice_loss(pred, + target, + weight=None, + eps=1e-3, + reduction='mean', + naive_dice=False, + avg_factor=None): + """Calculate dice loss, there are two forms of dice loss is supported: + + - the one proposed in `V-Net: Fully Convolutional Neural + Networks for Volumetric Medical Image Segmentation + `_. + - the dice loss in which the power of the number in the + denominator is the first power instead of the second + power. + + Args: + pred (torch.Tensor): The prediction, has a shape (n, *) + target (torch.Tensor): The learning label of the prediction, + shape (n, *), same shape of pred. + weight (torch.Tensor, optional): The weight of loss for each + prediction, has a shape (n,). Defaults to None. + eps (float): Avoid dividing by zero. Default: 1e-3. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. + Options are "none", "mean" and "sum". 
+ naive_dice (bool, optional): If false, use the dice + loss defined in the V-Net paper, otherwise, use the + naive dice loss in which the power of the number in the + denominator is the first power instead of the second + power.Defaults to False. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + + input = pred.flatten(1) + target = target.flatten(1).float() + + a = torch.sum(input * target, 1) + if naive_dice: + b = torch.sum(input, 1) + c = torch.sum(target, 1) + d = (2 * a + eps) / (b + c + eps) + else: + b = torch.sum(input * input, 1) + eps + c = torch.sum(target * target, 1) + eps + d = (2 * a) / (b + c) + + loss = 1 - d + if weight is not None: + assert weight.ndim == loss.ndim + assert len(weight) == len(pred) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +# @MODELS.register_module() +class DiceLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=False, + loss_weight=1.0, + eps=1e-3): + """Compute dice loss. + + Args: + use_sigmoid (bool, optional): Whether to the prediction is + used for sigmoid or softmax. Defaults to True. + activate (bool): Whether to activate the predictions inside, + this will disable the inside sigmoid operation. + Defaults to True. + reduction (str, optional): The method used + to reduce the loss. Options are "none", + "mean" and "sum". Defaults to 'mean'. + naive_dice (bool, optional): If false, use the dice + loss defined in the V-Net paper, otherwise, use the + naive dice loss in which the power of the number in the + denominator is the first power instead of the second + power. Defaults to False. + loss_weight (float, optional): Weight of loss. Defaults to 1.0. + eps (float): Avoid dividing by zero. Defaults to 1e-3. + """ + + super(DiceLoss, self).__init__() + self.use_sigmoid = use_sigmoid + self.reduction = reduction + self.naive_dice = naive_dice + self.loss_weight = loss_weight + self.eps = eps + self.activate = activate + + def forward(self, + pred, + target, + weight=None, + reduction_override=None, + avg_factor=None): + """Forward function. + + Args: + pred (torch.Tensor): The prediction, has a shape (n, *). + target (torch.Tensor): The label of the prediction, + shape (n, *), same shape of pred. + weight (torch.Tensor, optional): The weight of loss for each + prediction, has a shape (n,). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Options are "none", "mean" and "sum". + + Returns: + torch.Tensor: The calculated loss + """ + + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + if self.activate: + if self.use_sigmoid: + pred = pred.sigmoid() + else: + raise NotImplementedError + + loss = self.loss_weight * dice_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + naive_dice=self.naive_dice, + avg_factor=avg_factor) + + return loss diff --git a/third_parts/mmdet/models/losses/utils.py b/third_parts/mmdet/models/losses/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5e6e7859f353f3e5456f0cfc1f66b4b0ad535427 --- /dev/null +++ b/third_parts/mmdet/models/losses/utils.py @@ -0,0 +1,125 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
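For orientation, a minimal sketch of how the two loss modules above (CrossEntropyLoss with use_sigmoid and DiceLoss) are typically combined for binary mask supervision; illustrative only, with made-up shapes and loss weights, and assuming the repository root is on PYTHONPATH:

import torch
from third_parts.mmdet.models.losses import CrossEntropyLoss, DiceLoss

pred = torch.randn(4, 1024)                       # 4 masks, 32*32 sampled points, logits
target = (torch.rand(4, 1024) > 0.5).float()      # binary ground-truth masks

loss_mask = CrossEntropyLoss(use_sigmoid=True, loss_weight=2.0)                    # per-point BCE
loss_dice = DiceLoss(use_sigmoid=True, naive_dice=True, eps=1.0, loss_weight=0.5)  # dice on the same logits

total = loss_mask(pred, target) + loss_dice(pred, target)   # scalar training loss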
+import functools +from typing import Callable, Optional + +import torch +import torch.nn.functional as F +from torch import Tensor + + +def reduce_loss(loss: Tensor, reduction: str) -> Tensor: + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. + reduction (str): Options are "none", "mean" and "sum". + + Return: + Tensor: Reduced loss tensor. + """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + + +def weight_reduce_loss(loss: Tensor, + weight: Optional[Tensor] = None, + reduction: str = 'mean', + avg_factor: Optional[float] = None) -> Tensor: + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss. + weight (Optional[Tensor], optional): Element-wise weights. + Defaults to None. + reduction (str, optional): Same as built-in losses of PyTorch. + Defaults to 'mean'. + avg_factor (Optional[float], optional): Average factor when + computing the mean of losses. Defaults to None. + + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + loss = reduce_loss(loss, reduction) + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + # Avoid causing ZeroDivisionError when avg_factor is 0.0, + # i.e., all labels of an image belong to ignore index. + eps = torch.finfo(torch.float32).eps + loss = loss.sum() / (avg_factor + eps) + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + + +def weighted_loss(loss_func: Callable) -> Callable: + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + `loss_func(pred, target, **kwargs)`. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like `loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)`. + + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) + >>> l1_loss(pred, target, reduction='none') + tensor([1., 1., 2.]) + >>> l1_loss(pred, target, weight, avg_factor=2) + tensor(1.5000) + """ + + @functools.wraps(loss_func) + def wrapper(pred: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + reduction: str = 'mean', + avg_factor: Optional[int] = None, + **kwargs) -> Tensor: + """ + Args: + pred (Tensor): The prediction. + target (Tensor): Target bboxes. + weight (Optional[Tensor], optional): The weight of loss for each + prediction. Defaults to None. + reduction (str, optional): Options are "none", "mean" and "sum". + Defaults to 'mean'. + avg_factor (Optional[int], optional): Average factor that is used + to average the loss. Defaults to None. + + Returns: + Tensor: Loss tensor. 
+ """ + # get element-wise loss + loss = loss_func(pred, target, **kwargs) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + return wrapper diff --git a/third_parts/mmdet/models/utils/__init__.py b/third_parts/mmdet/models/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2939d28237174db1425ea64bc1744c076c7ca8af --- /dev/null +++ b/third_parts/mmdet/models/utils/__init__.py @@ -0,0 +1 @@ +from .point_sample import get_uncertain_point_coords_with_randomness diff --git a/third_parts/mmdet/models/utils/point_sample.py b/third_parts/mmdet/models/utils/point_sample.py new file mode 100644 index 0000000000000000000000000000000000000000..1afc957f3da7d1dc030c21d40311c768c6952ea4 --- /dev/null +++ b/third_parts/mmdet/models/utils/point_sample.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.ops import point_sample +from torch import Tensor + + +def get_uncertainty(mask_preds: Tensor, labels: Tensor) -> Tensor: + """Estimate uncertainty based on pred logits. + + We estimate uncertainty as L1 distance between 0.0 and the logits + prediction in 'mask_preds' for the foreground class in `classes`. + + Args: + mask_preds (Tensor): mask predication logits, shape (num_rois, + num_classes, mask_height, mask_width). + + labels (Tensor): Either predicted or ground truth label for + each predicted mask, of length num_rois. + + Returns: + scores (Tensor): Uncertainty scores with the most uncertain + locations having the highest uncertainty score, + shape (num_rois, 1, mask_height, mask_width) + """ + if mask_preds.shape[1] == 1: + gt_class_logits = mask_preds.clone() + else: + inds = torch.arange(mask_preds.shape[0], device=mask_preds.device) + gt_class_logits = mask_preds[inds, labels].unsqueeze(1) + return -torch.abs(gt_class_logits) + + +def get_uncertain_point_coords_with_randomness( + mask_preds: Tensor, labels: Tensor, num_points: int, + oversample_ratio: float, importance_sample_ratio: float) -> Tensor: + """Get ``num_points`` most uncertain points with random points during + train. + + Sample points in [0, 1] x [0, 1] coordinate space based on their + uncertainty. The uncertainties are calculated for each point using + 'get_uncertainty()' function that takes point's logit prediction as + input. + + Args: + mask_preds (Tensor): A tensor of shape (num_rois, num_classes, + mask_height, mask_width) for class-specific or class-agnostic + prediction. + labels (Tensor): The ground truth class for each instance. + num_points (int): The number of points to sample. + oversample_ratio (float): Oversampling parameter. + importance_sample_ratio (float): Ratio of points that are sampled + via importnace sampling. + + Returns: + point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) + that contains the coordinates sampled points. + """ + assert oversample_ratio >= 1 + assert 0 <= importance_sample_ratio <= 1 + batch_size = mask_preds.shape[0] + num_sampled = int(num_points * oversample_ratio) + point_coords = torch.rand( + batch_size, num_sampled, 2, device=mask_preds.device) + point_logits = point_sample(mask_preds, point_coords) + # It is crucial to calculate uncertainty based on the sampled + # prediction value for the points. Calculating uncertainties of the + # coarse predictions first and sampling them for points leads to + # incorrect results. 
To illustrate this: assume uncertainty func( + # logits)=-abs(logits), a sampled point between two coarse + # predictions with -1 and 1 logits has 0 logits, and therefore 0 + # uncertainty value. However, if we calculate uncertainties for the + # coarse predictions first, both will have -1 uncertainty, + # and sampled point will get -1 uncertainty. + point_uncertainties = get_uncertainty(point_logits, labels) + num_uncertain_points = int(importance_sample_ratio * num_points) + num_random_points = num_points - num_uncertain_points + idx = torch.topk( + point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] + shift = num_sampled * torch.arange( + batch_size, dtype=torch.long, device=mask_preds.device) + idx += shift[:, None] + point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( + batch_size, num_uncertain_points, 2) + if num_random_points > 0: + rand_roi_coords = torch.rand( + batch_size, num_random_points, 2, device=mask_preds.device) + point_coords = torch.cat((point_coords, rand_roi_coords), dim=1) + return point_coords diff --git a/third_parts/sam2/__init__.py b/third_parts/sam2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e35d3f5bc43d604ccc7574c212f421dc4b76cde0 --- /dev/null +++ b/third_parts/sam2/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from hydra import initialize_config_module + +initialize_config_module("third_parts.sam2.sam2_configs", version_base="1.2") diff --git a/third_parts/sam2/automatic_mask_generator.py b/third_parts/sam2/automatic_mask_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..9c4c46d814347140ea2f7a01e8109bcf161103c0 --- /dev/null +++ b/third_parts/sam2/automatic_mask_generator.py @@ -0,0 +1,434 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
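A small, illustrative call of the importance-based point sampler defined above (assumes mmcv with point_sample is installed and the repository root is importable; tensor sizes are arbitrary):

import torch
from third_parts.mmdet.models.utils import get_uncertain_point_coords_with_randomness

mask_preds = torch.randn(3, 1, 28, 28)            # 3 class-agnostic mask logit maps
labels = torch.zeros(3, dtype=torch.long)         # unused when the class dim is 1

point_coords = get_uncertain_point_coords_with_randomness(
    mask_preds, labels, num_points=12,
    oversample_ratio=3.0, importance_sample_ratio=0.75)
print(point_coords.shape)                         # torch.Size([3, 12, 2]), xy in [0, 1]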
+ +# Adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/automatic_mask_generator.py +from typing import Any, Dict, List, Optional, Tuple + +import numpy as np +import torch +from torchvision.ops.boxes import batched_nms, box_area # type: ignore + +from third_parts.sam2.modeling.sam2_base import SAM2Base +from third_parts.sam2.sam2_image_predictor import SAM2ImagePredictor +from third_parts.sam2.utils.amg import ( + area_from_rle, + batch_iterator, + batched_mask_to_box, + box_xyxy_to_xywh, + build_all_layer_point_grids, + calculate_stability_score, + coco_encode_rle, + generate_crop_boxes, + is_box_near_crop_edge, + mask_to_rle_pytorch, + MaskData, + remove_small_regions, + rle_to_mask, + uncrop_boxes_xyxy, + uncrop_masks, + uncrop_points, +) + + +class SAM2AutomaticMaskGenerator: + def __init__( + self, + model: SAM2Base, + points_per_side: Optional[int] = 32, + points_per_batch: int = 64, + pred_iou_thresh: float = 0.8, + stability_score_thresh: float = 0.95, + stability_score_offset: float = 1.0, + mask_threshold: float = 0.0, + box_nms_thresh: float = 0.7, + crop_n_layers: int = 0, + crop_nms_thresh: float = 0.7, + crop_overlap_ratio: float = 512 / 1500, + crop_n_points_downscale_factor: int = 1, + point_grids: Optional[List[np.ndarray]] = None, + min_mask_region_area: int = 0, + output_mode: str = "binary_mask", + use_m2m: bool = False, + multimask_output: bool = True, + ) -> None: + """ + Using a SAM 2 model, generates masks for the entire image. + Generates a grid of point prompts over the image, then filters + low quality and duplicate masks. The default settings are chosen + for SAM 2 with a HieraL backbone. + + Arguments: + model (Sam): The SAM 2 model to use for mask prediction. + points_per_side (int or None): The number of points to be sampled + along one side of the image. The total number of points is + points_per_side**2. If None, 'point_grids' must provide explicit + point sampling. + points_per_batch (int): Sets the number of points run simultaneously + by the model. Higher numbers may be faster but use more GPU memory. + pred_iou_thresh (float): A filtering threshold in [0,1], using the + model's predicted mask quality. + stability_score_thresh (float): A filtering threshold in [0,1], using + the stability of the mask under changes to the cutoff used to binarize + the model's mask predictions. + stability_score_offset (float): The amount to shift the cutoff when + calculated the stability score. + mask_threshold (float): Threshold for binarizing the mask logits + box_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks. + crop_n_layers (int): If >0, mask prediction will be run again on + crops of the image. Sets the number of layers to run, where each + layer has 2**i_layer number of image crops. + crop_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks between different crops. + crop_overlap_ratio (float): Sets the degree to which crops overlap. + In the first crop layer, crops will overlap by this fraction of + the image length. Later layers with more crops scale down this overlap. + crop_n_points_downscale_factor (int): The number of points-per-side + sampled in layer n is scaled down by crop_n_points_downscale_factor**n. + point_grids (list(np.ndarray) or None): A list over explicit grids + of points used for sampling, normalized to [0,1]. The nth grid in the + list is used in the nth crop layer. Exclusive with points_per_side. 
+ min_mask_region_area (int): If >0, postprocessing will be applied + to remove disconnected regions and holes in masks with area smaller + than min_mask_region_area. Requires opencv. + output_mode (str): The form masks are returned in. Can be 'binary_mask', + 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. + For large resolutions, 'binary_mask' may consume large amounts of + memory. + use_m2m (bool): Whether to add a one step refinement using previous mask predictions. + multimask_output (bool): Whether to output multimask at each point of the grid. + """ + + assert (points_per_side is None) != ( + point_grids is None + ), "Exactly one of points_per_side or point_grid must be provided." + if points_per_side is not None: + self.point_grids = build_all_layer_point_grids( + points_per_side, + crop_n_layers, + crop_n_points_downscale_factor, + ) + elif point_grids is not None: + self.point_grids = point_grids + else: + raise ValueError("Can't have both points_per_side and point_grid be None.") + + assert output_mode in [ + "binary_mask", + "uncompressed_rle", + "coco_rle", + ], f"Unknown output_mode {output_mode}." + if output_mode == "coco_rle": + try: + from pycocotools import mask as mask_utils # type: ignore # noqa: F401 + except ImportError as e: + print("Please install pycocotools") + raise e + + self.predictor = SAM2ImagePredictor( + model, + max_hole_area=min_mask_region_area, + max_sprinkle_area=min_mask_region_area, + ) + self.points_per_batch = points_per_batch + self.pred_iou_thresh = pred_iou_thresh + self.stability_score_thresh = stability_score_thresh + self.stability_score_offset = stability_score_offset + self.mask_threshold = mask_threshold + self.box_nms_thresh = box_nms_thresh + self.crop_n_layers = crop_n_layers + self.crop_nms_thresh = crop_nms_thresh + self.crop_overlap_ratio = crop_overlap_ratio + self.crop_n_points_downscale_factor = crop_n_points_downscale_factor + self.min_mask_region_area = min_mask_region_area + self.output_mode = output_mode + self.use_m2m = use_m2m + self.multimask_output = multimask_output + + @torch.no_grad() + def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: + """ + Generates masks for the given image. + + Arguments: + image (np.ndarray): The image to generate masks for, in HWC uint8 format. + + Returns: + list(dict(str, any)): A list over records for masks. Each record is + a dict containing the following keys: + segmentation (dict(str, any) or np.ndarray): The mask. If + output_mode='binary_mask', is an array of shape HW. Otherwise, + is a dictionary containing the RLE. + bbox (list(float)): The box around the mask, in XYWH format. + area (int): The area in pixels of the mask. + predicted_iou (float): The model's own prediction of the mask's + quality. This is filtered by the pred_iou_thresh parameter. + point_coords (list(list(float))): The point coordinates input + to the model to generate this mask. + stability_score (float): A measure of the mask's quality. This + is filtered on using the stability_score_thresh parameter. + crop_box (list(float)): The crop of the image used to generate + the mask, given in XYWH format. 
+ """ + + # Generate masks + mask_data = self._generate_masks(image) + + # Encode masks + if self.output_mode == "coco_rle": + mask_data["segmentations"] = [ + coco_encode_rle(rle) for rle in mask_data["rles"] + ] + elif self.output_mode == "binary_mask": + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + else: + mask_data["segmentations"] = mask_data["rles"] + + # Write mask records + curr_anns = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + curr_anns.append(ann) + + return curr_anns + + def _generate_masks(self, image: np.ndarray) -> MaskData: + orig_size = image.shape[:2] + crop_boxes, layer_idxs = generate_crop_boxes( + orig_size, self.crop_n_layers, self.crop_overlap_ratio + ) + + # Iterate over image crops + data = MaskData() + for crop_box, layer_idx in zip(crop_boxes, layer_idxs): + crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) + data.cat(crop_data) + + # Remove duplicate masks between crops + if len(crop_boxes) > 1: + # Prefer masks from smaller crops + scores = 1 / box_area(data["crop_boxes"]) + scores = scores.to(data["boxes"].device) + keep_by_nms = batched_nms( + data["boxes"].float(), + scores, + torch.zeros_like(data["boxes"][:, 0]), # categories + iou_threshold=self.crop_nms_thresh, + ) + data.filter(keep_by_nms) + data.to_numpy() + return data + + def _process_crop( + self, + image: np.ndarray, + crop_box: List[int], + crop_layer_idx: int, + orig_size: Tuple[int, ...], + ) -> MaskData: + # Crop the image and calculate embeddings + x0, y0, x1, y1 = crop_box + cropped_im = image[y0:y1, x0:x1, :] + cropped_im_size = cropped_im.shape[:2] + self.predictor.set_image(cropped_im) + + # Get points for this crop + points_scale = np.array(cropped_im_size)[None, ::-1] + points_for_image = self.point_grids[crop_layer_idx] * points_scale + + # Generate masks for this crop in batches + data = MaskData() + for (points,) in batch_iterator(self.points_per_batch, points_for_image): + batch_data = self._process_batch( + points, cropped_im_size, crop_box, orig_size, normalize=True + ) + data.cat(batch_data) + del batch_data + self.predictor.reset_predictor() + + # Remove duplicates within this crop. 
+ keep_by_nms = batched_nms( + data["boxes"].float(), + data["iou_preds"], + torch.zeros_like(data["boxes"][:, 0]), # categories + iou_threshold=self.box_nms_thresh, + ) + data.filter(keep_by_nms) + + # Return to the original image frame + data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) + data["points"] = uncrop_points(data["points"], crop_box) + data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) + + return data + + def _process_batch( + self, + points: np.ndarray, + im_size: Tuple[int, ...], + crop_box: List[int], + orig_size: Tuple[int, ...], + normalize=False, + ) -> MaskData: + orig_h, orig_w = orig_size + + # Run model on this batch + points = torch.as_tensor(points, device=self.predictor.device) + in_points = self.predictor._transforms.transform_coords( + points, normalize=normalize, orig_hw=im_size + ) + in_labels = torch.ones( + in_points.shape[0], dtype=torch.int, device=in_points.device + ) + masks, iou_preds, low_res_masks = self.predictor._predict( + in_points[:, None, :], + in_labels[:, None], + multimask_output=self.multimask_output, + return_logits=True, + ) + + # Serialize predictions and store in MaskData + data = MaskData( + masks=masks.flatten(0, 1), + iou_preds=iou_preds.flatten(0, 1), + points=points.repeat_interleave(masks.shape[1], dim=0), + low_res_masks=low_res_masks.flatten(0, 1), + ) + del masks + + if not self.use_m2m: + # Filter by predicted IoU + if self.pred_iou_thresh > 0.0: + keep_mask = data["iou_preds"] > self.pred_iou_thresh + data.filter(keep_mask) + + # Calculate and filter by stability score + data["stability_score"] = calculate_stability_score( + data["masks"], self.mask_threshold, self.stability_score_offset + ) + if self.stability_score_thresh > 0.0: + keep_mask = data["stability_score"] >= self.stability_score_thresh + data.filter(keep_mask) + else: + # One step refinement using previous mask predictions + in_points = self.predictor._transforms.transform_coords( + data["points"], normalize=normalize, orig_hw=im_size + ) + labels = torch.ones( + in_points.shape[0], dtype=torch.int, device=in_points.device + ) + masks, ious = self.refine_with_m2m( + in_points, labels, data["low_res_masks"], self.points_per_batch + ) + data["masks"] = masks.squeeze(1) + data["iou_preds"] = ious.squeeze(1) + + if self.pred_iou_thresh > 0.0: + keep_mask = data["iou_preds"] > self.pred_iou_thresh + data.filter(keep_mask) + + data["stability_score"] = calculate_stability_score( + data["masks"], self.mask_threshold, self.stability_score_offset + ) + if self.stability_score_thresh > 0.0: + keep_mask = data["stability_score"] >= self.stability_score_thresh + data.filter(keep_mask) + + # Threshold masks and calculate boxes + data["masks"] = data["masks"] > self.mask_threshold + data["boxes"] = batched_mask_to_box(data["masks"]) + + # Filter boxes that touch crop boundaries + keep_mask = ~is_box_near_crop_edge( + data["boxes"], crop_box, [0, 0, orig_w, orig_h] + ) + if not torch.all(keep_mask): + data.filter(keep_mask) + + # Compress to RLE + data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) + data["rles"] = mask_to_rle_pytorch(data["masks"]) + del data["masks"] + + return data + + @staticmethod + def postprocess_small_regions( + mask_data: MaskData, min_area: int, nms_thresh: float + ) -> MaskData: + """ + Removes small disconnected regions and holes in masks, then reruns + box NMS to remove any new duplicates. + + Edits mask_data in place. + + Requires open-cv as a dependency. 
+ """ + if len(mask_data["rles"]) == 0: + return mask_data + + # Filter small disconnected regions and holes + new_masks = [] + scores = [] + for rle in mask_data["rles"]: + mask = rle_to_mask(rle) + + mask, changed = remove_small_regions(mask, min_area, mode="holes") + unchanged = not changed + mask, changed = remove_small_regions(mask, min_area, mode="islands") + unchanged = unchanged and not changed + + new_masks.append(torch.as_tensor(mask).unsqueeze(0)) + # Give score=0 to changed masks and score=1 to unchanged masks + # so NMS will prefer ones that didn't need postprocessing + scores.append(float(unchanged)) + + # Recalculate boxes and remove any new duplicates + masks = torch.cat(new_masks, dim=0) + boxes = batched_mask_to_box(masks) + keep_by_nms = batched_nms( + boxes.float(), + torch.as_tensor(scores), + torch.zeros_like(boxes[:, 0]), # categories + iou_threshold=nms_thresh, + ) + + # Only recalculate RLEs for masks that have changed + for i_mask in keep_by_nms: + if scores[i_mask] == 0.0: + mask_torch = masks[i_mask].unsqueeze(0) + mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0] + mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly + mask_data.filter(keep_by_nms) + + return mask_data + + def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch): + new_masks = [] + new_iou_preds = [] + + for cur_points, cur_point_labels, low_res_mask in batch_iterator( + points_per_batch, points, point_labels, low_res_masks + ): + best_masks, best_iou_preds, _ = self.predictor._predict( + cur_points[:, None, :], + cur_point_labels[:, None], + mask_input=low_res_mask[:, None, :], + multimask_output=False, + return_logits=True, + ) + new_masks.append(best_masks) + new_iou_preds.append(best_iou_preds) + masks = torch.cat(new_masks, dim=0) + return masks, torch.cat(new_iou_preds, dim=0) diff --git a/third_parts/sam2/build_sam.py b/third_parts/sam2/build_sam.py new file mode 100644 index 0000000000000000000000000000000000000000..8a4c873cda7895096593754a35ef83f8494b6d60 --- /dev/null +++ b/third_parts/sam2/build_sam.py @@ -0,0 +1,89 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import logging + +import torch +from hydra import compose +from hydra.utils import instantiate +from omegaconf import OmegaConf + + +def build_sam2( + config_file, + ckpt_path=None, + device="cuda", + mode="eval", + hydra_overrides_extra=[], + apply_postprocessing=True, +): + + if apply_postprocessing: + hydra_overrides_extra = hydra_overrides_extra.copy() + hydra_overrides_extra += [ + # dynamically fall back to multi-mask if the single mask is not stable + "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true", + "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05", + "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98", + ] + # Read config and init model + cfg = compose(config_name=config_file, overrides=hydra_overrides_extra) + OmegaConf.resolve(cfg) + model = instantiate(cfg.model, _recursive_=True) + _load_checkpoint(model, ckpt_path) + model = model.to(device) + if mode == "eval": + model.eval() + return model + + +def build_sam2_video_predictor( + config_file, + ckpt_path=None, + device="cuda", + mode="eval", + hydra_overrides_extra=[], + apply_postprocessing=True, +): + hydra_overrides = [ + "++model._target_=third_parts.sam2.sam2_video_predictor.SAM2VideoPredictor", + ] + if apply_postprocessing: + hydra_overrides_extra = hydra_overrides_extra.copy() + hydra_overrides_extra += [ + # dynamically fall back to multi-mask if the single mask is not stable + "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true", + "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05", + "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98", + # the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly as what users see from clicking + "++model.binarize_mask_from_pts_for_mem_enc=true", + # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution) + "++model.fill_hole_area=8", + ] + hydra_overrides.extend(hydra_overrides_extra) + + # Read config and init model + cfg = compose(config_name=config_file, overrides=hydra_overrides) + OmegaConf.resolve(cfg) + model = instantiate(cfg.model, _recursive_=True) + _load_checkpoint(model, ckpt_path) + model = model.to(device) + if mode == "eval": + model.eval() + return model + + +def _load_checkpoint(model, ckpt_path): + if ckpt_path is not None: + sd = torch.load(ckpt_path, map_location="cpu")["model"] + missing_keys, unexpected_keys = model.load_state_dict(sd) + if missing_keys: + logging.error(missing_keys) + raise RuntimeError() + if unexpected_keys: + logging.error(unexpected_keys) + raise RuntimeError() + logging.info("Loaded checkpoint sucessfully") diff --git a/third_parts/sam2/csrc/connected_components.cu b/third_parts/sam2/csrc/connected_components.cu new file mode 100644 index 0000000000000000000000000000000000000000..ced21eb32eaaadb818d441c1322b99d1bf068f45 --- /dev/null +++ b/third_parts/sam2/csrc/connected_components.cu @@ -0,0 +1,289 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// All rights reserved. + +// This source code is licensed under the license found in the +// LICENSE file in the root directory of this source tree. + +// adapted from https://github.com/zsef123/Connected_components_PyTorch +// with license found in the LICENSE_cctorch file in the root directory. 
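Similarly, a hedged sketch of the video-predictor builder above; the config/checkpoint names are again placeholders, and apply_postprocessing=True simply injects the hydra overrides listed in the function (dynamic multimask fallback, binarized memory masks, small-hole filling):

from third_parts.sam2.build_sam import build_sam2_video_predictor

predictor = build_sam2_video_predictor(
    "sam2_hiera_l.yaml",                              # placeholder config name
    ckpt_path="checkpoints/sam2_hiera_large.pt",      # placeholder checkpoint
    device="cuda",
    mode="eval",
    apply_postprocessing=True,
)
# The ++model._target_ override swaps the instantiated class for SAM2VideoPredictor.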
+#include <ATen/cuda/CUDAContext.h>
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include <torch/extension.h>
+#include <torch/script.h>
+#include <vector>
+
+// 2d
+#define BLOCK_ROWS 16
+#define BLOCK_COLS 16
+
+namespace cc2d {
+
+template <typename T>
+__device__ __forceinline__ unsigned char hasBit(T bitmap, unsigned char pos) {
+  return (bitmap >> pos) & 1;
+}
+
+__device__ int32_t find(const int32_t* s_buf, int32_t n) {
+  while (s_buf[n] != n)
+    n = s_buf[n];
+  return n;
+}
+
+__device__ int32_t find_n_compress(int32_t* s_buf, int32_t n) {
+  const int32_t id = n;
+  while (s_buf[n] != n) {
+    n = s_buf[n];
+    s_buf[id] = n;
+  }
+  return n;
+}
+
+__device__ void union_(int32_t* s_buf, int32_t a, int32_t b) {
+  bool done;
+  do {
+    a = find(s_buf, a);
+    b = find(s_buf, b);
+
+    if (a < b) {
+      int32_t old = atomicMin(s_buf + b, a);
+      done = (old == b);
+      b = old;
+    } else if (b < a) {
+      int32_t old = atomicMin(s_buf + a, b);
+      done = (old == a);
+      a = old;
+    } else
+      done = true;
+
+  } while (!done);
+}
+
+__global__ void
+init_labeling(int32_t* label, const uint32_t W, const uint32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+  const uint32_t idx = row * W + col;
+
+  if (row < H && col < W)
+    label[idx] = idx;
+}
+
+__global__ void
+merge(uint8_t* img, int32_t* label, const uint32_t W, const uint32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+  const uint32_t idx = row * W + col;
+
+  if (row >= H || col >= W)
+    return;
+
+  uint32_t P = 0;
+
+  if (img[idx])
+    P |= 0x777;
+  if (row + 1 < H && img[idx + W])
+    P |= 0x777 << 4;
+  if (col + 1 < W && img[idx + 1])
+    P |= 0x777 << 1;
+
+  if (col == 0)
+    P &= 0xEEEE;
+  if (col + 1 >= W)
+    P &= 0x3333;
+  else if (col + 2 >= W)
+    P &= 0x7777;
+
+  if (row == 0)
+    P &= 0xFFF0;
+  if (row + 1 >= H)
+    P &= 0xFF;
+
+  if (P > 0) {
+    // If need check about top-left pixel(if flag the first bit) and hit the
+    // top-left pixel
+    if (hasBit(P, 0) && img[idx - W - 1]) {
+      union_(label, idx, idx - 2 * W - 2); // top left block
+    }
+
+    if ((hasBit(P, 1) && img[idx - W]) || (hasBit(P, 2) && img[idx - W + 1]))
+      union_(label, idx, idx - 2 * W); // top bottom block
+
+    if (hasBit(P, 3) && img[idx + 2 - W])
+      union_(label, idx, idx - 2 * W + 2); // top right block
+
+    if ((hasBit(P, 4) && img[idx - 1]) || (hasBit(P, 8) && img[idx + W - 1]))
+      union_(label, idx, idx - 2); // just left block
+  }
+}
+
+__global__ void compression(int32_t* label, const int32_t W, const int32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+  const uint32_t idx = row * W + col;
+
+  if (row < H && col < W)
+    find_n_compress(label, idx);
+}
+
+__global__ void final_labeling(
+    const uint8_t* img,
+    int32_t* label,
+    const int32_t W,
+    const int32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+  const uint32_t idx = row * W + col;
+
+  if (row >= H || col >= W)
+    return;
+
+  int32_t y = label[idx] + 1;
+
+  if (img[idx])
+    label[idx] = y;
+  else
+    label[idx] = 0;
+
+  if (col + 1 < W) {
+    if (img[idx + 1])
+      label[idx + 1] = y;
+    else
+      label[idx + 1] = 0;
+
+    if (row + 1 < H) {
+      if (img[idx + W + 1])
+        label[idx + W + 1] = y;
+      else
+        label[idx + W + 1] = 0;
+    }
+  }
+
+  if (row + 1 < H) {
+    if (img[idx + W])
+      label[idx + W] = y;
+    else
+      label[idx + W] = 0;
+  }
+}
+
+__global__ void init_counting(
+    const int32_t* label,
+    int32_t* count_init,
+    const int32_t W,
+    const int32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
+  const uint32_t idx = row * W + col;
+
+  if (row >= H || col >= W)
+    return;
+
+  int32_t y = label[idx];
+  if (y > 0) {
+    int32_t count_idx = y - 1;
+    atomicAdd(count_init + count_idx, 1);
+  }
+}
+
+__global__ void final_counting(
+    const int32_t* label,
+    const int32_t* count_init,
+    int32_t* count_final,
+    const int32_t W,
+    const int32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
+  const uint32_t idx = row * W + col;
+
+  if (row >= H || col >= W)
+    return;
+
+  int32_t y = label[idx];
+  if (y > 0) {
+    int32_t count_idx = y - 1;
+    count_final[idx] = count_init[count_idx];
+  } else {
+    count_final[idx] = 0;
+  }
+}
+
+} // namespace cc2d
+
+std::vector<torch::Tensor> get_connected_componnets(
+    const torch::Tensor& inputs) {
+  AT_ASSERTM(inputs.is_cuda(), "inputs must be a CUDA tensor");
+  AT_ASSERTM(inputs.ndimension() == 4, "inputs must be [N, 1, H, W] shape");
+  AT_ASSERTM(
+      inputs.scalar_type() == torch::kUInt8, "inputs must be a uint8 type");
+
+  const uint32_t N = inputs.size(0);
+  const uint32_t C = inputs.size(1);
+  const uint32_t H = inputs.size(2);
+  const uint32_t W = inputs.size(3);
+
+  AT_ASSERTM(C == 1, "inputs must be [N, 1, H, W] shape");
+  AT_ASSERTM((H % 2) == 0, "height must be an even number");
+  AT_ASSERTM((W % 2) == 0, "width must be an even number");
+
+  // label must be uint32_t
+  auto label_options =
+      torch::TensorOptions().dtype(torch::kInt32).device(inputs.device());
+  torch::Tensor labels = torch::zeros({N, C, H, W}, label_options);
+  torch::Tensor counts_init = torch::zeros({N, C, H, W}, label_options);
+  torch::Tensor counts_final = torch::zeros({N, C, H, W}, label_options);
+
+  dim3 grid = dim3(
+      ((W + 1) / 2 + BLOCK_COLS - 1) / BLOCK_COLS,
+      ((H + 1) / 2 + BLOCK_ROWS - 1) / BLOCK_ROWS);
+  dim3 block = dim3(BLOCK_COLS, BLOCK_ROWS);
+  dim3 grid_count =
+      dim3((W + BLOCK_COLS) / BLOCK_COLS, (H + BLOCK_ROWS) / BLOCK_ROWS);
+  dim3 block_count = dim3(BLOCK_COLS, BLOCK_ROWS);
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  for (int n = 0; n < N; n++) {
+    uint32_t offset = n * H * W;
+
+    cc2d::init_labeling<<<grid, block, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset, W, H);
+    cc2d::merge<<<grid, block, 0, stream>>>(
+        inputs.data_ptr<uint8_t>() + offset,
+        labels.data_ptr<int32_t>() + offset,
+        W,
+        H);
+    cc2d::compression<<<grid, block, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset, W, H);
+    cc2d::final_labeling<<<grid, block, 0, stream>>>(
+        inputs.data_ptr<uint8_t>() + offset,
+        labels.data_ptr<int32_t>() + offset,
+        W,
+        H);
+
+    // get the counting of each pixel
+    cc2d::init_counting<<<grid_count, block_count, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset,
+        counts_init.data_ptr<int32_t>() + offset,
+        W,
+        H);
+    cc2d::final_counting<<<grid_count, block_count, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset,
+        counts_init.data_ptr<int32_t>() + offset,
+        counts_final.data_ptr<int32_t>() + offset,
+        W,
+        H);
+  }
+
+  // returned values are [labels, counts]
+  std::vector<torch::Tensor> outputs;
+  outputs.push_back(labels);
+  outputs.push_back(counts_final);
+  return outputs;
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def(
+      "get_connected_componnets",
+      &get_connected_componnets,
+      "get_connected_componnets");
+}
diff --git a/third_parts/sam2/modeling/__init__.py b/third_parts/sam2/modeling/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae
--- /dev/null
+++ b/third_parts/sam2/modeling/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc.
and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/third_parts/sam2/modeling/backbones/__init__.py b/third_parts/sam2/modeling/backbones/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/third_parts/sam2/modeling/backbones/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/third_parts/sam2/modeling/backbones/hieradet.py b/third_parts/sam2/modeling/backbones/hieradet.py new file mode 100644 index 0000000000000000000000000000000000000000..a7163dfb60404bb5e277c752f70b120511921612 --- /dev/null +++ b/third_parts/sam2/modeling/backbones/hieradet.py @@ -0,0 +1,295 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from functools import partial +from typing import List, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from third_parts.sam2.modeling.backbones.utils import ( + PatchEmbed, + window_partition, + window_unpartition, +) + +from third_parts.sam2.modeling.sam2_utils import DropPath, MLP + + +def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor: + if pool is None: + return x + # (B, H, W, C) -> (B, C, H, W) + x = x.permute(0, 3, 1, 2) + x = pool(x) + # (B, C, H', W') -> (B, H', W', C) + x = x.permute(0, 2, 3, 1) + if norm: + x = norm(x) + + return x + + +class MultiScaleAttention(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + num_heads: int, + q_pool: nn.Module = None, + ): + super().__init__() + + self.dim = dim + self.dim_out = dim_out + + self.num_heads = num_heads + head_dim = dim_out // num_heads + self.scale = head_dim**-0.5 + + self.q_pool = q_pool + self.qkv = nn.Linear(dim, dim_out * 3) + self.proj = nn.Linear(dim_out, dim_out) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, _ = x.shape + # qkv with shape (B, H * W, 3, nHead, C) + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1) + # q, k, v with shape (B, H * W, nheads, C) + q, k, v = torch.unbind(qkv, 2) + + # Q pooling (for downsample at stage changes) + if self.q_pool: + q = do_pool(q.reshape(B, H, W, -1), self.q_pool) + H, W = q.shape[1:3] # downsampled shape + q = q.reshape(B, H * W, self.num_heads, -1) + + # Torch's SDPA expects [B, nheads, H*W, C] so we transpose + x = F.scaled_dot_product_attention( + q.transpose(1, 2), + k.transpose(1, 2), + v.transpose(1, 2), + ) + # Transpose back + x = x.transpose(1, 2) + x = x.reshape(B, H, W, -1) + + x = self.proj(x) + + return x + + +class MultiScaleBlock(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + num_heads: int, + mlp_ratio: float = 4.0, + drop_path: float = 0.0, + norm_layer: Union[nn.Module, str] = "LayerNorm", + q_stride: Tuple[int, int] = None, + act_layer: nn.Module = nn.GELU, + window_size: int = 0, + ): + super().__init__() + + if isinstance(norm_layer, str): + norm_layer = partial(getattr(nn, norm_layer), eps=1e-6) + + self.dim = dim + self.dim_out = dim_out + self.norm1 = norm_layer(dim) + + self.window_size = window_size + + self.pool, self.q_stride = None, q_stride + if 
self.q_stride: + self.pool = nn.MaxPool2d( + kernel_size=q_stride, stride=q_stride, ceil_mode=False + ) + + self.attn = MultiScaleAttention( + dim, + dim_out, + num_heads=num_heads, + q_pool=self.pool, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(dim_out) + self.mlp = MLP( + dim_out, + int(dim_out * mlp_ratio), + dim_out, + num_layers=2, + activation=act_layer, + ) + + if dim != dim_out: + self.proj = nn.Linear(dim, dim_out) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x # B, H, W, C + x = self.norm1(x) + + # Skip connection + if self.dim != self.dim_out: + shortcut = do_pool(self.proj(x), self.pool) + + # Window partition + window_size = self.window_size + if window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, window_size) + + # Window Attention + Q Pooling (if stage change) + x = self.attn(x) + if self.q_stride: + # Shapes have changed due to Q pooling + window_size = self.window_size // self.q_stride[0] + H, W = shortcut.shape[1:3] + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + pad_hw = (H + pad_h, W + pad_w) + + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, window_size, pad_hw, (H, W)) + + x = shortcut + self.drop_path(x) + # MLP + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Hiera(nn.Module): + """ + Reference: https://arxiv.org/abs/2306.00989 + """ + + def __init__( + self, + embed_dim: int = 96, # initial embed dim + num_heads: int = 1, # initial number of heads + drop_path_rate: float = 0.0, # stochastic depth + q_pool: int = 3, # number of q_pool stages + q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages + stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage + dim_mul: float = 2.0, # dim_mul factor at stage shift + head_mul: float = 2.0, # head_mul factor at stage shift + window_pos_embed_bkg_spatial_size: Tuple[int, int] = (14, 14), + # window size per stage, when not using global att. + window_spec: Tuple[int, ...] = ( + 8, + 4, + 14, + 7, + ), + # global attn in these blocks + global_att_blocks: Tuple[int, ...] = ( + 12, + 16, + 20, + ), + return_interm_layers=True, # return feats from every stage + ): + super().__init__() + + assert len(stages) == len(window_spec) + self.window_spec = window_spec + + depth = sum(stages) + self.q_stride = q_stride + self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)] + assert 0 <= q_pool <= len(self.stage_ends[:-1]) + self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool] + self.return_interm_layers = return_interm_layers + + self.patch_embed = PatchEmbed( + embed_dim=embed_dim, + ) + # Which blocks have global att? 
+ self.global_att_blocks = global_att_blocks + + # Windowed positional embedding (https://arxiv.org/abs/2311.05613) + self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size + self.pos_embed = nn.Parameter( + torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size) + ) + self.pos_embed_window = nn.Parameter( + torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0]) + ) + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + + cur_stage = 1 + self.blocks = nn.ModuleList() + + for i in range(depth): + dim_out = embed_dim + # lags by a block, so first block of + # next stage uses an initial window size + # of previous stage and final window size of current stage + window_size = self.window_spec[cur_stage - 1] + + if self.global_att_blocks is not None: + window_size = 0 if i in self.global_att_blocks else window_size + + if i - 1 in self.stage_ends: + dim_out = int(embed_dim * dim_mul) + num_heads = int(num_heads * head_mul) + cur_stage += 1 + + block = MultiScaleBlock( + dim=embed_dim, + dim_out=dim_out, + num_heads=num_heads, + drop_path=dpr[i], + q_stride=self.q_stride if i in self.q_pool_blocks else None, + window_size=window_size, + ) + + embed_dim = dim_out + self.blocks.append(block) + + self.channel_list = ( + [self.blocks[i].dim_out for i in self.stage_ends[::-1]] + if return_interm_layers + else [self.blocks[-1].dim_out] + ) + + def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor: + h, w = hw + window_embed = self.pos_embed_window + pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic") + pos_embed = pos_embed + window_embed.tile( + [x // y for x, y in zip(pos_embed.shape, window_embed.shape)] + ) + pos_embed = pos_embed.permute(0, 2, 3, 1) + return pos_embed + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + x = self.patch_embed(x) + # x: (B, H, W, C) + + # Add pos embed + x = x + self._get_pos_embed(x.shape[1:3]) + + outputs = [] + for i, blk in enumerate(self.blocks): + x = blk(x) + if (i == self.stage_ends[-1]) or ( + i in self.stage_ends and self.return_interm_layers + ): + feats = x.permute(0, 3, 1, 2) + outputs.append(feats) + + return outputs diff --git a/third_parts/sam2/modeling/backbones/image_encoder.py b/third_parts/sam2/modeling/backbones/image_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..5f92baf47dcab96385ff99899fd3e3a642c1cf9c --- /dev/null +++ b/third_parts/sam2/modeling/backbones/image_encoder.py @@ -0,0 +1,133 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ImageEncoder(nn.Module): + def __init__( + self, + trunk: nn.Module, + neck: nn.Module, + scalp: int = 0, + ): + super().__init__() + self.trunk = trunk + self.neck = neck + self.scalp = scalp + assert ( + self.trunk.channel_list == self.neck.backbone_channel_list + ), f"Channel dims of trunk and neck do not match. 
Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}" + + def forward(self, sample: torch.Tensor): + # Forward through backbone + features, pos = self.neck(self.trunk(sample)) + if self.scalp > 0: + # Discard the lowest resolution features + features, pos = features[: -self.scalp], pos[: -self.scalp] + + src = features[-1] + output = { + "vision_features": src, + "vision_pos_enc": pos, + "backbone_fpn": features, + } + return output + + +class FpnNeck(nn.Module): + """ + A modified variant of Feature Pyramid Network (FPN) neck + (we remove output conv and also do bicubic interpolation similar to ViT + pos embed interpolation) + """ + + def __init__( + self, + position_encoding: nn.Module, + d_model: int, + backbone_channel_list: List[int], + kernel_size: int = 1, + stride: int = 1, + padding: int = 0, + fpn_interp_model: str = "bilinear", + fuse_type: str = "sum", + fpn_top_down_levels: Optional[List[int]] = None, + ): + """Initialize the neck + :param trunk: the backbone + :param position_encoding: the positional encoding to use + :param d_model: the dimension of the model + :param neck_norm: the normalization to use + """ + super().__init__() + self.position_encoding = position_encoding + self.convs = nn.ModuleList() + self.backbone_channel_list = backbone_channel_list + for dim in backbone_channel_list: + current = nn.Sequential() + current.add_module( + "conv", + nn.Conv2d( + in_channels=dim, + out_channels=d_model, + kernel_size=kernel_size, + stride=stride, + padding=padding, + ), + ) + + self.convs.append(current) + self.fpn_interp_model = fpn_interp_model + assert fuse_type in ["sum", "avg"] + self.fuse_type = fuse_type + + # levels to have top-down features in its outputs + # e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3 + # have top-down propagation, while outputs of level 0 and level 1 have only + # lateral features from the same backbone level. + if fpn_top_down_levels is None: + # default is to have top-down features on all levels + fpn_top_down_levels = range(len(self.convs)) + self.fpn_top_down_levels = list(fpn_top_down_levels) + + def forward(self, xs: List[torch.Tensor]): + + out = [None] * len(self.convs) + pos = [None] * len(self.convs) + assert len(xs) == len(self.convs) + # fpn forward pass + # see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py + prev_features = None + # forward in top-down order (from low to high resolution) + n = len(self.convs) - 1 + for i in range(n, -1, -1): + x = xs[i] + lateral_features = self.convs[n - i](x) + if i in self.fpn_top_down_levels and prev_features is not None: + top_down_features = F.interpolate( + prev_features.to(dtype=torch.float32), + scale_factor=2.0, + mode=self.fpn_interp_model, + align_corners=( + None if self.fpn_interp_model == "nearest" else False + ), + antialias=False, + ) + prev_features = lateral_features + top_down_features + if self.fuse_type == "avg": + prev_features /= 2 + else: + prev_features = lateral_features + x_out = prev_features + out[i] = x_out + pos[i] = self.position_encoding(x_out).to(x_out.dtype) + + return out, pos diff --git a/third_parts/sam2/modeling/backbones/utils.py b/third_parts/sam2/modeling/backbones/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..32d55c7545f064de133a5ff0200ba1ece9b504b7 --- /dev/null +++ b/third_parts/sam2/modeling/backbones/utils.py @@ -0,0 +1,95 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +"""Some utilities for backbones, in particular for windowing""" + +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def window_partition(x, window_size): + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. + (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = ( + x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + ) + return windows, (Hp, Wp) + + +def window_unpartition(windows, window_size, pad_hw, hw): + """ + Window unpartition into original sequences and removing padding. + Args: + x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view( + B, Hp // window_size, Wp // window_size, window_size, window_size, -1 + ) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, + kernel_size: Tuple[int, ...] = (7, 7), + stride: Tuple[int, ...] = (4, 4), + padding: Tuple[int, ...] = (3, 3), + in_chans: int = 3, + embed_dim: int = 768, + ): + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): embed_dim (int): Patch embedding dimension. + """ + super().__init__() + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x diff --git a/third_parts/sam2/modeling/memory_attention.py b/third_parts/sam2/modeling/memory_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..64097aed192b180cc37345cd3b3819e68257168e --- /dev/null +++ b/third_parts/sam2/modeling/memory_attention.py @@ -0,0 +1,169 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
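A quick note on the windowing helpers added in third_parts/sam2/modeling/backbones/utils.py above: window_partition pads H and W up to multiples of the window size and returns the padded size, and window_unpartition uses that padded size to undo the reshape and crop back to the original resolution, so the two calls round-trip exactly. A minimal shape-check sketch (the 10x13 feature map and window size 4 are illustrative values, not taken from this diff):

import torch

from third_parts.sam2.modeling.backbones.utils import (
    window_partition,
    window_unpartition,
)

# 2 images, 10x13 feature map, 8 channels; window size 4 pads to 12x16.
x = torch.randn(2, 10, 13, 8)
windows, (Hp, Wp) = window_partition(x, window_size=4)
assert (Hp, Wp) == (12, 16)
assert windows.shape == (2 * (Hp // 4) * (Wp // 4), 4, 4, 8)  # 24 windows of 4x4x8

# Unpartition drops the padding and restores the original tensor.
y = window_unpartition(windows, 4, (Hp, Wp), (10, 13))
assert y.shape == x.shape and torch.equal(y, x)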
+ +from typing import Optional + +import torch +from torch import nn, Tensor + +from third_parts.sam2.modeling.sam.transformer import RoPEAttention + +from third_parts.sam2.modeling.sam2_utils import get_activation_fn, get_clones + + +class MemoryAttentionLayer(nn.Module): + + def __init__( + self, + activation: str, + cross_attention: nn.Module, + d_model: int, + dim_feedforward: int, + dropout: float, + pos_enc_at_attn: bool, + pos_enc_at_cross_attn_keys: bool, + pos_enc_at_cross_attn_queries: bool, + self_attention: nn.Module, + ): + super().__init__() + self.d_model = d_model + self.dim_feedforward = dim_feedforward + self.dropout_value = dropout + self.self_attn = self_attention + self.cross_attn_image = cross_attention + + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation_str = activation + self.activation = get_activation_fn(activation) + + # Where to add pos enc + self.pos_enc_at_attn = pos_enc_at_attn + self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries + self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys + + def _forward_sa(self, tgt, query_pos): + # Self-Attention + tgt2 = self.norm1(tgt) + q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2 + tgt2 = self.self_attn(q, k, v=tgt2) + tgt = tgt + self.dropout1(tgt2) + return tgt + + def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0): + kwds = {} + if num_k_exclude_rope > 0: + assert isinstance(self.cross_attn_image, RoPEAttention) + kwds = {"num_k_exclude_rope": num_k_exclude_rope} + + # Cross-Attention + tgt2 = self.norm2(tgt) + tgt2 = self.cross_attn_image( + q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2, + k=memory + pos if self.pos_enc_at_cross_attn_keys else memory, + v=memory, + **kwds, + ) + tgt = tgt + self.dropout2(tgt2) + return tgt + + def forward( + self, + tgt, + memory, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + num_k_exclude_rope: int = 0, + ) -> torch.Tensor: + + # Self-Attn, Cross-Attn + tgt = self._forward_sa(tgt, query_pos) + tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope) + # MLP + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + +class MemoryAttention(nn.Module): + def __init__( + self, + d_model: int, + pos_enc_at_input: bool, + layer: nn.Module, + num_layers: int, + batch_first: bool = True, # Do layers expect batch first input? 
+ ): + super().__init__() + self.d_model = d_model + self.layers = get_clones(layer, num_layers) + self.num_layers = num_layers + self.norm = nn.LayerNorm(d_model) + self.pos_enc_at_input = pos_enc_at_input + self.batch_first = batch_first + + def forward( + self, + curr: torch.Tensor, # self-attention inputs + memory: torch.Tensor, # cross-attention inputs + curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs + memory_pos: Optional[Tensor] = None, # pos_enc for cross-attention inputs + num_obj_ptr_tokens: int = 0, # number of object pointer *tokens* + ): + if isinstance(curr, list): + assert isinstance(curr_pos, list) + assert len(curr) == len(curr_pos) == 1 + curr, curr_pos = ( + curr[0], + curr_pos[0], + ) + + assert ( + curr.shape[1] == memory.shape[1] + ), "Batch size must be the same for curr and memory" + + output = curr + if self.pos_enc_at_input and curr_pos is not None: + output = output + 0.1 * curr_pos + + if self.batch_first: + # Convert to batch first + output = output.transpose(0, 1) + curr_pos = curr_pos.transpose(0, 1) + memory = memory.transpose(0, 1) + memory_pos = memory_pos.transpose(0, 1) + + for layer in self.layers: + kwds = {} + if isinstance(layer.cross_attn_image, RoPEAttention): + kwds = {"num_k_exclude_rope": num_obj_ptr_tokens} + + output = layer( + tgt=output, + memory=memory, + pos=memory_pos, + query_pos=curr_pos, + **kwds, + ) + normed_output = self.norm(output) + + if self.batch_first: + # Convert back to seq first + normed_output = normed_output.transpose(0, 1) + curr_pos = curr_pos.transpose(0, 1) + + return normed_output diff --git a/third_parts/sam2/modeling/memory_encoder.py b/third_parts/sam2/modeling/memory_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..4c61d87fcc60c6005f2c98e24892389611e30deb --- /dev/null +++ b/third_parts/sam2/modeling/memory_encoder.py @@ -0,0 +1,181 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from third_parts.sam2.modeling.sam2_utils import DropPath, get_clones, LayerNorm2d + + +class MaskDownSampler(nn.Module): + """ + Progressively downsample a mask by total_stride, each time by stride. + Note that LayerNorm is applied per *token*, like in ViT. + + With each downsample (by a factor stride**2), channel capacity increases by the same factor. + In the end, we linearly project to embed_dim channels. 
+ """ + + def __init__( + self, + embed_dim=256, + kernel_size=4, + stride=4, + padding=0, + total_stride=16, + activation=nn.GELU, + ): + super().__init__() + num_layers = int(math.log2(total_stride) // math.log2(stride)) + assert stride**num_layers == total_stride + self.encoder = nn.Sequential() + mask_in_chans, mask_out_chans = 1, 1 + for _ in range(num_layers): + mask_out_chans = mask_in_chans * (stride**2) + self.encoder.append( + nn.Conv2d( + mask_in_chans, + mask_out_chans, + kernel_size=kernel_size, + stride=stride, + padding=padding, + ) + ) + self.encoder.append(LayerNorm2d(mask_out_chans)) + self.encoder.append(activation()) + mask_in_chans = mask_out_chans + + self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1)) + + def forward(self, x): + return self.encoder(x) + + +# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt) +class CXBlock(nn.Module): + r"""ConvNeXt Block. There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + We use (2) as we find it slightly faster in PyTorch + + Args: + dim (int): Number of input channels. + drop_path (float): Stochastic depth rate. Default: 0.0 + layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. + """ + + def __init__( + self, + dim, + kernel_size=7, + padding=3, + drop_path=0.0, + layer_scale_init_value=1e-6, + use_dwconv=True, + ): + super().__init__() + self.dwconv = nn.Conv2d( + dim, + dim, + kernel_size=kernel_size, + padding=padding, + groups=dim if use_dwconv else 1, + ) # depthwise conv + self.norm = LayerNorm2d(dim, eps=1e-6) + self.pwconv1 = nn.Linear( + dim, 4 * dim + ) # pointwise/1x1 convs, implemented with linear layers + self.act = nn.GELU() + self.pwconv2 = nn.Linear(4 * dim, dim) + self.gamma = ( + nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True) + if layer_scale_init_value > 0 + else None + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x): + input = x + x = self.dwconv(x) + x = self.norm(x) + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.pwconv1(x) + x = self.act(x) + x = self.pwconv2(x) + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = input + self.drop_path(x) + return x + + +class Fuser(nn.Module): + def __init__(self, layer, num_layers, dim=None, input_projection=False): + super().__init__() + self.proj = nn.Identity() + self.layers = get_clones(layer, num_layers) + + if input_projection: + assert dim is not None + self.proj = nn.Conv2d(dim, dim, kernel_size=1) + + def forward(self, x): + # normally x: (N, C, H, W) + x = self.proj(x) + for layer in self.layers: + x = layer(x) + return x + + +class MemoryEncoder(nn.Module): + def __init__( + self, + out_dim, + mask_downsampler, + fuser, + position_encoding, + in_dim=256, # in_dim of pix_feats + ): + super().__init__() + + self.mask_downsampler = mask_downsampler + + self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1) + self.fuser = fuser + self.position_encoding = position_encoding + self.out_proj = nn.Identity() + if out_dim != in_dim: + self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1) + + def forward( + self, + pix_feat: torch.Tensor, + masks: torch.Tensor, + skip_mask_sigmoid: bool = False, + ) -> Tuple[torch.Tensor, 
torch.Tensor]: + ## Process masks + # sigmoid, so that less domain shift from gt masks which are bool + if not skip_mask_sigmoid: + masks = F.sigmoid(masks) + masks = self.mask_downsampler(masks) + + ## Fuse pix_feats and downsampled masks + # in case the visual features are on CPU, cast them to CUDA + pix_feat = pix_feat.to(masks.device) + + x = self.pix_feat_proj(pix_feat) + x = x + masks + x = self.fuser(x) + x = self.out_proj(x) + + pos = self.position_encoding(x).to(x.dtype) + + return {"vision_features": x, "vision_pos_enc": [pos]} diff --git a/third_parts/sam2/modeling/position_encoding.py b/third_parts/sam2/modeling/position_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..85dc1e375279f8bcacb8652f205ab41af0bb21c3 --- /dev/null +++ b/third_parts/sam2/modeling/position_encoding.py @@ -0,0 +1,221 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import Any, Optional, Tuple + +import numpy as np + +import torch +from torch import nn + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. + """ + + def __init__( + self, + num_pos_feats, + temperature: int = 10000, + normalize: bool = True, + scale: Optional[float] = None, + ): + super().__init__() + assert num_pos_feats % 2 == 0, "Expecting even model width" + self.num_pos_feats = num_pos_feats // 2 + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + self.cache = {} + + def _encode_xy(self, x, y): + # The positions are expected to be normalized + assert len(x) == len(y) and x.ndim == y.ndim == 1 + x_embed = x * self.scale + y_embed = y * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, None] / dim_t + pos_y = y_embed[:, None] / dim_t + pos_x = torch.stack( + (pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2 + ).flatten(1) + pos_y = torch.stack( + (pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2 + ).flatten(1) + return pos_x, pos_y + + @torch.no_grad() + def encode_boxes(self, x, y, w, h): + pos_x, pos_y = self._encode_xy(x, y) + pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1) + return pos + + encode = encode_boxes # Backwards compatibility + + @torch.no_grad() + def encode_points(self, x, y, labels): + (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape + assert bx == by and nx == ny and bx == bl and nx == nl + pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten()) + pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1) + pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2) + return pos + + @torch.no_grad() + def forward(self, x: torch.Tensor): + cache_key = (x.shape[-2], x.shape[-1]) + if cache_key in self.cache: + return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1) + y_embed = ( + torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device) + .view(1, -1, 1) + .repeat(x.shape[0], 1, x.shape[-1]) + ) + x_embed = ( + torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, 
device=x.device) + .view(1, 1, -1) + .repeat(x.shape[0], x.shape[-2], 1) + ) + + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + self.cache[cache_key] = pos[0] + return pos + + +class PositionEmbeddingRandom(nn.Module): + """ + Positional encoding using random spatial frequencies. + """ + + def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + super().__init__() + if scale is None or scale <= 0.0: + scale = 1.0 + self.register_buffer( + "positional_encoding_gaussian_matrix", + scale * torch.randn((2, num_pos_feats)), + ) + self.first = True + + def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: + """Positionally encode points that are normalized to [0,1].""" + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coords = 2 * coords - 1 + coords = coords.to(self.positional_encoding_gaussian_matrix.dtype) + if self.first: + self.positional_encoding_gaussian_matrix = self.positional_encoding_gaussian_matrix.to(coords.device) + self.first = False + coords = coords @ self.positional_encoding_gaussian_matrix + coords = 2 * np.pi * coords + # outputs d_1 x ... x d_n x C shape + return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) + + def forward(self, size: Tuple[int, int]) -> torch.Tensor: + """Generate positional encoding for a grid of the specified size.""" + h, w = size + device: Any = self.positional_encoding_gaussian_matrix.device + grid = torch.ones((h, w), device=device, dtype=torch.float32) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / h + x_embed = x_embed / w + + pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) + return pe.permute(2, 0, 1) # C x H x W + + def forward_with_coords( + self, coords_input: torch.Tensor, image_size: Tuple[int, int] + ) -> torch.Tensor: + """Positionally encode points that are not normalized to [0,1].""" + coords = coords_input.clone() + coords[:, :, 0] = coords[:, :, 0] / image_size[1] + coords[:, :, 1] = coords[:, :, 1] / image_size[0] + return self._pe_encoding(coords.to(torch.float)) # B x N x C + + +# Rotary Positional Encoding, adapted from: +# 1. https://github.com/meta-llama/codellama/blob/main/llama/model.py +# 2. https://github.com/naver-ai/rope-vit +# 3. 
https://github.com/lucidrains/rotary-embedding-torch + + +def init_t_xy(end_x: int, end_y: int): + t = torch.arange(end_x * end_y, dtype=torch.float32) + t_x = (t % end_x).float() + t_y = torch.div(t, end_x, rounding_mode="floor").float() + return t_x, t_y + + +def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0): + freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim)) + freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim)) + + t_x, t_y = init_t_xy(end_x, end_y) + freqs_x = torch.outer(t_x, freqs_x) + freqs_y = torch.outer(t_y, freqs_y) + freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x) + freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y) + return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1) + + +def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor): + ndim = x.ndim + assert 0 <= 1 < ndim + assert freqs_cis.shape == (x.shape[-2], x.shape[-1]) + shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)] + return freqs_cis.view(*shape) + + +def apply_rotary_enc( + xq: torch.Tensor, + xk: torch.Tensor, + freqs_cis: torch.Tensor, + repeat_freqs_k: bool = False, +): + xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) + xk_ = ( + torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) + if xk.shape[-2] != 0 + else None + ) + freqs_cis = reshape_for_broadcast(freqs_cis, xq_) + xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) + if xk_ is None: + # no keys to rotate, due to dropout + return xq_out.type_as(xq).to(xq.device), xk + # repeat freqs along seq_len dim to match k seq_len + if repeat_freqs_k: + r = xk_.shape[-2] // xq_.shape[-2] + freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1) + xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) + return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device) diff --git a/third_parts/sam2/modeling/sam/__init__.py b/third_parts/sam2/modeling/sam/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/third_parts/sam2/modeling/sam/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/third_parts/sam2/modeling/sam/mask_decoder.py b/third_parts/sam2/modeling/sam/mask_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..55825cd6ede175ff327da6fa8e7627cfb8979f58 --- /dev/null +++ b/third_parts/sam2/modeling/sam/mask_decoder.py @@ -0,0 +1,299 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
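As an aside on the rotary helpers in third_parts/sam2/modeling/position_encoding.py above: compute_axial_cis builds one complex rotation per grid position and channel pair, splitting the head dimension evenly between x- and y-axis frequencies, and apply_rotary_enc applies those rotations to real-valued query/key tensors without changing their shape. A small shape sketch under assumed illustrative sizes (a 7x7 token grid and head dimension 64, not values taken from this diff):

import torch

from third_parts.sam2.modeling.position_encoding import (
    apply_rotary_enc,
    compute_axial_cis,
)

# One complex rotation per (position, channel pair): (7 * 7, 64 / 2) = (49, 32).
freqs_cis = compute_axial_cis(dim=64, end_x=7, end_y=7)
assert freqs_cis.shape == (49, 32)

q = torch.randn(1, 4, 49, 64)  # (batch, heads, seq_len, head_dim)
k = torch.randn(1, 4, 49, 64)
q_rot, k_rot = apply_rotary_enc(q, k, freqs_cis=freqs_cis)
assert q_rot.shape == q.shape and k_rot.shape == k.shape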
+ +from typing import List, Optional, Tuple, Type + +import torch +from torch import nn + +from third_parts.sam2.modeling.sam2_utils import LayerNorm2d, MLP + + +class MaskDecoder(nn.Module): + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + use_high_res_features: bool = False, + iou_prediction_use_sigmoid=False, + dynamic_multimask_via_stability=False, + dynamic_multimask_stability_delta=0.05, + dynamic_multimask_stability_thresh=0.98, + pred_obj_scores: bool = False, + pred_obj_scores_mlp: bool = False, + use_multimask_token_for_obj_ptr: bool = False, + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a + transformer architecture. + + Arguments: + transformer_dim (int): the channel dimension of the transformer + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict + when disambiguating masks + activation (nn.Module): the type of activation to use when + upscaling masks + iou_head_depth (int): the depth of the MLP used to predict + mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP + used to predict mask quality + """ + super().__init__() + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.pred_obj_scores = pred_obj_scores + if self.pred_obj_scores: + self.obj_score_token = nn.Embedding(1, transformer_dim) + self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d( + transformer_dim, transformer_dim // 4, kernel_size=2, stride=2 + ), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d( + transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2 + ), + activation(), + ) + self.use_high_res_features = use_high_res_features + if use_high_res_features: + self.conv_s0 = nn.Conv2d( + transformer_dim, transformer_dim // 8, kernel_size=1, stride=1 + ) + self.conv_s1 = nn.Conv2d( + transformer_dim, transformer_dim // 4, kernel_size=1, stride=1 + ) + + self.output_hypernetworks_mlps = nn.ModuleList( + [ + MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) + for i in range(self.num_mask_tokens) + ] + ) + + self.iou_prediction_head = MLP( + transformer_dim, + iou_head_hidden_dim, + self.num_mask_tokens, + iou_head_depth, + sigmoid_output=iou_prediction_use_sigmoid, + ) + if self.pred_obj_scores: + self.pred_obj_score_head = nn.Linear(transformer_dim, 1) + if pred_obj_scores_mlp: + self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3) + + # When outputting a single mask, optionally we can dynamically fall back to the best + # multimask output token if the single mask output token gives low stability scores. 
+ self.dynamic_multimask_via_stability = dynamic_multimask_via_stability + self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta + self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + repeat_image: bool, + high_res_features: Optional[List[torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single + mask. + + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + torch.Tensor: batched SAM token for mask output + """ + masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + repeat_image=repeat_image, + high_res_features=high_res_features, + ) + + # Select the correct mask or masks for output + if multimask_output: + masks = masks[:, 1:, :, :] + iou_pred = iou_pred[:, 1:] + elif self.dynamic_multimask_via_stability and not self.training: + masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred) + else: + masks = masks[:, 0:1, :, :] + iou_pred = iou_pred[:, 0:1] + + if multimask_output and self.use_multimask_token_for_obj_ptr: + sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape + else: + # Take the mask output token. Here we *always* use the token for single mask output. + # At test time, even if we track after 1-click (and using multimask_output=True), + # we still take the single mask token here. The rationale is that we always track + # after multiple clicks during training, so the past tokens seen during training + # are always the single mask token (and we'll let it be the object-memory token). + sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape + + # Prepare output + return masks, iou_pred, sam_tokens_out, object_score_logits + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + repeat_image: bool, + high_res_features: Optional[List[torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. 
See 'forward' for more details.""" + # Concatenate output tokens + s = 0 + if self.pred_obj_scores: + output_tokens = torch.cat( + [ + self.obj_score_token.weight, + self.iou_token.weight, + self.mask_tokens.weight, + ], + dim=0, + ) + s = 1 + else: + output_tokens = torch.cat( + [self.iou_token.weight, self.mask_tokens.weight], dim=0 + ) + output_tokens = output_tokens.unsqueeze(0).expand( + sparse_prompt_embeddings.size(0), -1, -1 + ) + tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) + + # Expand per-image data in batch direction to be per-mask + if repeat_image: + src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) + else: + assert image_embeddings.shape[0] == tokens.shape[0] + src = image_embeddings + src = src + dense_prompt_embeddings + assert ( + image_pe.size(0) == 1 + ), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)" + pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) + b, c, h, w = src.shape + + # Run the transformer + # print('src: ', src.dtype, 'pos_src:', pos_src.dtype, 'tokens:', tokens.dtype) + _dtype = pos_src.dtype + src = src.to(_dtype) + tokens = tokens.to(_dtype) + hs, src = self.transformer(src, pos_src, tokens) + iou_token_out = hs[:, s, :] + mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :] + + # Upscale mask embeddings and predict masks using the mask tokens + src = src.transpose(1, 2).view(b, c, h, w) + if not self.use_high_res_features: + upscaled_embedding = self.output_upscaling(src) + else: + dc1, ln1, act1, dc2, act2 = self.output_upscaling + feat_s0, feat_s1 = high_res_features + upscaled_embedding = act1(ln1(dc1(src) + feat_s1)) + upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0) + + hyper_in_list: List[torch.Tensor] = [] + for i in range(self.num_mask_tokens): + hyper_in_list.append( + self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) + ) + hyper_in = torch.stack(hyper_in_list, dim=1) + b, c, h, w = upscaled_embedding.shape + masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) + + # Generate mask quality predictions + iou_pred = self.iou_prediction_head(iou_token_out) + if self.pred_obj_scores: + assert s == 1 + object_score_logits = self.pred_obj_score_head(hs[:, 0, :]) + else: + # Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1 + object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1) + + return masks, iou_pred, mask_tokens_out, object_score_logits + + def _get_stability_scores(self, mask_logits): + """ + Compute stability scores of the mask logits based on the IoU between upper and + lower thresholds, similar to https://github.com/fairinternal/onevision/pull/568. + """ + mask_logits = mask_logits.flatten(-2) + stability_delta = self.dynamic_multimask_stability_delta + area_i = torch.sum(mask_logits > stability_delta, dim=-1).float() + area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float() + stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0) + return stability_scores + + def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores): + """ + When outputting a single mask, if the stability score from the current single-mask + output (based on output token 0) falls below a threshold, we instead select from + multi-mask outputs (based on output token 1~3) the mask with the highest predicted + IoU score. This is intended to ensure a valid mask for both clicking and tracking. 
+ """ + # The best mask from multimask output tokens (1~3) + multimask_logits = all_mask_logits[:, 1:, :, :] + multimask_iou_scores = all_iou_scores[:, 1:] + best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1) + batch_inds = torch.arange( + multimask_iou_scores.size(0), device=all_iou_scores.device + ) + best_multimask_logits = multimask_logits[batch_inds, best_scores_inds] + best_multimask_logits = best_multimask_logits.unsqueeze(1) + best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds] + best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1) + + # The mask from singlemask output token 0 and its stability score + singlemask_logits = all_mask_logits[:, 0:1, :, :] + singlemask_iou_scores = all_iou_scores[:, 0:1] + stability_scores = self._get_stability_scores(singlemask_logits) + is_stable = stability_scores >= self.dynamic_multimask_stability_thresh + + # Dynamically fall back to best multimask output upon low stability scores. + mask_logits_out = torch.where( + is_stable[..., None, None].expand_as(singlemask_logits), + singlemask_logits, + best_multimask_logits, + ) + iou_scores_out = torch.where( + is_stable.expand_as(singlemask_iou_scores), + singlemask_iou_scores, + best_multimask_iou_scores, + ) + return mask_logits_out, iou_scores_out diff --git a/third_parts/sam2/modeling/sam/prompt_encoder.py b/third_parts/sam2/modeling/sam/prompt_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..5c7e7c97875f2d4bfd11ebc2cb604c56bb901236 --- /dev/null +++ b/third_parts/sam2/modeling/sam/prompt_encoder.py @@ -0,0 +1,182 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional, Tuple, Type + +import torch +from torch import nn + +from third_parts.sam2.modeling.position_encoding import PositionEmbeddingRandom + +from third_parts.sam2.modeling.sam2_utils import LayerNorm2d + + +class PromptEncoder(nn.Module): + def __init__( + self, + embed_dim: int, + image_embedding_size: Tuple[int, int], + input_image_size: Tuple[int, int], + mask_in_chans: int, + activation: Type[nn.Module] = nn.GELU, + ) -> None: + """ + Encodes prompts for input to SAM's mask decoder. + + Arguments: + embed_dim (int): The prompts' embedding dimension + image_embedding_size (tuple(int, int)): The spatial size of the + image embedding, as (H, W). + input_image_size (int): The padded size of the image as input + to the image encoder, as (H, W). + mask_in_chans (int): The number of hidden channels used for + encoding input masks. + activation (nn.Module): The activation to use when encoding + input masks. 
+ """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners + point_embeddings = [ + nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings) + ] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = ( + 4 * image_embedding_size[0], + 4 * image_embedding_size[1], + ) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. + + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords( + points, self.input_image_size + ) + point_embedding[labels == -1] = 0.0 + point_embedding[labels == -1] += self.not_a_point_embed.weight + point_embedding[labels == 0] += self.point_embeddings[0].weight + point_embedding[labels == 1] += self.point_embeddings[1].weight + point_embedding[labels == 2] += self.point_embeddings[2].weight + point_embedding[labels == 3] += self.point_embeddings[3].weight + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords( + coords, self.input_image_size + ) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + mask_embedding = self.mask_downscaling(masks) + return mask_embedding + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. 
+ """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. + + Arguments: + points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates + and labels to embed. + boxes (torch.Tensor or none): boxes to embed + masks (torch.Tensor or none): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. + torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty( + (bs, 0, self.embed_dim), device=self._get_device() + ) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings diff --git a/third_parts/sam2/modeling/sam/transformer.py b/third_parts/sam2/modeling/sam/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..58d6f8bb43fbe4f6ba4f2bbbab13650c380aecf7 --- /dev/null +++ b/third_parts/sam2/modeling/sam/transformer.py @@ -0,0 +1,328 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +import warnings +from functools import partial +from typing import Tuple, Type + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + +from third_parts.sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis + +from third_parts.sam2.modeling.sam2_utils import MLP +from third_parts.sam2.utils.misc import get_sdpa_settings + +warnings.simplefilter(action="ignore", category=FutureWarning) +# OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = get_sdpa_settings() +OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = True, True, True + + +class TwoWayTransformer(nn.Module): + def __init__( + self, + depth: int, + embedding_dim: int, + num_heads: int, + mlp_dim: int, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + ) -> None: + """ + A transformer decoder that attends to an input image using + queries whose positional embedding is supplied. + + Args: + depth (int): number of layers in the transformer + embedding_dim (int): the channel dimension for the input embeddings + num_heads (int): the number of heads for multihead attention. 
Must + divide embedding_dim + mlp_dim (int): the channel dimension internal to the MLP block + activation (nn.Module): the activation to use in the MLP block + """ + super().__init__() + self.depth = depth + self.embedding_dim = embedding_dim + self.num_heads = num_heads + self.mlp_dim = mlp_dim + self.layers = nn.ModuleList() + + for i in range(depth): + self.layers.append( + TwoWayAttentionBlock( + embedding_dim=embedding_dim, + num_heads=num_heads, + mlp_dim=mlp_dim, + activation=activation, + attention_downsample_rate=attention_downsample_rate, + skip_first_layer_pe=(i == 0), + ) + ) + + self.final_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm_final_attn = nn.LayerNorm(embedding_dim) + + def forward( + self, + image_embedding: Tensor, + image_pe: Tensor, + point_embedding: Tensor, + ) -> Tuple[Tensor, Tensor]: + """ + Args: + image_embedding (torch.Tensor): image to attend to. Should be shape + B x embedding_dim x h x w for any h and w. + image_pe (torch.Tensor): the positional encoding to add to the image. Must + have the same shape as image_embedding. + point_embedding (torch.Tensor): the embedding to add to the query points. + Must have shape B x N_points x embedding_dim for any N_points. + + Returns: + torch.Tensor: the processed point_embedding + torch.Tensor: the processed image_embedding + """ + # BxCxHxW -> BxHWxC == B x N_image_tokens x C + bs, c, h, w = image_embedding.shape + image_embedding = image_embedding.flatten(2).permute(0, 2, 1) + image_pe = image_pe.flatten(2).permute(0, 2, 1) + + # Prepare queries + queries = point_embedding + keys = image_embedding + + # Apply transformer blocks and final layernorm + for layer in self.layers: + queries, keys = layer( + queries=queries, + keys=keys, + query_pe=point_embedding, + key_pe=image_pe, + ) + + # Apply the final attention layer from the points to the image + q = queries + point_embedding + k = keys + image_pe + attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm_final_attn(queries) + + return queries, keys + + +class TwoWayAttentionBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + num_heads: int, + mlp_dim: int = 2048, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + skip_first_layer_pe: bool = False, + ) -> None: + """ + A transformer block with four layers: (1) self-attention of sparse + inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp + block on sparse inputs, and (4) cross attention of dense inputs to sparse + inputs. 
+ + Arguments: + embedding_dim (int): the channel dimension of the embeddings + num_heads (int): the number of heads in the attention layers + mlp_dim (int): the hidden dimension of the mlp block + activation (nn.Module): the activation of the mlp block + skip_first_layer_pe (bool): skip the PE on the first layer + """ + super().__init__() + self.self_attn = Attention(embedding_dim, num_heads) + self.norm1 = nn.LayerNorm(embedding_dim) + + self.cross_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm2 = nn.LayerNorm(embedding_dim) + + self.mlp = MLP( + embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation + ) + self.norm3 = nn.LayerNorm(embedding_dim) + + self.norm4 = nn.LayerNorm(embedding_dim) + self.cross_attn_image_to_token = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward( + self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor + ) -> Tuple[Tensor, Tensor]: + # Self attention block + if self.skip_first_layer_pe: + queries = self.self_attn(q=queries, k=queries, v=queries) + else: + q = queries + query_pe + attn_out = self.self_attn(q=q, k=q, v=queries) + queries = queries + attn_out + queries = self.norm1(queries) + + # Cross attention block, tokens attending to image embedding + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + queries = queries + mlp_out + queries = self.norm3(queries) + + # Cross attention block, image embedding attending to tokens + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) + keys = keys + attn_out + keys = self.norm4(keys) + + return queries, keys + + +class Attention(nn.Module): + """ + An attention layer that allows for downscaling the size of the embedding + after projection to queries, keys, and values. + """ + + def __init__( + self, + embedding_dim: int, + num_heads: int, + downsample_rate: int = 1, + dropout: float = 0.0, + kv_in_dim: int = None, + ) -> None: + super().__init__() + self.embedding_dim = embedding_dim + self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim + self.internal_dim = embedding_dim // downsample_rate + self.num_heads = num_heads + assert ( + self.internal_dim % num_heads == 0 + ), "num_heads must divide embedding_dim." 
+ + self.q_proj = nn.Linear(embedding_dim, self.internal_dim) + self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim) + self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, embedding_dim) + + self.dropout_p = dropout + + def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + b, n, c = x.shape + x = x.reshape(b, n, num_heads, c // num_heads) + return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head + + def _recombine_heads(self, x: Tensor) -> Tensor: + b, n_heads, n_tokens, c_per_head = x.shape + x = x.transpose(1, 2) + return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + dropout_p = self.dropout_p if self.training else 0.0 + # Attention + with torch.backends.cuda.sdp_kernel( + enable_flash=USE_FLASH_ATTN, + # if Flash attention kernel is off, then math kernel needs to be enabled + enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON, + enable_mem_efficient=OLD_GPU, + ): + out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out + + +class RoPEAttention(Attention): + """Attention with rotary position encoding.""" + + def __init__( + self, + *args, + rope_theta=10000.0, + # whether to repeat q rope to match k length + # this is needed for cross-attention to memories + rope_k_repeat=False, + feat_sizes=(32, 32), # [w, h] for stride 16 feats at 512 resolution + **kwargs, + ): + super().__init__(*args, **kwargs) + + self.compute_cis = partial( + compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta + ) + freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1]) + self.freqs_cis = freqs_cis + self.rope_k_repeat = rope_k_repeat + + def forward( + self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0 + ) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + # Apply rotary position encoding + w = h = math.sqrt(q.shape[-2]) + self.freqs_cis = self.freqs_cis.to(q.device) + if self.freqs_cis.shape[0] != q.shape[-2]: + self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device) + if q.shape[-2] != k.shape[-2]: + assert self.rope_k_repeat + + num_k_rope = k.size(-2) - num_k_exclude_rope + q, k[:, :, :num_k_rope] = apply_rotary_enc( + q, + k[:, :, :num_k_rope], + freqs_cis=self.freqs_cis, + repeat_freqs_k=self.rope_k_repeat, + ) + + dropout_p = self.dropout_p if self.training else 0.0 + # Attention + with torch.backends.cuda.sdp_kernel( + enable_flash=USE_FLASH_ATTN, + # if Flash attention kernel is off, then math kernel needs to be enabled + enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON, + enable_mem_efficient=OLD_GPU, + ): + out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out diff --git a/third_parts/sam2/modeling/sam2_base.py b/third_parts/sam2/modeling/sam2_base.py new file mode 100644 index 
0000000000000000000000000000000000000000..335268257a2806c3839b4b6c8730bb1318488602 --- /dev/null +++ b/third_parts/sam2/modeling/sam2_base.py @@ -0,0 +1,830 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.distributed +import torch.nn.functional as F + +from torch.nn.init import trunc_normal_ + +from third_parts.sam2.modeling.sam.mask_decoder import MaskDecoder +from third_parts.sam2.modeling.sam.prompt_encoder import PromptEncoder +from third_parts.sam2.modeling.sam.transformer import TwoWayTransformer +from third_parts.sam2.modeling.sam2_utils import get_1d_sine_pe, MLP, select_closest_cond_frames + +# a large negative value as a placeholder score for missing objects +NO_OBJ_SCORE = -1024.0 + + +class SAM2Base(torch.nn.Module): + def __init__( + self, + image_encoder, + memory_attention, + memory_encoder, + num_maskmem=7, # default 1 input frame + 6 previous frames + image_size=512, + backbone_stride=16, # stride of the image backbone output + sigmoid_scale_for_mem_enc=1.0, # scale factor for mask sigmoid prob + sigmoid_bias_for_mem_enc=0.0, # bias factor for mask sigmoid prob + # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks + binarize_mask_from_pts_for_mem_enc=False, + use_mask_input_as_output_without_sam=False, # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder + # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit, + # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model + # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM. + max_cond_frames_in_attn=-1, + # on the first frame, whether to directly add the no-memory embedding to the image feature + # (instead of using the transformer encoder) + directly_add_no_mem_embed=False, + # whether to use high-resolution feature maps in the SAM mask decoder + use_high_res_features_in_sam=False, + # whether to output multiple (3) masks for the first click on initial conditioning frames + multimask_output_in_sam=False, + # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`; + # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points) + multimask_min_pt_num=1, + multimask_max_pt_num=1, + # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`) + multimask_output_for_tracking=False, + # Whether to use multimask tokens for obj ptr; Only relevant when both + # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True + use_multimask_token_for_obj_ptr: bool = False, + # whether to use sigmoid to restrict ious prediction to [0-1] + iou_prediction_use_sigmoid=False, + # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5). 
+ # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of + # (self.num_maskmem - 2) nearest frames from every r-th frame, plus the last frame. + memory_temporal_stride_for_eval=1, + # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click + # if `add_all_frames_to_correct_as_cond` is False, we restrict the conditioning frame list to only the initial conditioning frames + add_all_frames_to_correct_as_cond=False, + # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks) + non_overlap_masks_for_mem_enc=False, + # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder=False, + # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`) + max_obj_ptrs_in_encoder=16, + # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`) + add_tpos_enc_to_obj_ptrs=True, + # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference + # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`) + proj_tpos_enc_in_obj_ptrs=False, + # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation + # (only relevant when `use_obj_ptrs_in_encoder=True`; this might avoid pointer information too far in the future to distract the initial tracking) + only_obj_ptrs_in_the_past_for_eval=False, + # Whether to predict if there is an object in the frame + pred_obj_scores: bool = False, + # Whether to use an MLP to predict object scores + pred_obj_scores_mlp: bool = False, + # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True; + # Whether to have a fixed no obj pointer when there is no object present + # or to use it as an additive embedding with obj_ptr produced by decoder + fixed_no_obj_ptr: bool = False, + # Soft no object, i.e. mix in no_obj_ptr softly, + # hoping to make recovery easier if there is a mistake and to mitigate accumulation of errors + soft_no_obj_ptr: bool = False, + use_mlp_for_obj_ptr_proj: bool = False, + # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class. + sam_mask_decoder_extra_args=None, + compile_image_encoder: bool = False, + ): + super().__init__() + + # Part 1: the image backbone + self.image_encoder = image_encoder + # Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting + self.use_high_res_features_in_sam = use_high_res_features_in_sam + self.num_feature_levels = 3 if use_high_res_features_in_sam else 1 + self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder + self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder + if use_obj_ptrs_in_encoder: + # A conv layer to downsample the mask prompt to stride 4 (the same stride as + # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale, + # so that it can be fed into the SAM mask decoder to generate a pointer.
+ self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4) + self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs + if proj_tpos_enc_in_obj_ptrs: + assert add_tpos_enc_to_obj_ptrs # these options need to be used together + self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs + self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval + + # Part 2: memory attention to condition current frame's visual features + # with memories (and obj ptrs) from past frames + self.memory_attention = memory_attention + self.hidden_dim = memory_attention.d_model + + # Part 3: memory encoder for the previous frame's outputs + self.memory_encoder = memory_encoder + self.mem_dim = self.hidden_dim + if hasattr(self.memory_encoder, "out_proj") and hasattr( + self.memory_encoder.out_proj, "weight" + ): + # if there is compression of memories along channel dim + self.mem_dim = self.memory_encoder.out_proj.weight.shape[0] + self.num_maskmem = num_maskmem # Number of memories accessible + # Temporal encoding of the memories + self.maskmem_tpos_enc = torch.nn.Parameter( + torch.zeros(num_maskmem, 1, 1, self.mem_dim) + ) + trunc_normal_(self.maskmem_tpos_enc, std=0.02) + # a single token to indicate no memory embedding from previous frames + self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) + self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) + trunc_normal_(self.no_mem_embed, std=0.02) + trunc_normal_(self.no_mem_pos_enc, std=0.02) + self.directly_add_no_mem_embed = directly_add_no_mem_embed + # Apply sigmoid to the output raw mask logits (to turn them from + # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder + self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc + self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc + self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc + self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc + self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval + # On frames with mask input, whether to directly output the input mask without + # using a SAM prompt encoder + mask decoder + self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam + self.multimask_output_in_sam = multimask_output_in_sam + self.multimask_min_pt_num = multimask_min_pt_num + self.multimask_max_pt_num = multimask_max_pt_num + self.multimask_output_for_tracking = multimask_output_for_tracking + self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr + self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid + + # Part 4: SAM-style prompt encoder (for both mask and point inputs) + # and SAM-style mask decoder for the final mask output + self.image_size = image_size + self.backbone_stride = backbone_stride + self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args + self.pred_obj_scores = pred_obj_scores + self.pred_obj_scores_mlp = pred_obj_scores_mlp + self.fixed_no_obj_ptr = fixed_no_obj_ptr + self.soft_no_obj_ptr = soft_no_obj_ptr + if self.fixed_no_obj_ptr: + assert self.pred_obj_scores + assert self.use_obj_ptrs_in_encoder + if self.pred_obj_scores and self.use_obj_ptrs_in_encoder: + self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim)) + trunc_normal_(self.no_obj_ptr, std=0.02) + self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj + + self._build_sam_heads() + self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond + self.max_cond_frames_in_attn = 
max_cond_frames_in_attn + + # Model compilation + if compile_image_encoder: + # Compile the forward function (not the full module) to allow loading checkpoints. + print( + "Image encoder compilation is enabled. First forward pass will be slow." + ) + self.image_encoder.forward = torch.compile( + self.image_encoder.forward, + mode="max-autotune", + fullgraph=True, + dynamic=False, + ) + + @property + def device(self): + return next(self.parameters()).device + + def forward(self, *args, **kwargs): + raise NotImplementedError( + "Please use the corresponding methods in SAM2VideoPredictor for inference." + "See notebooks/video_predictor_example.ipynb for an example." + ) + + def _build_sam_heads(self): + """Build SAM-style prompt encoder and mask decoder.""" + self.sam_prompt_embed_dim = self.hidden_dim + self.sam_image_embedding_size = self.image_size // self.backbone_stride + + # build PromptEncoder and MaskDecoder from SAM + # (their hyperparameters like `mask_in_chans=16` are from SAM code) + self.sam_prompt_encoder = PromptEncoder( + embed_dim=self.sam_prompt_embed_dim, + image_embedding_size=( + self.sam_image_embedding_size, + self.sam_image_embedding_size, + ), + input_image_size=(self.image_size, self.image_size), + mask_in_chans=16, + ) + self.sam_mask_decoder = MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=self.sam_prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=self.sam_prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + use_high_res_features=self.use_high_res_features_in_sam, + iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid, + pred_obj_scores=self.pred_obj_scores, + pred_obj_scores_mlp=self.pred_obj_scores_mlp, + use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr, + **(self.sam_mask_decoder_extra_args or {}), + ) + if self.use_obj_ptrs_in_encoder: + # a linear projection on SAM output tokens to turn them into object pointers + self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim) + if self.use_mlp_for_obj_ptr_proj: + self.obj_ptr_proj = MLP( + self.hidden_dim, self.hidden_dim, self.hidden_dim, 3 + ) + else: + self.obj_ptr_proj = torch.nn.Identity() + if self.proj_tpos_enc_in_obj_ptrs: + # a linear projection on temporal positional encoding in object pointers to + # avoid potential interference with spatial positional encoding + self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim) + else: + self.obj_ptr_tpos_proj = torch.nn.Identity() + + def _forward_sam_heads( + self, + backbone_features, + point_inputs=None, + mask_inputs=None, + high_res_features=None, + multimask_output=False, + ): + """ + Forward SAM prompt encoders and mask heads. + + Inputs: + - backbone_features: image features of [B, C, H, W] shape + - point_inputs: a dictionary with "point_coords" and "point_labels", where + 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the + absolute pixel-unit coordinate in (x, y) format of the P input points + 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means + positive clicks, 0 means negative clicks, and -1 means padding + - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the + same spatial size as the image. + - high_res_features: either 1) None or 2) or a list of length 2 containing + two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively, + which will be used as high-resolution feature maps for SAM decoder. 
+ - multimask_output: if it's True, we output 3 candidate masks and their 3 + corresponding IoU estimates, and if it's False, we output only 1 mask and + its corresponding IoU estimate. + + Outputs: + - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if + `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM + output mask logits (before sigmoid) for the low-resolution masks, with 4x + the resolution (1/4 stride) of the input backbone_features. + - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3 + if `multimask_output=True` and M = 1 if `multimask_output=False`), + upsampled from the low-resolution masks, with the same size as the image + (stride is 1 pixel). + - ious: [B, M] shape (where M = 3 if `multimask_output=True` and M = 1 + if `multimask_output=False`), the estimated IoU of each output mask. + - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`. + If `multimask_output=True`, it's the mask with the highest IoU estimate. + If `multimask_output=False`, it's the same as `low_res_multimasks`. + - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`. + If `multimask_output=True`, it's the mask with the highest IoU estimate. + If `multimask_output=False`, it's the same as `high_res_multimasks`. + - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted + based on the output token from the SAM mask decoder. + """ + B = backbone_features.size(0) + device = backbone_features.device + assert backbone_features.size(1) == self.sam_prompt_embed_dim + assert backbone_features.size(2) == self.sam_image_embedding_size + assert backbone_features.size(3) == self.sam_image_embedding_size + + # a) Handle point prompts + if point_inputs is not None: + sam_point_coords = point_inputs["point_coords"] + sam_point_labels = point_inputs["point_labels"] + assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B + else: + # If no points are provided, pad with an empty point (with label -1) + sam_point_coords = torch.zeros(B, 1, 2, device=device) + sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device) + + # b) Handle mask prompts + if mask_inputs is not None: + # If mask_inputs is provided, downsize it into low-res mask input if needed + # and feed it as a dense mask prompt into the SAM prompt encoder + assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1) + if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size: + sam_mask_prompt = F.interpolate( + mask_inputs.float(), + size=self.sam_prompt_encoder.mask_input_size, + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + else: + sam_mask_prompt = mask_inputs + else: + # Otherwise, simply feed None (and SAM's prompt encoder will add + # a learned `no_mask_embed` to indicate no mask input in this case).
+ sam_mask_prompt = None + + sparse_embeddings, dense_embeddings = self.sam_prompt_encoder( + points=(sam_point_coords, sam_point_labels), + boxes=None, + masks=sam_mask_prompt, + ) + ( + low_res_multimasks, + ious, + sam_output_tokens, + object_score_logits, + ) = self.sam_mask_decoder( + image_embeddings=backbone_features, + image_pe=self.sam_prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=False, # the image is already batched + high_res_features=high_res_features, + ) + if self.pred_obj_scores: + is_obj_appearing = object_score_logits > 0 + + # Mask used for spatial memories is always a *hard* choice between obj and no obj, + # consistent with the actual mask prediction + low_res_multimasks = torch.where( + is_obj_appearing[:, None, None], + low_res_multimasks, + NO_OBJ_SCORE, + ) + + # convert masks from possibly bfloat16 (or float16) to float32 + # (older PyTorch versions before 2.1 don't support `interpolate` on bf16) + _dtype = low_res_multimasks.dtype + # low_res_multimasks = low_res_multimasks.float() + high_res_multimasks = F.interpolate( + low_res_multimasks.float(), + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ).to(_dtype) + + sam_output_token = sam_output_tokens[:, 0] + if multimask_output: + # take the best mask prediction (with the highest IoU estimation) + best_iou_inds = torch.argmax(ious, dim=-1) + batch_inds = torch.arange(B, device=device) + low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) + if sam_output_tokens.size(1) > 1: + sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] + else: + low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks + + # Extract object pointer from the SAM output token (with occlusion handling) + obj_ptr = self.obj_ptr_proj(sam_output_token) + if self.pred_obj_scores: + # Allow *soft* no obj ptr, unlike for masks + if self.soft_no_obj_ptr: + # Only hard possible with gt + assert not self.teacher_force_obj_scores_for_mem + lambda_is_obj_appearing = object_score_logits.sigmoid() + else: + lambda_is_obj_appearing = is_obj_appearing.float() + + if self.fixed_no_obj_ptr: + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_multimasks, + high_res_multimasks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) + + def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs): + """ + Directly turn binary `mask_inputs` into a output mask logits without using SAM. + (same input and output shapes as in _forward_sam_heads above). + """ + # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid). 
+ out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05 + mask_inputs_float = mask_inputs.float() + high_res_masks = mask_inputs_float * out_scale + out_bias + low_res_masks = F.interpolate( + high_res_masks, + size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4), + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + # a dummy IoU prediction of all 1's under mask input + ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float() + if not self.use_obj_ptrs_in_encoder: + # all zeros as a dummy object pointer (of shape [B, C]) + obj_ptr = torch.zeros( + mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device + ) + else: + # produce an object pointer using the SAM decoder from the mask input + _, _, _, _, _, obj_ptr, _ = self._forward_sam_heads( + backbone_features=backbone_features, + mask_inputs=self.mask_downsample(mask_inputs_float), + high_res_features=high_res_features, + ) + # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem; + # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying + # on the object_scores from the SAM decoder. + is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1) + is_obj_appearing = is_obj_appearing[..., None] + lambda_is_obj_appearing = is_obj_appearing.float() + object_score_logits = out_scale * lambda_is_obj_appearing + out_bias + if self.pred_obj_scores: + if self.fixed_no_obj_ptr: + obj_ptr = lambda_is_obj_appearing * obj_ptr + obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr + + return ( + low_res_masks, + high_res_masks, + ious, + low_res_masks, + high_res_masks, + obj_ptr, + object_score_logits, + ) + + def forward_image(self, img_batch: torch.Tensor): + """Get the image feature on the input batch.""" + backbone_out = self.image_encoder(img_batch) + if self.use_high_res_features_in_sam: + # precompute projected level 0 and level 1 features in SAM decoder + # to avoid running it again on every SAM click + backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0( + backbone_out["backbone_fpn"][0] + ) + backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1( + backbone_out["backbone_fpn"][1] + ) + return backbone_out + + def _prepare_backbone_features(self, backbone_out): + """Prepare and flatten visual features.""" + backbone_out = backbone_out.copy() + assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"]) + assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels + + feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :] + vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :] + + feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds] + # flatten NxCxHxW to HWxNxC + vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps] + vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds] + + return backbone_out, vision_feats, vision_pos_embeds, feat_sizes + + def _prepare_memory_conditioned_features( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + ): + """Fuse the current frame's visual feature map with previous memory.""" + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # top-level 
(lowest-resolution) feature size + device = current_vision_feats[-1].device + # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images. + # In this case, we skip the fusion with any memory. + if self.num_maskmem == 0: # Disable memory and skip fusion + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + return pix_feat + + num_obj_ptr_tokens = 0 + # Step 1: condition the visual features of the current frame on previous memories + if not is_init_cond_frame: + # Retrieve the memories encoded with the maskmem backbone + to_cat_memory, to_cat_memory_pos_embed = [], [] + # Add conditioning frames's output first (all cond frames have t_pos=0 for + # when getting temporal positional embedding below) + assert len(output_dict["cond_frame_outputs"]) > 0 + # Select a maximum number of temporally closest cond frames for cross attention + cond_outputs = output_dict["cond_frame_outputs"] + selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames( + frame_idx, cond_outputs, self.max_cond_frames_in_attn + ) + t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()] + # Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory + # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1 + # We also allow taking the memory frame non-consecutively (with r>1), in which case + # we take (self.num_maskmem - 2) frames among every r-th frames plus the last frame. + r = self.memory_temporal_stride_for_eval + for t_pos in range(1, self.num_maskmem): + t_rel = self.num_maskmem - t_pos # how many frames before current frame + if t_rel == 1: + # for t_rel == 1, we take the last frame (regardless of r) + if not track_in_reverse: + # the frame immediately before this frame (i.e. frame_idx - 1) + prev_frame_idx = frame_idx - t_rel + else: + # the frame immediately after this frame (i.e. frame_idx + 1) + prev_frame_idx = frame_idx + t_rel + else: + # for t_rel >= 2, we take the memory frame from every r-th frames + if not track_in_reverse: + # first find the nearest frame among every r-th frames before this frame + # for r=1, this would be (frame_idx - 2) + prev_frame_idx = ((frame_idx - 2) // r) * r + # then seek further among every r-th frames + prev_frame_idx = prev_frame_idx - (t_rel - 2) * r + else: + # first find the nearest frame among every r-th frames after this frame + # for r=1, this would be (frame_idx + 2) + prev_frame_idx = -(-(frame_idx + 2) // r) * r + # then seek further among every r-th frames + prev_frame_idx = prev_frame_idx + (t_rel - 2) * r + out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None) + if out is None: + # If an unselected conditioning frame is among the last (self.num_maskmem - 1) + # frames, we still attend to it as if it's a non-conditioning frame. + out = unselected_cond_outputs.get(prev_frame_idx, None) + t_pos_and_prevs.append((t_pos, out)) + + for t_pos, prev in t_pos_and_prevs: + if prev is None: + continue # skip padding frames + # "maskmem_features" might have been offloaded to CPU in demo use cases, + # so we load it back to GPU (it's a no-op if it's already on GPU). 
+ feats = prev["maskmem_features"].cuda(non_blocking=True) + to_cat_memory.append(feats.flatten(2).permute(2, 0, 1)) + # Spatial positional encoding (it might have been offloaded to CPU in eval) + maskmem_enc = prev["maskmem_pos_enc"][-1].cuda() + maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1) + # Temporal positional encoding + maskmem_enc = ( + maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1] + ) + to_cat_memory_pos_embed.append(maskmem_enc) + + # Construct the list of past object pointers + if self.use_obj_ptrs_in_encoder: + max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder) + # First add those object pointers from selected conditioning frames + # (optionally, only include object pointers in the past during evaluation) + if not self.training and self.only_obj_ptrs_in_the_past_for_eval: + ptr_cond_outputs = { + t: out + for t, out in selected_cond_outputs.items() + if (t >= frame_idx if track_in_reverse else t <= frame_idx) + } + else: + ptr_cond_outputs = selected_cond_outputs + pos_and_ptrs = [ + # Temporal pos encoding contains how far away each pointer is from current frame + (abs(frame_idx - t), out["obj_ptr"]) + for t, out in ptr_cond_outputs.items() + ] + # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame + for t_diff in range(1, max_obj_ptrs_in_encoder): + t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff + if t < 0 or (num_frames is not None and t >= num_frames): + break + out = output_dict["non_cond_frame_outputs"].get( + t, unselected_cond_outputs.get(t, None) + ) + if out is not None: + pos_and_ptrs.append((t_diff, out["obj_ptr"])) + # If we have at least one object pointer, add them to the cross attention + if len(pos_and_ptrs) > 0: + pos_list, ptrs_list = zip(*pos_and_ptrs) + # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape + obj_ptrs = torch.stack(ptrs_list, dim=0) + # a temporal positional embedding based on how far each object pointer is from + # the current frame (sine embedding normalized by the max pointer num).
+ if self.add_tpos_enc_to_obj_ptrs: + t_diff_max = max_obj_ptrs_in_encoder - 1 + tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim + obj_pos = torch.tensor(pos_list, device=device) + obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim) + obj_pos = self.obj_ptr_tpos_proj(obj_pos) + obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim) + else: + obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim) + if self.mem_dim < C: + # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C + obj_ptrs = obj_ptrs.reshape( + -1, B, C // self.mem_dim, self.mem_dim + ) + obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1) + obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0) + to_cat_memory.append(obj_ptrs) + to_cat_memory_pos_embed.append(obj_pos) + num_obj_ptr_tokens = obj_ptrs.shape[0] + else: + num_obj_ptr_tokens = 0 + else: + # for initial conditioning frames, encode them without using any previous memory + if self.directly_add_no_mem_embed: + # directly add no-mem embedding (instead of using the transformer encoder) + pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed + pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) + return pix_feat_with_mem + + # Use a dummy token on the first frame (to avoid empty memory input to transformer encoder) + to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)] + to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)] + + # Step 2: Concatenate the memories and forward through the transformer encoder + memory = torch.cat(to_cat_memory, dim=0) + memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0) + + pix_feat_with_mem = self.memory_attention( + curr=current_vision_feats, + curr_pos=current_vision_pos_embeds, + memory=memory, + memory_pos=memory_pos_embed, + num_obj_ptr_tokens=num_obj_ptr_tokens, + ) + # reshape the output (HW)BC => BCHW + pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) + return pix_feat_with_mem + + def _encode_new_memory( + self, + current_vision_feats, + feat_sizes, + pred_masks_high_res, + is_mask_from_pts, + ): + """Encode the current image and its prediction into a memory feature.""" + B = current_vision_feats[-1].size(1) # batch size on this frame + C = self.hidden_dim + H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size + # top-level feature, (HW)BC => BCHW + pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) + if self.non_overlap_masks_for_mem_enc and not self.training: + # optionally, apply non-overlapping constraints to the masks (it's applied + # in the batch dimension and should only be used during eval, where all + # the objects come from the same video under batch size 1).
+ pred_masks_high_res = self._apply_non_overlapping_constraints( + pred_masks_high_res + ) + # scale the raw mask logits with a temperature before applying sigmoid + binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts + if binarize and not self.training: + mask_for_mem = (pred_masks_high_res > 0).float() + else: + # apply sigmoid on the raw mask logits to turn them into range (0, 1) + mask_for_mem = torch.sigmoid(pred_masks_high_res) + # apply scale and bias terms to the sigmoid probabilities + if self.sigmoid_scale_for_mem_enc != 1.0: + mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc + if self.sigmoid_bias_for_mem_enc != 0.0: + mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc + maskmem_out = self.memory_encoder( + pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied + ) + maskmem_features = maskmem_out["vision_features"] + maskmem_pos_enc = maskmem_out["vision_pos_enc"] + + return maskmem_features, maskmem_pos_enc + + def track_step( + self, + frame_idx, + is_init_cond_frame, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + point_inputs, + mask_inputs, + output_dict, + num_frames, + track_in_reverse=False, # tracking in reverse time order (for demo usage) + # Whether to run the memory encoder on the predicted masks. Sometimes we might want + # to skip the memory encoder with `run_mem_encoder=False`. For example, + # in demo we might call `track_step` multiple times for each user click, + # and only encode the memory when the user finalizes their clicks. And in ablation + # settings like SAM training on static images, we don't need the memory encoder. + run_mem_encoder=True, + # The previously predicted SAM mask logits (which can be fed together with new clicks in demo). + prev_sam_mask_logits=None, + ): + current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs} + # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW + if len(current_vision_feats) > 1: + high_res_features = [ + x.permute(1, 2, 0).view(x.size(1), x.size(2), *s) + for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1]) + ] + else: + high_res_features = None + if mask_inputs is not None and self.use_mask_input_as_output_without_sam: + # When use_mask_input_as_output_without_sam=True, we directly output the mask input + # (see it as a GT mask) without using a SAM prompt encoder + mask decoder. + pix_feat = current_vision_feats[-1].permute(1, 2, 0) + pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) + sam_outputs = self._use_mask_as_output( + pix_feat, high_res_features, mask_inputs + ) + else: + # fused the visual feature with previous memory features in the memory bank + pix_feat_with_mem = self._prepare_memory_conditioned_features( + frame_idx=frame_idx, + is_init_cond_frame=is_init_cond_frame, + current_vision_feats=current_vision_feats[-1:], + current_vision_pos_embeds=current_vision_pos_embeds[-1:], + feat_sizes=feat_sizes[-1:], + output_dict=output_dict, + num_frames=num_frames, + track_in_reverse=track_in_reverse, + ) + # apply SAM-style segmentation head + # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder, + # e.g. 
in demo where such logits come from earlier interaction instead of correction sampling + # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead) + if prev_sam_mask_logits is not None: + assert point_inputs is not None and mask_inputs is None + mask_inputs = prev_sam_mask_logits + multimask_output = self._use_multimask(is_init_cond_frame, point_inputs) + sam_outputs = self._forward_sam_heads( + backbone_features=pix_feat_with_mem, + point_inputs=point_inputs, + mask_inputs=mask_inputs, + high_res_features=high_res_features, + multimask_output=multimask_output, + ) + ( + _, + _, + _, + low_res_masks, + high_res_masks, + obj_ptr, + _, + ) = sam_outputs + + current_out["pred_masks"] = low_res_masks + current_out["pred_masks_high_res"] = high_res_masks + current_out["obj_ptr"] = obj_ptr + + # Finally run the memory encoder on the predicted mask to encode + # it into a new memory feature (that can be used in future frames) + if run_mem_encoder and self.num_maskmem > 0: + high_res_masks_for_mem_enc = high_res_masks + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks_for_mem_enc, + is_mask_from_pts=(point_inputs is not None), + ) + current_out["maskmem_features"] = maskmem_features + current_out["maskmem_pos_enc"] = maskmem_pos_enc + else: + current_out["maskmem_features"] = None + current_out["maskmem_pos_enc"] = None + + return current_out + + def _use_multimask(self, is_init_cond_frame, point_inputs): + """Whether to use multimask output in the SAM head.""" + num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1) + multimask_output = ( + self.multimask_output_in_sam + and (is_init_cond_frame or self.multimask_output_for_tracking) + and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num) + ) + return multimask_output + + def _apply_non_overlapping_constraints(self, pred_masks): + """ + Apply non-overlapping constraints to the object scores in pred_masks. Here we + keep only the highest scoring object at each spatial location in pred_masks. + """ + batch_size = pred_masks.size(0) + if batch_size == 1: + return pred_masks + + device = pred_masks.device + # "max_obj_inds": object index of the object with the highest score at each location + max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True) + # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks` + batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None] + keep = max_obj_inds == batch_obj_inds + # suppress overlapping regions' scores below -10.0 so that the foreground regions + # don't overlap (here sigmoid(-10.0)=4.5398e-05) + pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0)) + return pred_masks diff --git a/third_parts/sam2/modeling/sam2_utils.py b/third_parts/sam2/modeling/sam2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d9705963efc57d74b7d1bff31692d7d293a46ad --- /dev/null +++ b/third_parts/sam2/modeling/sam2_utils.py @@ -0,0 +1,149 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ + +import copy + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num): + """ + Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs` + that are temporally closest to the current frame at `frame_idx`. Here, we take + - a) the closest conditioning frame before `frame_idx` (if any); + - b) the closest conditioning frame after `frame_idx` (if any); + - c) any other temporally closest conditioning frames until reaching a total + of `max_cond_frame_num` conditioning frames. + + Outputs: + - selected_outputs: selected items (keys & values) from `cond_frame_outputs`. + - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`. + """ + if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num: + selected_outputs = cond_frame_outputs + unselected_outputs = {} + else: + assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames" + selected_outputs = {} + + # the closest conditioning frame before `frame_idx` (if any) + idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None) + if idx_before is not None: + selected_outputs[idx_before] = cond_frame_outputs[idx_before] + + # the closest conditioning frame after `frame_idx` (if any) + idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None) + if idx_after is not None: + selected_outputs[idx_after] = cond_frame_outputs[idx_after] + + # add other temporally closest conditioning frames until reaching a total + # of `max_cond_frame_num` conditioning frames. + num_remain = max_cond_frame_num - len(selected_outputs) + inds_remain = sorted( + (t for t in cond_frame_outputs if t not in selected_outputs), + key=lambda x: abs(x - frame_idx), + )[:num_remain] + selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain) + unselected_outputs = { + t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs + } + + return selected_outputs, unselected_outputs + + +def get_1d_sine_pe(pos_inds, dim, temperature=10000): + """ + Get 1D sine positional embedding as in the original Transformer paper. 
+ """ + pe_dim = dim // 2 + dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device) + dim_t = temperature ** (2 * (dim_t // 2) / pe_dim) + + pos_embed = pos_inds.unsqueeze(-1) / dim_t + pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1) + return pos_embed + + +def get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(f"activation should be relu/gelu, not {activation}.") + + +def get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +class DropPath(nn.Module): + # adapted from https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py + def __init__(self, drop_prob=0.0, scale_by_keep=True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + if self.drop_prob == 0.0 or not self.training: + return x + keep_prob = 1 - self.drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and self.scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + + +# Lightly adapted from +# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa +class MLP(nn.Module): + def __init__( + self, + input_dim: int, + hidden_dim: int, + output_dim: int, + num_layers: int, + activation: nn.Module = nn.ReLU, + sigmoid_output: bool = False, + ) -> None: + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList( + nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) + ) + self.sigmoid_output = sigmoid_output + self.act = activation() + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x) + if self.sigmoid_output: + x = F.sigmoid(x) + return x + + +# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa +# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/third_parts/sam2/sam2_configs/__init__.py b/third_parts/sam2/sam2_configs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/third_parts/sam2/sam2_configs/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
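For reference, a minimal usage sketch of two of the helpers added in `sam2_utils.py` above (a sketch only: it assumes the repository root is on `PYTHONPATH` so that `third_parts.sam2` is importable, and the frame indices, dictionary values, and dimensions below are made up for illustration):

import torch

from third_parts.sam2.modeling.sam2_utils import (
    get_1d_sine_pe,
    select_closest_cond_frames,
)

# Conditioning-frame outputs keyed by frame index (placeholder values here;
# in SAM2Base these are per-frame output dicts).
cond_frame_outputs = {0: "out_0", 10: "out_10", 25: "out_25", 90: "out_90"}

# Keep at most 2 conditioning frames around frame 30: the closest frame before
# it (25) and the closest frame at/after it (90); the rest become "unselected".
selected, unselected = select_closest_cond_frames(
    frame_idx=30, cond_frame_outputs=cond_frame_outputs, max_cond_frame_num=2
)
print(sorted(selected))    # [25, 90]
print(sorted(unselected))  # [0, 10]

# 1D sine positional embedding for temporal distances 0..3 at dim=64, the same
# helper SAM2Base uses for the object-pointer temporal encoding.
pos = torch.arange(4, dtype=torch.float32)
pe = get_1d_sine_pe(pos, dim=64)
print(pe.shape)  # torch.Size([4, 64])

This mirrors how `SAM2Base._prepare_memory_conditioned_features` limits cross-attention to the temporally closest conditioning frames (via `max_cond_frames_in_attn`) and builds a sine temporal encoding for the object pointers.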
diff --git a/third_parts/sam2/sam2_configs/sam2_hiera_b+.yaml b/third_parts/sam2/sam2_configs/sam2_hiera_b+.yaml new file mode 100644 index 0000000000000000000000000000000000000000..509ce3d030a56ea08026c894223ccc3bc1de9b90 --- /dev/null +++ b/third_parts/sam2/sam2_configs/sam2_hiera_b+.yaml @@ -0,0 +1,113 @@ +# @package _global_ + +# Model +model: + _target_: third_parts.sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: third_parts.sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: third_parts.sam2.modeling.backbones.hieradet.Hiera + embed_dim: 112 + num_heads: 2 + neck: + _target_: third_parts.sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: third_parts.sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [896, 448, 224, 112] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: third_parts.sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: third_parts.sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: third_parts.sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: third_parts.sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: third_parts.sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: third_parts.sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: third_parts.sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: third_parts.sam2.modeling.memory_encoder.Fuser + layer: + _target_: third_parts.sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + 
multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/third_parts/sam2/sam2_configs/sam2_hiera_l.yaml b/third_parts/sam2/sam2_configs/sam2_hiera_l.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f00a7db45789148463c39be186f6a7b53e1f21ad --- /dev/null +++ b/third_parts/sam2/sam2_configs/sam2_hiera_l.yaml @@ -0,0 +1,117 @@ +# @package _global_ + +# Model +model: + _target_: third_parts.sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: third_parts.sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: third_parts.sam2.modeling.backbones.hieradet.Hiera + embed_dim: 144 + num_heads: 2 + stages: [2, 6, 36, 4] + global_att_blocks: [23, 33, 43] + window_pos_embed_bkg_spatial_size: [7, 7] + window_spec: [8, 4, 16, 8] + neck: + _target_: third_parts.sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: third_parts.sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [1152, 576, 288, 144] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: third_parts.sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: third_parts.sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: third_parts.sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: third_parts.sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: third_parts.sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: third_parts.sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: third_parts.sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: third_parts.sam2.modeling.memory_encoder.Fuser + layer: + _target_: third_parts.sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + 
only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/third_parts/sam2/sam2_configs/sam2_hiera_s.yaml b/third_parts/sam2/sam2_configs/sam2_hiera_s.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08a767bd833e8d73b66960f7ba6998eeaaf673c8 --- /dev/null +++ b/third_parts/sam2/sam2_configs/sam2_hiera_s.yaml @@ -0,0 +1,116 @@ +# @package _global_ + +# Model +model: + _target_: third_parts.sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: third_parts.sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: third_parts.sam2.modeling.backbones.hieradet.Hiera + embed_dim: 96 + num_heads: 1 + stages: [1, 2, 11, 2] + global_att_blocks: [7, 10, 13] + window_pos_embed_bkg_spatial_size: [7, 7] + neck: + _target_: third_parts.sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: third_parts.sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [768, 384, 192, 96] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: third_parts.sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: third_parts.sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: third_parts.sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: third_parts.sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: third_parts.sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: third_parts.sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: third_parts.sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: third_parts.sam2.modeling.memory_encoder.Fuser + layer: + _target_: third_parts.sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + 
# SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/third_parts/sam2/sam2_configs/sam2_hiera_t.yaml b/third_parts/sam2/sam2_configs/sam2_hiera_t.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bb681b73fc95527566b28ffeeb71d6408dae7b3c --- /dev/null +++ b/third_parts/sam2/sam2_configs/sam2_hiera_t.yaml @@ -0,0 +1,118 @@ +# @package _global_ + +# Model +model: + _target_: third_parts.sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: third_parts.sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: third_parts.sam2.modeling.backbones.hieradet.Hiera + embed_dim: 96 + num_heads: 1 + stages: [1, 2, 7, 2] + global_att_blocks: [5, 7, 9] + window_pos_embed_bkg_spatial_size: [7, 7] + neck: + _target_: third_parts.sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: third_parts.sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [768, 384, 192, 96] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: third_parts.sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: third_parts.sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: third_parts.sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: third_parts.sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: third_parts.sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: third_parts.sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: third_parts.sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: third_parts.sam2.modeling.memory_encoder.Fuser + layer: + _target_: third_parts.sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + # SAM decoder + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + 
directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + # HieraT does not currently support compilation, should always be set to False + compile_image_encoder: False diff --git a/third_parts/sam2/sam2_image_predictor.py b/third_parts/sam2/sam2_image_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..d8d1d6ac5178e05c75ccb0683d27b4e83e7014b9 --- /dev/null +++ b/third_parts/sam2/sam2_image_predictor.py @@ -0,0 +1,446 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import logging + +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +from PIL.Image import Image + +from third_parts.sam2.modeling.sam2_base import SAM2Base + +from third_parts.sam2.utils.transforms import SAM2Transforms + + +class SAM2ImagePredictor: + def __init__( + self, + sam_model: SAM2Base, + mask_threshold=0.0, + max_hole_area=0.0, + max_sprinkle_area=0.0, + ) -> None: + """ + Uses SAM-2 to calculate the image embedding for an image, and then + allow repeated, efficient mask prediction given prompts. + + Arguments: + sam_model (Sam-2): The model to use for mask prediction. + mask_threshold (float): The threshold to use when converting mask logits + to binary masks. Masks are thresholded at 0 by default. + fill_hole_area (int): If fill_hole_area > 0, we fill small holes in up to + the maximum area of fill_hole_area in low_res_masks. + """ + super().__init__() + self.model = sam_model + self._transforms = SAM2Transforms( + resolution=self.model.image_size, + mask_threshold=mask_threshold, + max_hole_area=max_hole_area, + max_sprinkle_area=max_sprinkle_area, + ) + + # Predictor state + self._is_image_set = False + self._features = None + self._orig_hw = None + # Whether the predictor is set for single image or a batch of images + self._is_batch = False + + # Predictor config + self.mask_threshold = mask_threshold + + # Spatial dim for backbone feature maps + self._bb_feat_sizes = [ + (256, 256), + (128, 128), + (64, 64), + ] + + @torch.no_grad() + def set_image( + self, + image: Union[np.ndarray, Image], + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. + + Arguments: + image (np.ndarray or PIL Image): The input image to embed in RGB format. The image should be in HWC format if np.ndarray, or WHC format if PIL Image + with pixel values in [0, 255]. + image_format (str): The color format of the image, in ['RGB', 'BGR']. 
+ """ + self.reset_predictor() + # Transform the image to the form expected by the model + if isinstance(image, np.ndarray): + logging.info("For numpy array image, we assume (HxWxC) format") + self._orig_hw = [image.shape[:2]] + elif isinstance(image, Image): + w, h = image.size + self._orig_hw = [(h, w)] + else: + raise NotImplementedError("Image format not supported") + + input_image = self._transforms(image) + input_image = input_image[None, ...].to(self.device) + + assert ( + len(input_image.shape) == 4 and input_image.shape[1] == 3 + ), f"input_image must be of size 1x3xHxW, got {input_image.shape}" + logging.info("Computing image embeddings for the provided image...") + backbone_out = self.model.forward_image(input_image) + _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) + # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos + if self.model.directly_add_no_mem_embed: + vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed + + feats = [ + feat.permute(1, 2, 0).view(1, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) + ][::-1] + self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} + self._is_image_set = True + logging.info("Image embeddings computed.") + + @torch.no_grad() + def set_image_batch( + self, + image_list: List[Union[np.ndarray]], + ) -> None: + """ + Calculates the image embeddings for the provided image batch, allowing + masks to be predicted with the 'predict_batch' method. + + Arguments: + image_list (List[np.ndarray]): The input images to embed in RGB format. The image should be in HWC format if np.ndarray + with pixel values in [0, 255]. + """ + self.reset_predictor() + assert isinstance(image_list, list) + self._orig_hw = [] + for image in image_list: + assert isinstance( + image, np.ndarray + ), "Images are expected to be an np.ndarray in RGB format, and of shape HWC" + self._orig_hw.append(image.shape[:2]) + # Transform the image to the form expected by the model + img_batch = self._transforms.forward_batch(image_list) + img_batch = img_batch.to(self.device) + batch_size = img_batch.shape[0] + assert ( + len(img_batch.shape) == 4 and img_batch.shape[1] == 3 + ), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}" + logging.info("Computing image embeddings for the provided images...") + backbone_out = self.model.forward_image(img_batch) + _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) + # Add no_mem_embed, which is added to the lowest rest feat. 
map during training on videos + if self.model.directly_add_no_mem_embed: + vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed + + feats = [ + feat.permute(1, 2, 0).view(batch_size, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) + ][::-1] + self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} + self._is_image_set = True + self._is_batch = True + logging.info("Image embeddings computed.") + + def predict_batch( + self, + point_coords_batch: List[np.ndarray] = None, + point_labels_batch: List[np.ndarray] = None, + box_batch: List[np.ndarray] = None, + mask_input_batch: List[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + normalize_coords=True, + ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: + """This function is very similar to predict(...), however it is used for batched mode, when the model is expected to generate predictions on multiple images. + It returns a tupele of lists of masks, ious, and low_res_masks_logits. + """ + assert self._is_batch, "This function should only be used when in batched mode" + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image_batch(...) before mask prediction." + ) + num_images = len(self._features["image_embed"]) + all_masks = [] + all_ious = [] + all_low_res_masks = [] + for img_idx in range(num_images): + # Transform input prompts + point_coords = ( + point_coords_batch[img_idx] if point_coords_batch is not None else None + ) + point_labels = ( + point_labels_batch[img_idx] if point_labels_batch is not None else None + ) + box = box_batch[img_idx] if box_batch is not None else None + mask_input = ( + mask_input_batch[img_idx] if mask_input_batch is not None else None + ) + mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts( + point_coords, + point_labels, + box, + mask_input, + normalize_coords, + img_idx=img_idx, + ) + masks, iou_predictions, low_res_masks = self._predict( + unnorm_coords, + labels, + unnorm_box, + mask_input, + multimask_output, + return_logits=return_logits, + img_idx=img_idx, + ) + masks_np = masks.squeeze(0).float().detach().cpu().numpy() + iou_predictions_np = ( + iou_predictions.squeeze(0).float().detach().cpu().numpy() + ) + low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy() + all_masks.append(masks_np) + all_ious.append(iou_predictions_np) + all_low_res_masks.append(low_res_masks_np) + + return all_masks, all_ious, all_low_res_masks + + def predict( + self, + point_coords: Optional[np.ndarray] = None, + point_labels: Optional[np.ndarray] = None, + box: Optional[np.ndarray] = None, + mask_input: Optional[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + normalize_coords=True, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Predict masks for the given input prompts, using the currently set image. + + Arguments: + point_coords (np.ndarray or None): A Nx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (np.ndarray or None): A length N array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A length 4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form 1xHxW, where + for SAM, H=W=256. 
+ multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1] and point_coords is expected to be wrt. image dimensions. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. These low resolution logits can be passed to + a subsequent iteration as mask input. + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." + ) + + # Transform input prompts + + mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts( + point_coords, point_labels, box, mask_input, normalize_coords + ) + + masks, iou_predictions, low_res_masks = self._predict( + unnorm_coords, + labels, + unnorm_box, + mask_input, + multimask_output, + return_logits=return_logits, + ) + + masks_np = masks.squeeze(0).float().detach().cpu().numpy() + iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy() + low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy() + return masks_np, iou_predictions_np, low_res_masks_np + + def _prep_prompts( + self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1 + ): + + unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None + if point_coords is not None: + assert ( + point_labels is not None + ), "point_labels must be supplied if point_coords is supplied." + point_coords = torch.as_tensor( + point_coords, dtype=torch.float, device=self.device + ) + unnorm_coords = self._transforms.transform_coords( + point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx] + ) + labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) + if len(unnorm_coords.shape) == 2: + unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...] + if box is not None: + box = torch.as_tensor(box, dtype=torch.float, device=self.device) + unnorm_box = self._transforms.transform_boxes( + box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx] + ) # Bx2x2 + if mask_logits is not None: + mask_input = torch.as_tensor( + mask_logits, dtype=torch.float, device=self.device + ) + if len(mask_input.shape) == 3: + mask_input = mask_input[None, :, :, :] + return mask_input, unnorm_coords, labels, unnorm_box + + @torch.no_grad() + def _predict( + self, + point_coords: Optional[torch.Tensor], + point_labels: Optional[torch.Tensor], + boxes: Optional[torch.Tensor] = None, + mask_input: Optional[torch.Tensor] = None, + multimask_output: bool = True, + return_logits: bool = False, + img_idx: int = -1, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. 
+ Input prompts are batched torch tensors and are expected to already be + transformed to the input frame using SAM2Transforms. + + Arguments: + point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (torch.Tensor or None): A BxN array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + boxes (np.ndarray or None): A Bx4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form Bx1xHxW, where + for SAM, H=W=256. Masks returned by a previous iteration of the + predict method do not need further transformation. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (torch.Tensor): The output masks in BxCxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (torch.Tensor): An array of shape BxC containing the model's + predictions for the quality of each mask. + (torch.Tensor): An array of shape BxCxHxW, where C is the number + of masks and H=W=256. These low res logits can be passed to + a subsequent iteration as mask input. + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) before mask prediction." 
+ ) + + if point_coords is not None: + concat_points = (point_coords, point_labels) + else: + concat_points = None + + # Embed prompts + if boxes is not None: + box_coords = boxes.reshape(-1, 2, 2) + box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device) + box_labels = box_labels.repeat(boxes.size(0), 1) + # we merge "boxes" and "points" into a single "concat_points" input (where + # boxes are added at the beginning) to sam_prompt_encoder + if concat_points is not None: + concat_coords = torch.cat([box_coords, concat_points[0]], dim=1) + concat_labels = torch.cat([box_labels, concat_points[1]], dim=1) + concat_points = (concat_coords, concat_labels) + else: + concat_points = (box_coords, box_labels) + + sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder( + points=concat_points, + boxes=None, + masks=mask_input, + ) + + # Predict masks + batched_mode = ( + concat_points is not None and concat_points[0].shape[0] > 1 + ) # multi object prediction + high_res_features = [ + feat_level[img_idx].unsqueeze(0) + for feat_level in self._features["high_res_feats"] + ] + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( + image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), + image_pe=self.model.sam_prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=batched_mode, + high_res_features=high_res_features, + ) + + # Upscale the masks to the original image resolution + masks = self._transforms.postprocess_masks( + low_res_masks, self._orig_hw[img_idx] + ) + low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0) + if not return_logits: + masks = masks > self.mask_threshold + + return masks, iou_predictions, low_res_masks + + def get_image_embedding(self) -> torch.Tensor: + """ + Returns the image embeddings for the currently set image, with + shape 1xCxHxW, where C is the embedding dimension and (H,W) are + the embedding spatial dimension of SAM (typically C=256, H=W=64). + """ + if not self._is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) to generate an embedding." + ) + assert ( + self._features is not None + ), "Features must exist if an image has been set." + return self._features["image_embed"] + + @property + def device(self) -> torch.device: + return self.model.device + + def reset_predictor(self) -> None: + """ + Resets the image embeddings and other state variables. + """ + self._is_image_set = False + self._features = None + self._orig_hw = None + self._is_batch = False diff --git a/third_parts/sam2/sam2_video_predictor.py b/third_parts/sam2/sam2_video_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..5bad0d4e69f5d8bc2160d61acb248df501b07b92 --- /dev/null +++ b/third_parts/sam2/sam2_video_predictor.py @@ -0,0 +1,898 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
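Taken together, `SAM2ImagePredictor` separates the expensive image encoding (`set_image` / `set_image_batch`) from cheap, repeatable prompting (`predict` / `predict_batch`). A minimal sketch of that flow, assuming a `SAM2Base` model has already been instantiated elsewhere (for example via Hydra from one of the `sam2_configs` above plus a checkpoint; the builder and checkpoint path are not part of this excerpt) and bound to the name `sam_model`:

import numpy as np
from third_parts.sam2.sam2_image_predictor import SAM2ImagePredictor

predictor = SAM2ImagePredictor(sam_model)          # sam_model: a SAM2Base instance, assumed built elsewhere

image = np.zeros((720, 1280, 3), dtype=np.uint8)   # placeholder HxWxC RGB image with values in [0, 255]
predictor.set_image(image)                         # backbone embeddings are computed and cached once

# One positive click (label 1) at pixel (x=600, y=350); coordinates are illustrative.
masks, ious, low_res_logits = predictor.predict(
    point_coords=np.array([[600, 350]]),
    point_labels=np.array([1]),
    multimask_output=True,                         # three candidate masks for an ambiguous single click
)
best_mask = masks[np.argmax(ious)]                 # keep the candidate with the highest predicted IoU

The same cached embeddings can be re-prompted with different points or boxes without re-encoding the image, which is why the predictor stores `self._features` between calls.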
+ +from collections import OrderedDict + +import torch + +from tqdm import tqdm + +from third_parts.sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base +from third_parts.sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames + + +class SAM2VideoPredictor(SAM2Base): + """The predictor class to handle user interactions and manage inference states.""" + + def __init__( + self, + fill_hole_area=0, + # whether to apply non-overlapping constraints on the output object masks + non_overlap_masks=False, + # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks; + # note that this would only apply to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True) + clear_non_cond_mem_around_input=False, + # whether to also clear non-conditioning memory of the surrounding frames (only effective when `clear_non_cond_mem_around_input` is True). + clear_non_cond_mem_for_multi_obj=False, + **kwargs, + ): + super().__init__(**kwargs) + self.fill_hole_area = fill_hole_area + self.non_overlap_masks = non_overlap_masks + self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input + self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj + + @torch.inference_mode() + def init_state( + self, + video_path, + offload_video_to_cpu=False, + offload_state_to_cpu=False, + async_loading_frames=False, + ): + """Initialize a inference state.""" + images, video_height, video_width = load_video_frames( + video_path=video_path, + image_size=self.image_size, + offload_video_to_cpu=offload_video_to_cpu, + async_loading_frames=async_loading_frames, + ) + inference_state = {} + inference_state["images"] = images + inference_state["num_frames"] = len(images) + # whether to offload the video frames to CPU memory + # turning on this option saves the GPU memory with only a very small overhead + inference_state["offload_video_to_cpu"] = offload_video_to_cpu + # whether to offload the inference state to CPU memory + # turning on this option saves the GPU memory at the cost of a lower tracking fps + # (e.g. 
in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object + # and from 24 to 21 when tracking two objects) + inference_state["offload_state_to_cpu"] = offload_state_to_cpu + # the original video height and width, used for resizing final output scores + inference_state["video_height"] = video_height + inference_state["video_width"] = video_width + inference_state["device"] = torch.device("cuda") + if offload_state_to_cpu: + inference_state["storage_device"] = torch.device("cpu") + else: + inference_state["storage_device"] = torch.device("cuda") + # inputs on each frame + inference_state["point_inputs_per_obj"] = {} + inference_state["mask_inputs_per_obj"] = {} + # visual features on a small number of recently visited frames for quick interactions + inference_state["cached_features"] = {} + # values that don't change across frames (so we only need to hold one copy of them) + inference_state["constants"] = {} + # mapping between client-side object id and model-side object index + inference_state["obj_id_to_idx"] = OrderedDict() + inference_state["obj_idx_to_id"] = OrderedDict() + inference_state["obj_ids"] = [] + # A storage to hold the model's tracking results and states on each frame + inference_state["output_dict"] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + # Slice (view) of each object tracking results, sharing the same memory with "output_dict" + inference_state["output_dict_per_obj"] = {} + # A temporary storage to hold new outputs when user interact with a frame + # to add clicks or mask (it's merged into "output_dict" before propagation starts) + inference_state["temp_output_dict_per_obj"] = {} + # Frames that already holds consolidated outputs from click or mask inputs + # (we directly use their consolidated outputs during tracking) + inference_state["consolidated_frame_inds"] = { + "cond_frame_outputs": set(), # set containing frame indices + "non_cond_frame_outputs": set(), # set containing frame indices + } + # metadata for each tracking frame (e.g. which direction it's tracked) + inference_state["tracking_has_started"] = False + inference_state["frames_already_tracked"] = {} + # Warm up the visual backbone and cache the image feature on frame 0 + self._get_image_feature(inference_state, frame_idx=0, batch_size=1) + return inference_state + + def _obj_id_to_idx(self, inference_state, obj_id): + """Map client-side object id to model-side object index.""" + obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None) + if obj_idx is not None: + return obj_idx + + # This is a new object id not sent to the server before. We only allow adding + # new objects *before* the tracking starts. 
+ allow_new_object = not inference_state["tracking_has_started"] + if allow_new_object: + # get the next object slot + obj_idx = len(inference_state["obj_id_to_idx"]) + inference_state["obj_id_to_idx"][obj_id] = obj_idx + inference_state["obj_idx_to_id"][obj_idx] = obj_id + inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"]) + # set up input and output structures for this object + inference_state["point_inputs_per_obj"][obj_idx] = {} + inference_state["mask_inputs_per_obj"][obj_idx] = {} + inference_state["output_dict_per_obj"][obj_idx] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + inference_state["temp_output_dict_per_obj"][obj_idx] = { + "cond_frame_outputs": {}, # dict containing {frame_idx: } + "non_cond_frame_outputs": {}, # dict containing {frame_idx: } + } + return obj_idx + else: + raise RuntimeError( + f"Cannot add new object id {obj_id} after tracking starts. " + f"All existing object ids: {inference_state['obj_ids']}. " + f"Please call 'reset_state' to restart from scratch." + ) + + def _obj_idx_to_id(self, inference_state, obj_idx): + """Map model-side object index to client-side object id.""" + return inference_state["obj_idx_to_id"][obj_idx] + + def _get_obj_num(self, inference_state): + """Get the total number of unique object ids received so far in this session.""" + return len(inference_state["obj_idx_to_id"]) + + @torch.inference_mode() + def add_new_points( + self, + inference_state, + frame_idx, + obj_id, + points, + labels, + clear_old_points=True, + normalize_coords=True, + ): + """Add new points to a frame.""" + obj_idx = self._obj_id_to_idx(inference_state, obj_id) + point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx] + mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx] + + if not isinstance(points, torch.Tensor): + points = torch.tensor(points, dtype=torch.float32) + if not isinstance(labels, torch.Tensor): + labels = torch.tensor(labels, dtype=torch.int32) + if points.dim() == 2: + points = points.unsqueeze(0) # add batch dimension + if labels.dim() == 1: + labels = labels.unsqueeze(0) # add batch dimension + if normalize_coords: + video_H = inference_state["video_height"] + video_W = inference_state["video_width"] + points = points / torch.tensor([video_W, video_H]).to(points.device) + # scale the (normalized) coordinates by the model's internal image size + points = points * self.image_size + points = points.to(inference_state["device"]) + labels = labels.to(inference_state["device"]) + + if not clear_old_points: + point_inputs = point_inputs_per_frame.get(frame_idx, None) + else: + point_inputs = None + point_inputs = concat_points(point_inputs, points, labels) + + point_inputs_per_frame[frame_idx] = point_inputs + mask_inputs_per_frame.pop(frame_idx, None) + # If this frame hasn't been tracked before, we treat it as an initial conditioning + # frame, meaning that the inputs points are to generate segments on this frame without + # using any memory from other frames, like in SAM. Otherwise (if it has been tracked), + # the input points will be used to correct the already tracked masks. 
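+ # Typical interactive flow around this method (paths and coordinates are
+ # illustrative, not taken from this repository):
+ #   state = predictor.init_state(video_path="path/to/frames")
+ #   frame_idx, obj_ids, masks = predictor.add_new_points(
+ #       state, frame_idx=0, obj_id=1,
+ #       points=[[460.0, 260.0]], labels=[1],  # one positive click in pixel coordinates
+ #   )
+ #   for frame_idx, obj_ids, masks in predictor.propagate_in_video(state):
+ #       ...  # mask scores at the original video resolution; threshold at 0 for binary masks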
+ is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"] + # whether to track in reverse time order + if is_init_cond_frame: + reverse = False + else: + reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + # Add a frame to conditioning output if it's an initial conditioning frame or + # if the model sees all frames receiving clicks/mask as conditioning frames. + is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + + # Get any previously predicted mask logits on this object and feed it along with + # the new clicks into the SAM mask decoder. + prev_sam_mask_logits = None + # lookup temporary output dict first, which contains the most recent output + # (if not found, then lookup conditioning and non-conditioning frame output) + prev_out = obj_temp_output_dict[storage_key].get(frame_idx) + if prev_out is None: + prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx) + if prev_out is None: + prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx) + + if prev_out is not None and prev_out["pred_masks"] is not None: + prev_sam_mask_logits = prev_out["pred_masks"].cuda(non_blocking=True) + # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues. + prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0) + current_out, _ = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=obj_output_dict, # run on the slice of a single object + frame_idx=frame_idx, + batch_size=1, # run on the slice of a single object + is_init_cond_frame=is_init_cond_frame, + point_inputs=point_inputs, + mask_inputs=None, + reverse=reverse, + # Skip the memory encoder when adding clicks or mask. We execute the memory encoder + # at the beginning of `propagate_in_video` (after user finalize their clicks). This + # allows us to enforce non-overlapping constraints on all objects before encoding + # them into memory. 
+ run_mem_encoder=False, + prev_sam_mask_logits=prev_sam_mask_logits, + ) + # Add the output to the output dict (to be used as future memory) + obj_temp_output_dict[storage_key][frame_idx] = current_out + + # Resize the output mask to the original video resolution + obj_ids = inference_state["obj_ids"] + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + return frame_idx, obj_ids, video_res_masks + + @torch.inference_mode() + def add_new_mask( + self, + inference_state, + frame_idx, + obj_id, + mask, + ): + """Add new mask to a frame.""" + obj_idx = self._obj_id_to_idx(inference_state, obj_id) + point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx] + mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx] + + if not isinstance(mask, torch.Tensor): + mask = torch.tensor(mask, dtype=torch.bool) + assert mask.dim() == 2 + mask_H, mask_W = mask.shape + mask_inputs_orig = mask[None, None] # add batch and channel dimension + mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"]) + + # resize the mask if it doesn't match the model's image size + if mask_H != self.image_size or mask_W != self.image_size: + mask_inputs = torch.nn.functional.interpolate( + mask_inputs_orig, + size=(self.image_size, self.image_size), + align_corners=False, + mode="bilinear", + antialias=True, # use antialias for downsampling + ) + mask_inputs = (mask_inputs >= 0.5).float() + else: + mask_inputs = mask_inputs_orig + + mask_inputs_per_frame[frame_idx] = mask_inputs + point_inputs_per_frame.pop(frame_idx, None) + # If this frame hasn't been tracked before, we treat it as an initial conditioning + # frame, meaning that the inputs points are to generate segments on this frame without + # using any memory from other frames, like in SAM. Otherwise (if it has been tracked), + # the input points will be used to correct the already tracked masks. + is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"] + # whether to track in reverse time order + if is_init_cond_frame: + reverse = False + else: + reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + # Add a frame to conditioning output if it's an initial conditioning frame or + # if the model sees all frames receiving clicks/mask as conditioning frames. + is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + + current_out, _ = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=obj_output_dict, # run on the slice of a single object + frame_idx=frame_idx, + batch_size=1, # run on the slice of a single object + is_init_cond_frame=is_init_cond_frame, + point_inputs=None, + mask_inputs=mask_inputs, + reverse=reverse, + # Skip the memory encoder when adding clicks or mask. We execute the memory encoder + # at the beginning of `propagate_in_video` (after user finalize their clicks). This + # allows us to enforce non-overlapping constraints on all objects before encoding + # them into memory. 
+ run_mem_encoder=False, + ) + # Add the output to the output dict (to be used as future memory) + obj_temp_output_dict[storage_key][frame_idx] = current_out + + # Resize the output mask to the original video resolution + obj_ids = inference_state["obj_ids"] + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, + frame_idx, + is_cond=is_cond, + run_mem_encoder=False, + consolidate_at_video_res=True, + ) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, consolidated_out["pred_masks_video_res"] + ) + return frame_idx, obj_ids, video_res_masks + + def _get_orig_video_res_output(self, inference_state, any_res_masks): + """ + Resize the object scores to the original video resolution (video_res_masks) + and apply non-overlapping constraints for final output. + """ + device = inference_state["device"] + video_H = inference_state["video_height"] + video_W = inference_state["video_width"] + any_res_masks = any_res_masks.to(device, non_blocking=True) + if any_res_masks.shape[-2:] == (video_H, video_W): + video_res_masks = any_res_masks + else: + video_res_masks = torch.nn.functional.interpolate( + any_res_masks, + size=(video_H, video_W), + mode="bilinear", + align_corners=False, + ) + if self.non_overlap_masks: + video_res_masks = self._apply_non_overlapping_constraints(video_res_masks) + return any_res_masks, video_res_masks + + def _consolidate_temp_output_across_obj( + self, + inference_state, + frame_idx, + is_cond, + run_mem_encoder, + consolidate_at_video_res=False, + ): + """ + Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on + a frame into a single output for all objects, including + 1) fill any missing objects either from `output_dict_per_obj` (if they exist in + `output_dict_per_obj` for this frame) or leave them as placeholder values + (if they don't exist in `output_dict_per_obj` for this frame); + 2) if specified, rerun memory encoder after apply non-overlapping constraints + on the object scores. + """ + batch_size = self._get_obj_num(inference_state) + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + # Optionally, we allow consolidating the temporary outputs at the original + # video resolution (to provide a better editing experience for mask prompts). + if consolidate_at_video_res: + assert not run_mem_encoder, "memory encoder cannot run at video resolution" + consolidated_H = inference_state["video_height"] + consolidated_W = inference_state["video_width"] + consolidated_mask_key = "pred_masks_video_res" + else: + consolidated_H = consolidated_W = self.image_size // 4 + consolidated_mask_key = "pred_masks" + + # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc" + # will be added when rerunning the memory encoder after applying non-overlapping + # constraints to object scores. Its "pred_masks" are prefilled with a large + # negative value (NO_OBJ_SCORE) to represent missing objects. 
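+ # Shapes: the consolidated mask tensor is (num_objects, 1, H, W) and the object
+ # pointers are (num_objects, hidden_dim); slots left at NO_OBJ_SCORE mark objects
+ # with no input or tracked output on this frame.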
+ consolidated_out = { + "maskmem_features": None, + "maskmem_pos_enc": None, + consolidated_mask_key: torch.full( + size=(batch_size, 1, consolidated_H, consolidated_W), + fill_value=NO_OBJ_SCORE, + dtype=torch.float32, + device=inference_state["storage_device"], + ), + "obj_ptr": torch.full( + size=(batch_size, self.hidden_dim), + fill_value=NO_OBJ_SCORE, + dtype=torch.float32, + device=inference_state["device"], + ), + } + empty_mask_ptr = None + for obj_idx in range(batch_size): + obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx] + obj_output_dict = inference_state["output_dict_per_obj"][obj_idx] + out = obj_temp_output_dict[storage_key].get(frame_idx, None) + # If the object doesn't appear in "temp_output_dict_per_obj" on this frame, + # we fall back and look up its previous output in "output_dict_per_obj". + # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in + # "output_dict_per_obj" to find a previous output for this object. + if out is None: + out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None) + if out is None: + out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None) + # If the object doesn't appear in "output_dict_per_obj" either, we skip it + # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE + # placeholder above) and set its object pointer to be a dummy pointer. + if out is None: + # Fill in dummy object pointers for those objects without any inputs or + # tracking outcomes on this frame (only do it under `run_mem_encoder=True`, + # i.e. when we need to build the memory for tracking). + if run_mem_encoder: + if empty_mask_ptr is None: + empty_mask_ptr = self._get_empty_mask_ptr( + inference_state, frame_idx + ) + # fill object pointer with a dummy pointer (based on an empty mask) + consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr + continue + # Add the temporary object output mask to consolidated output mask + obj_mask = out["pred_masks"] + consolidated_pred_masks = consolidated_out[consolidated_mask_key] + if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]: + consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask + else: + # Resize first if temporary object mask has a different resolution + resized_obj_mask = torch.nn.functional.interpolate( + obj_mask, + size=consolidated_pred_masks.shape[-2:], + mode="bilinear", + align_corners=False, + ) + consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask + consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = out["obj_ptr"] + + # Optionally, apply non-overlapping constraints on the consolidated scores + # and rerun the memory encoder + if run_mem_encoder: + device = inference_state["device"] + high_res_masks = torch.nn.functional.interpolate( + consolidated_out["pred_masks"].to(device, non_blocking=True), + size=(self.image_size, self.image_size), + mode="bilinear", + align_corners=False, + ) + if self.non_overlap_masks_for_mem_enc: + high_res_masks = self._apply_non_overlapping_constraints(high_res_masks) + maskmem_features, maskmem_pos_enc = self._run_memory_encoder( + inference_state=inference_state, + frame_idx=frame_idx, + batch_size=batch_size, + high_res_masks=high_res_masks, + is_mask_from_pts=True, # these frames are what the user interacted with + ) + consolidated_out["maskmem_features"] = maskmem_features + consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc + + return consolidated_out + + def _get_empty_mask_ptr(self, inference_state, frame_idx): + """Get a dummy object pointer based 
on an empty mask on the current frame.""" + # A dummy (empty) mask with a single object + batch_size = 1 + mask_inputs = torch.zeros( + (batch_size, 1, self.image_size, self.image_size), + dtype=torch.float32, + device=inference_state["device"], + ) + + # Retrieve correct image features + ( + _, + _, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + ) = self._get_image_feature(inference_state, frame_idx, batch_size) + + # Feed the empty mask and image feature above to get a dummy object pointer + current_out = self.track_step( + frame_idx=frame_idx, + is_init_cond_frame=True, + current_vision_feats=current_vision_feats, + current_vision_pos_embeds=current_vision_pos_embeds, + feat_sizes=feat_sizes, + point_inputs=None, + mask_inputs=mask_inputs, + output_dict={}, + num_frames=inference_state["num_frames"], + track_in_reverse=False, + run_mem_encoder=False, + prev_sam_mask_logits=None, + ) + return current_out["obj_ptr"] + + @torch.inference_mode() + def propagate_in_video_preflight(self, inference_state): + """Prepare inference_state and consolidate temporary outputs before tracking.""" + # Tracking has started and we don't allow adding new objects until session is reset. + inference_state["tracking_has_started"] = True + batch_size = self._get_obj_num(inference_state) + + # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and + # add them into "output_dict". + temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"] + output_dict = inference_state["output_dict"] + # "consolidated_frame_inds" contains indices of those frames where consolidated + # temporary outputs have been added (either in this call or any previous calls + # to `propagate_in_video_preflight`). + consolidated_frame_inds = inference_state["consolidated_frame_inds"] + for is_cond in [False, True]: + # Separately consolidate conditioning and non-conditioning temp outptus + storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs" + # Find all the frames that contain temporary outputs for any objects + # (these should be the frames that have just received clicks for mask inputs + # via `add_new_points` or `add_new_mask`) + temp_frame_inds = set() + for obj_temp_output_dict in temp_output_dict_per_obj.values(): + temp_frame_inds.update(obj_temp_output_dict[storage_key].keys()) + consolidated_frame_inds[storage_key].update(temp_frame_inds) + # consolidate the temprary output across all objects on this frame + for frame_idx in temp_frame_inds: + consolidated_out = self._consolidate_temp_output_across_obj( + inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True + ) + # merge them into "output_dict" and also create per-object slices + output_dict[storage_key][frame_idx] = consolidated_out + self._add_output_per_object( + inference_state, frame_idx, consolidated_out, storage_key + ) + clear_non_cond_mem = self.clear_non_cond_mem_around_input and ( + self.clear_non_cond_mem_for_multi_obj or batch_size <= 1 + ) + if clear_non_cond_mem: + # clear non-conditioning memory of the surrounding frames + self._clear_non_cond_mem_around_input(inference_state, frame_idx) + + # clear temporary outputs in `temp_output_dict_per_obj` + for obj_temp_output_dict in temp_output_dict_per_obj.values(): + obj_temp_output_dict[storage_key].clear() + + # edge case: if an output is added to "cond_frame_outputs", we remove any prior + # output on the same frame in "non_cond_frame_outputs" + for frame_idx in output_dict["cond_frame_outputs"]: + 
output_dict["non_cond_frame_outputs"].pop(frame_idx, None) + for obj_output_dict in inference_state["output_dict_per_obj"].values(): + for frame_idx in obj_output_dict["cond_frame_outputs"]: + obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None) + for frame_idx in consolidated_frame_inds["cond_frame_outputs"]: + assert frame_idx in output_dict["cond_frame_outputs"] + consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx) + + # Make sure that the frame indices in "consolidated_frame_inds" are exactly those frames + # with either points or mask inputs (which should be true under a correct workflow). + all_consolidated_frame_inds = ( + consolidated_frame_inds["cond_frame_outputs"] + | consolidated_frame_inds["non_cond_frame_outputs"] + ) + input_frames_inds = set() + for point_inputs_per_frame in inference_state["point_inputs_per_obj"].values(): + input_frames_inds.update(point_inputs_per_frame.keys()) + for mask_inputs_per_frame in inference_state["mask_inputs_per_obj"].values(): + input_frames_inds.update(mask_inputs_per_frame.keys()) + assert all_consolidated_frame_inds == input_frames_inds + + @torch.inference_mode() + def propagate_in_video( + self, + inference_state, + start_frame_idx=None, + max_frame_num_to_track=None, + reverse=False, + ): + """Propagate the input points across frames to track in the entire video.""" + self.propagate_in_video_preflight(inference_state) + + output_dict = inference_state["output_dict"] + consolidated_frame_inds = inference_state["consolidated_frame_inds"] + obj_ids = inference_state["obj_ids"] + num_frames = inference_state["num_frames"] + batch_size = self._get_obj_num(inference_state) + if len(output_dict["cond_frame_outputs"]) == 0: + raise RuntimeError("No points are provided; please add points first") + clear_non_cond_mem = self.clear_non_cond_mem_around_input and ( + self.clear_non_cond_mem_for_multi_obj or batch_size <= 1 + ) + + # set start index, end index, and processing order + if start_frame_idx is None: + # default: start from the earliest frame with input points + start_frame_idx = min(output_dict["cond_frame_outputs"]) + if max_frame_num_to_track is None: + # default: track all the frames in the video + max_frame_num_to_track = num_frames + if reverse: + end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0) + if start_frame_idx > 0: + processing_order = range(start_frame_idx, end_frame_idx - 1, -1) + else: + processing_order = [] # skip reverse tracking if starting from frame 0 + else: + end_frame_idx = min( + start_frame_idx + max_frame_num_to_track, num_frames - 1 + ) + processing_order = range(start_frame_idx, end_frame_idx + 1) + + for frame_idx in tqdm(processing_order, desc="propagate in video"): + # We skip those frames already in consolidated outputs (these are frames + # that received input clicks or mask). Note that we cannot directly run + # batched forward on them via `_run_single_frame_inference` because the + # number of clicks on each object might be different. 
+ if frame_idx in consolidated_frame_inds["cond_frame_outputs"]: + storage_key = "cond_frame_outputs" + current_out = output_dict[storage_key][frame_idx] + pred_masks = current_out["pred_masks"] + if clear_non_cond_mem: + # clear non-conditioning memory of the surrounding frames + self._clear_non_cond_mem_around_input(inference_state, frame_idx) + elif frame_idx in consolidated_frame_inds["non_cond_frame_outputs"]: + storage_key = "non_cond_frame_outputs" + current_out = output_dict[storage_key][frame_idx] + pred_masks = current_out["pred_masks"] + else: + storage_key = "non_cond_frame_outputs" + current_out, pred_masks = self._run_single_frame_inference( + inference_state=inference_state, + output_dict=output_dict, + frame_idx=frame_idx, + batch_size=batch_size, + is_init_cond_frame=False, + point_inputs=None, + mask_inputs=None, + reverse=reverse, + run_mem_encoder=True, + ) + output_dict[storage_key][frame_idx] = current_out + # Create slices of per-object outputs for subsequent interaction with each + # individual object after tracking. + self._add_output_per_object( + inference_state, frame_idx, current_out, storage_key + ) + inference_state["frames_already_tracked"][frame_idx] = {"reverse": reverse} + + # Resize the output mask to the original video resolution (we directly use + # the mask scores on GPU for output to avoid any CPU conversion in between) + _, video_res_masks = self._get_orig_video_res_output( + inference_state, pred_masks + ) + yield frame_idx, obj_ids, video_res_masks + + def _add_output_per_object( + self, inference_state, frame_idx, current_out, storage_key + ): + """ + Split a multi-object output into per-object output slices and add them into + `output_dict_per_obj`. The resulting slices share the same tensor storage. + """ + maskmem_features = current_out["maskmem_features"] + assert maskmem_features is None or isinstance(maskmem_features, torch.Tensor) + + maskmem_pos_enc = current_out["maskmem_pos_enc"] + assert maskmem_pos_enc is None or isinstance(maskmem_pos_enc, list) + + output_dict_per_obj = inference_state["output_dict_per_obj"] + for obj_idx, obj_output_dict in output_dict_per_obj.items(): + obj_slice = slice(obj_idx, obj_idx + 1) + obj_out = { + "maskmem_features": None, + "maskmem_pos_enc": None, + "pred_masks": current_out["pred_masks"][obj_slice], + "obj_ptr": current_out["obj_ptr"][obj_slice], + } + if maskmem_features is not None: + obj_out["maskmem_features"] = maskmem_features[obj_slice] + if maskmem_pos_enc is not None: + obj_out["maskmem_pos_enc"] = [x[obj_slice] for x in maskmem_pos_enc] + obj_output_dict[storage_key][frame_idx] = obj_out + + @torch.inference_mode() + def reset_state(self, inference_state): + """Remove all input points or mask in all frames throughout the video.""" + self._reset_tracking_results(inference_state) + # Remove all object ids + inference_state["obj_id_to_idx"].clear() + inference_state["obj_idx_to_id"].clear() + inference_state["obj_ids"].clear() + inference_state["point_inputs_per_obj"].clear() + inference_state["mask_inputs_per_obj"].clear() + inference_state["output_dict_per_obj"].clear() + inference_state["temp_output_dict_per_obj"].clear() + + def _reset_tracking_results(self, inference_state): + """Reset all tracking inputs and results across the videos.""" + for v in inference_state["point_inputs_per_obj"].values(): + v.clear() + for v in inference_state["mask_inputs_per_obj"].values(): + v.clear() + for v in inference_state["output_dict_per_obj"].values(): + v["cond_frame_outputs"].clear() + 
v["non_cond_frame_outputs"].clear() + for v in inference_state["temp_output_dict_per_obj"].values(): + v["cond_frame_outputs"].clear() + v["non_cond_frame_outputs"].clear() + inference_state["output_dict"]["cond_frame_outputs"].clear() + inference_state["output_dict"]["non_cond_frame_outputs"].clear() + inference_state["consolidated_frame_inds"]["cond_frame_outputs"].clear() + inference_state["consolidated_frame_inds"]["non_cond_frame_outputs"].clear() + inference_state["tracking_has_started"] = False + inference_state["frames_already_tracked"].clear() + + def _get_image_feature(self, inference_state, frame_idx, batch_size): + """Compute the image features on a given frame.""" + # Look up in the cache first + image, backbone_out = inference_state["cached_features"].get( + frame_idx, (None, None) + ) + if backbone_out is None: + # Cache miss -- we will run inference on a single image + image = inference_state["images"][frame_idx].cuda().float().unsqueeze(0) + backbone_out = self.forward_image(image) + # Cache the most recent frame's feature (for repeated interactions with + # a frame; we can use an LRU cache for more frames in the future). + inference_state["cached_features"] = {frame_idx: (image, backbone_out)} + + # expand the features to have the same dimension as the number of objects + expanded_image = image.expand(batch_size, -1, -1, -1) + expanded_backbone_out = { + "backbone_fpn": backbone_out["backbone_fpn"].copy(), + "vision_pos_enc": backbone_out["vision_pos_enc"].copy(), + } + for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]): + expanded_backbone_out["backbone_fpn"][i] = feat.expand( + batch_size, -1, -1, -1 + ) + for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]): + pos = pos.expand(batch_size, -1, -1, -1) + expanded_backbone_out["vision_pos_enc"][i] = pos + + features = self._prepare_backbone_features(expanded_backbone_out) + features = (expanded_image,) + features + return features + + def _run_single_frame_inference( + self, + inference_state, + output_dict, + frame_idx, + batch_size, + is_init_cond_frame, + point_inputs, + mask_inputs, + reverse, + run_mem_encoder, + prev_sam_mask_logits=None, + ): + """Run tracking on a single frame based on current inputs and previous memory.""" + # Retrieve correct image features + ( + _, + _, + current_vision_feats, + current_vision_pos_embeds, + feat_sizes, + ) = self._get_image_feature(inference_state, frame_idx, batch_size) + + # point and mask should not appear as input simultaneously on the same frame + assert point_inputs is None or mask_inputs is None + current_out = self.track_step( + frame_idx=frame_idx, + is_init_cond_frame=is_init_cond_frame, + current_vision_feats=current_vision_feats, + current_vision_pos_embeds=current_vision_pos_embeds, + feat_sizes=feat_sizes, + point_inputs=point_inputs, + mask_inputs=mask_inputs, + output_dict=output_dict, + num_frames=inference_state["num_frames"], + track_in_reverse=reverse, + run_mem_encoder=run_mem_encoder, + prev_sam_mask_logits=prev_sam_mask_logits, + ) + + # optionally offload the output to CPU memory to save GPU space + storage_device = inference_state["storage_device"] + maskmem_features = current_out["maskmem_features"] + if maskmem_features is not None: + maskmem_features = maskmem_features.to(torch.bfloat16) + maskmem_features = maskmem_features.to(storage_device, non_blocking=True) + pred_masks_gpu = current_out["pred_masks"] + # potentially fill holes in the predicted masks + if self.fill_hole_area > 0: + pred_masks_gpu = 
fill_holes_in_mask_scores( + pred_masks_gpu, self.fill_hole_area + ) + pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True) + # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it + maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out) + # object pointer is a small tensor, so we always keep it on GPU memory for fast access + obj_ptr = current_out["obj_ptr"] + # make a compact version of this frame's output to reduce the state size + compact_current_out = { + "maskmem_features": maskmem_features, + "maskmem_pos_enc": maskmem_pos_enc, + "pred_masks": pred_masks, + "obj_ptr": obj_ptr, + } + return compact_current_out, pred_masks_gpu + + def _run_memory_encoder( + self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts + ): + """ + Run the memory encoder on `high_res_masks`. This is usually after applying + non-overlapping constraints to object scores. Since their scores changed, their + memory also need to be computed again with the memory encoder. + """ + # Retrieve correct image features + _, _, current_vision_feats, _, feat_sizes = self._get_image_feature( + inference_state, frame_idx, batch_size + ) + maskmem_features, maskmem_pos_enc = self._encode_new_memory( + current_vision_feats=current_vision_feats, + feat_sizes=feat_sizes, + pred_masks_high_res=high_res_masks, + is_mask_from_pts=is_mask_from_pts, + ) + + # optionally offload the output to CPU memory to save GPU space + storage_device = inference_state["storage_device"] + maskmem_features = maskmem_features.to(torch.bfloat16) + maskmem_features = maskmem_features.to(storage_device, non_blocking=True) + # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it + maskmem_pos_enc = self._get_maskmem_pos_enc( + inference_state, {"maskmem_pos_enc": maskmem_pos_enc} + ) + return maskmem_features, maskmem_pos_enc + + def _get_maskmem_pos_enc(self, inference_state, current_out): + """ + `maskmem_pos_enc` is the same across frames and objects, so we cache it as + a constant in the inference session to reduce session storage size. + """ + model_constants = inference_state["constants"] + # "out_maskmem_pos_enc" should be either a list of tensors or None + out_maskmem_pos_enc = current_out["maskmem_pos_enc"] + if out_maskmem_pos_enc is not None: + if "maskmem_pos_enc" not in model_constants: + assert isinstance(out_maskmem_pos_enc, list) + # only take the slice for one object, since it's same across objects + maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc] + model_constants["maskmem_pos_enc"] = maskmem_pos_enc + else: + maskmem_pos_enc = model_constants["maskmem_pos_enc"] + # expand the cached maskmem_pos_enc to the actual batch size + batch_size = out_maskmem_pos_enc[0].size(0) + expanded_maskmem_pos_enc = [ + x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc + ] + else: + expanded_maskmem_pos_enc = None + return expanded_maskmem_pos_enc + + def _clear_non_cond_mem_around_input(self, inference_state, frame_idx): + """ + Remove the non-conditioning memory around the input frame. When users provide + correction clicks, the surrounding frames' non-conditioning memories can still + contain outdated object appearance information and could confuse the model. + + This method clears those non-conditioning memories surrounding the interacted + frame to avoid giving the model both old and new information about the object. 
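+ The cleared window spans `num_maskmem * memory_temporal_stride_for_eval` frames on each side of the interacted frame.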
+ """ + r = self.memory_temporal_stride_for_eval + frame_idx_begin = frame_idx - r * self.num_maskmem + frame_idx_end = frame_idx + r * self.num_maskmem + output_dict = inference_state["output_dict"] + non_cond_frame_outputs = output_dict["non_cond_frame_outputs"] + for t in range(frame_idx_begin, frame_idx_end + 1): + non_cond_frame_outputs.pop(t, None) + for obj_output_dict in inference_state["output_dict_per_obj"].values(): + obj_output_dict["non_cond_frame_outputs"].pop(t, None) diff --git a/third_parts/sam2/utils/__init__.py b/third_parts/sam2/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/third_parts/sam2/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/third_parts/sam2/utils/amg.py b/third_parts/sam2/utils/amg.py new file mode 100644 index 0000000000000000000000000000000000000000..986842960cf5deca00614b7b1cde1ab77dad7e6e --- /dev/null +++ b/third_parts/sam2/utils/amg.py @@ -0,0 +1,348 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from copy import deepcopy +from itertools import product +from typing import Any, Dict, Generator, ItemsView, List, Tuple + +import numpy as np +import torch + +# Very lightly adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/utils/amg.py + + +class MaskData: + """ + A structure for storing masks and their related data in batched format. + Implements basic filtering and concatenation. + """ + + def __init__(self, **kwargs) -> None: + for v in kwargs.values(): + assert isinstance( + v, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." + self._stats = dict(**kwargs) + + def __setitem__(self, key: str, item: Any) -> None: + assert isinstance( + item, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." 
+ self._stats[key] = item + + def __delitem__(self, key: str) -> None: + del self._stats[key] + + def __getitem__(self, key: str) -> Any: + return self._stats[key] + + def items(self) -> ItemsView[str, Any]: + return self._stats.items() + + def filter(self, keep: torch.Tensor) -> None: + for k, v in self._stats.items(): + if v is None: + self._stats[k] = None + elif isinstance(v, torch.Tensor): + self._stats[k] = v[torch.as_tensor(keep, device=v.device)] + elif isinstance(v, np.ndarray): + self._stats[k] = v[keep.detach().cpu().numpy()] + elif isinstance(v, list) and keep.dtype == torch.bool: + self._stats[k] = [a for i, a in enumerate(v) if keep[i]] + elif isinstance(v, list): + self._stats[k] = [v[i] for i in keep] + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def cat(self, new_stats: "MaskData") -> None: + for k, v in new_stats.items(): + if k not in self._stats or self._stats[k] is None: + self._stats[k] = deepcopy(v) + elif isinstance(v, torch.Tensor): + self._stats[k] = torch.cat([self._stats[k], v], dim=0) + elif isinstance(v, np.ndarray): + self._stats[k] = np.concatenate([self._stats[k], v], axis=0) + elif isinstance(v, list): + self._stats[k] = self._stats[k] + deepcopy(v) + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def to_numpy(self) -> None: + for k, v in self._stats.items(): + if isinstance(v, torch.Tensor): + self._stats[k] = v.float().detach().cpu().numpy() + + +def is_box_near_crop_edge( + boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0 +) -> torch.Tensor: + """Filter masks at the edge of a crop, but not at the edge of the original image.""" + crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) + orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) + boxes = uncrop_boxes_xyxy(boxes, crop_box).float() + near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) + near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) + near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) + return torch.any(near_crop_edge, dim=1) + + +def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor: + box_xywh = deepcopy(box_xyxy) + box_xywh[2] = box_xywh[2] - box_xywh[0] + box_xywh[3] = box_xywh[3] - box_xywh[1] + return box_xywh + + +def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]: + assert len(args) > 0 and all( + len(a) == len(args[0]) for a in args + ), "Batched iteration must have inputs of all the same size." + n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0) + for b in range(n_batches): + yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args] + + +def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]: + """ + Encodes masks to an uncompressed RLE, in the format expected by + pycoco tools. 
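+ The "counts" list stores run lengths over the mask flattened in Fortran (column-major) order, alternating between background and foreground runs and always starting with a (possibly zero-length) background run.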
+ """ + # Put in fortran order and flatten h,w + b, h, w = tensor.shape + tensor = tensor.permute(0, 2, 1).flatten(1) + + # Compute change indices + diff = tensor[:, 1:] ^ tensor[:, :-1] + change_indices = diff.nonzero() + + # Encode run length + out = [] + for i in range(b): + cur_idxs = change_indices[change_indices[:, 0] == i, 1] + cur_idxs = torch.cat( + [ + torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device), + cur_idxs + 1, + torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), + ] + ) + btw_idxs = cur_idxs[1:] - cur_idxs[:-1] + counts = [] if tensor[i, 0] == 0 else [0] + counts.extend(btw_idxs.detach().cpu().tolist()) + out.append({"size": [h, w], "counts": counts}) + return out + + +def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: + """Compute a binary mask from an uncompressed RLE.""" + h, w = rle["size"] + mask = np.empty(h * w, dtype=bool) + idx = 0 + parity = False + for count in rle["counts"]: + mask[idx : idx + count] = parity + idx += count + parity ^= True + mask = mask.reshape(w, h) + return mask.transpose() # Put in C order + + +def area_from_rle(rle: Dict[str, Any]) -> int: + return sum(rle["counts"][1::2]) + + +def calculate_stability_score( + masks: torch.Tensor, mask_threshold: float, threshold_offset: float +) -> torch.Tensor: + """ + Computes the stability score for a batch of masks. The stability + score is the IoU between the binary masks obtained by thresholding + the predicted mask logits at high and low values. + """ + # One mask is always contained inside the other. + # Save memory by preventing unnecessary cast to torch.int64 + intersections = ( + (masks > (mask_threshold + threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + unions = ( + (masks > (mask_threshold - threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + return intersections / unions + + +def build_point_grid(n_per_side: int) -> np.ndarray: + """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" + offset = 1 / (2 * n_per_side) + points_one_side = np.linspace(offset, 1 - offset, n_per_side) + points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) + points_y = np.tile(points_one_side[:, None], (1, n_per_side)) + points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) + return points + + +def build_all_layer_point_grids( + n_per_side: int, n_layers: int, scale_per_layer: int +) -> List[np.ndarray]: + """Generates point grids for all crop layers.""" + points_by_layer = [] + for i in range(n_layers + 1): + n_points = int(n_per_side / (scale_per_layer**i)) + points_by_layer.append(build_point_grid(n_points)) + return points_by_layer + + +def generate_crop_boxes( + im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float +) -> Tuple[List[List[int]], List[int]]: + """ + Generates a list of crop boxes of different sizes. Each layer + has (2**i)**2 boxes for the ith layer. 
+ """ + crop_boxes, layer_idxs = [], [] + im_h, im_w = im_size + short_side = min(im_h, im_w) + + # Original image + crop_boxes.append([0, 0, im_w, im_h]) + layer_idxs.append(0) + + def crop_len(orig_len, n_crops, overlap): + return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)) + + for i_layer in range(n_layers): + n_crops_per_side = 2 ** (i_layer + 1) + overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) + + crop_w = crop_len(im_w, n_crops_per_side, overlap) + crop_h = crop_len(im_h, n_crops_per_side, overlap) + + crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)] + crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)] + + # Crops in XYWH format + for x0, y0 in product(crop_box_x0, crop_box_y0): + box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] + crop_boxes.append(box) + layer_idxs.append(i_layer + 1) + + return crop_boxes, layer_idxs + + +def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device) + # Check if boxes has a channel dimension + if len(boxes.shape) == 3: + offset = offset.unsqueeze(1) + return boxes + offset + + +def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0]], device=points.device) + # Check if points has a channel dimension + if len(points.shape) == 3: + offset = offset.unsqueeze(1) + return points + offset + + +def uncrop_masks( + masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int +) -> torch.Tensor: + x0, y0, x1, y1 = crop_box + if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: + return masks + # Coordinate transform masks + pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0) + pad = (x0, pad_x - x0, y0, pad_y - y0) + return torch.nn.functional.pad(masks, pad, value=0) + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + + +def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]: + from pycocotools import mask as mask_utils # type: ignore + + h, w = uncompressed_rle["size"] + rle = mask_utils.frPyObjects(uncompressed_rle, h, w) + rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json + return rle + + +def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor: + """ + Calculates boxes in XYXY format around masks. Return [0,0,0,0] for + an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4. 
+ """ + # torch.max below raises an error on empty inputs, just skip in this case + if torch.numel(masks) == 0: + return torch.zeros(*masks.shape[:-2], 4, device=masks.device) + + # Normalize shape to CxHxW + shape = masks.shape + h, w = shape[-2:] + if len(shape) > 2: + masks = masks.flatten(0, -3) + else: + masks = masks.unsqueeze(0) + + # Get top and bottom edges + in_height, _ = torch.max(masks, dim=-1) + in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :] + bottom_edges, _ = torch.max(in_height_coords, dim=-1) + in_height_coords = in_height_coords + h * (~in_height) + top_edges, _ = torch.min(in_height_coords, dim=-1) + + # Get left and right edges + in_width, _ = torch.max(masks, dim=-2) + in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :] + right_edges, _ = torch.max(in_width_coords, dim=-1) + in_width_coords = in_width_coords + w * (~in_width) + left_edges, _ = torch.min(in_width_coords, dim=-1) + + # If the mask is empty the right edge will be to the left of the left edge. + # Replace these boxes with [0, 0, 0, 0] + empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) + out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) + out = out * (~empty_filter).unsqueeze(-1) + + # Return to original shape + if len(shape) > 2: + out = out.reshape(*shape[:-2], 4) + else: + out = out[0] + + return out diff --git a/third_parts/sam2/utils/misc.py b/third_parts/sam2/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..b354b91cb534a34f8c51ac23dc83d70c8c28ed6a --- /dev/null +++ b/third_parts/sam2/utils/misc.py @@ -0,0 +1,249 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import os +import warnings +from threading import Thread + +import numpy as np +import torch +from PIL import Image +from tqdm import tqdm + + +def get_sdpa_settings(): + if torch.cuda.is_available(): + old_gpu = torch.cuda.get_device_properties(0).major < 7 + # only use Flash Attention on Ampere (8.0) or newer GPUs + use_flash_attn = torch.cuda.get_device_properties(0).major >= 8 + if not use_flash_attn: + warnings.warn( + "Flash Attention is disabled as it requires a GPU with Ampere (8.0) CUDA capability.", + category=UserWarning, + stacklevel=2, + ) + # keep math kernel for PyTorch versions before 2.2 (Flash Attention v2 is only + # available on PyTorch 2.2+, while Flash Attention v1 cannot handle all cases) + pytorch_version = tuple(int(v) for v in torch.__version__.split(".")[:2]) + if pytorch_version < (2, 2): + warnings.warn( + f"You are using PyTorch {torch.__version__} without Flash Attention v2 support. " + "Consider upgrading to PyTorch 2.2+ for Flash Attention v2 (which could be faster).", + category=UserWarning, + stacklevel=2, + ) + math_kernel_on = pytorch_version < (2, 2) or not use_flash_attn + else: + old_gpu = True + use_flash_attn = False + math_kernel_on = True + + return old_gpu, use_flash_attn, math_kernel_on + + +def get_connected_components(mask): + """ + Get the connected components (8-connectivity) of binary masks of shape (N, 1, H, W). + + Inputs: + - mask: A binary mask tensor of shape (N, 1, H, W), where 1 is foreground and 0 is + background. + + Outputs: + - labels: A tensor of shape (N, 1, H, W) containing the connected component labels + for foreground pixels and 0 for background pixels. 
+ - counts: A tensor of shape (N, 1, H, W) containing the area of the connected + components for foreground pixels and 0 for background pixels. + """ + from torch.utils.cpp_extension import load + get_connected_componnets = load( + name="get_connected_componnets", + sources=["third_parts/sam2/csrc/connected_components.cu"], + verbose=True, + extra_cuda_cflags=[ + "-DCUDA_HAS_FP16=1", + "-D__CUDA_NO_HALF_OPERATORS__", + "-D__CUDA_NO_HALF_CONVERSIONS__", + "-D__CUDA_NO_HALF2_OPERATORS__", + ] + ) + + return get_connected_componnets.get_connected_componnets(mask.to(torch.uint8).contiguous()) + + +def mask_to_box(masks: torch.Tensor): + """ + Compute the bounding box of each input mask. + + Inputs: + - masks: [B, 1, H, W] binary masks, dtype=torch.Tensor + + Returns: + - box_coords: [B, 1, 4], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.Tensor + """ + B, _, h, w = masks.shape + device = masks.device + xs = torch.arange(w, device=device, dtype=torch.int32) + ys = torch.arange(h, device=device, dtype=torch.int32) + grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy") + grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w) + grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w) + min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1) + max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1) + min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1) + max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1) + bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1) + + return bbox_coords + + +def _load_img_as_tensor(img_path, image_size): + img_pil = Image.open(img_path) + img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size))) + if img_np.dtype == np.uint8: # np.uint8 is expected for JPEG images + img_np = img_np / 255.0 + else: + raise RuntimeError(f"Unknown image dtype: {img_np.dtype} on {img_path}") + img = torch.from_numpy(img_np).permute(2, 0, 1) + video_width, video_height = img_pil.size # the original video size + return img, video_height, video_width + + +class AsyncVideoFrameLoader: + """ + A list of video frames to be loaded asynchronously without blocking session start.
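+ The first frame is loaded eagerly so that `video_height` and `video_width` are available immediately; the remaining frames are loaded in a background daemon thread.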
+ """ + + def __init__(self, img_paths, image_size, offload_video_to_cpu, img_mean, img_std): + self.img_paths = img_paths + self.image_size = image_size + self.offload_video_to_cpu = offload_video_to_cpu + self.img_mean = img_mean + self.img_std = img_std + # items in `self._images` will be loaded asynchronously + self.images = [None] * len(img_paths) + # catch and raise any exceptions in the async loading thread + self.exception = None + # video_height and video_width be filled when loading the first image + self.video_height = None + self.video_width = None + + # load the first frame to fill video_height and video_width and also + # to cache it (since it's most likely where the user will click) + self.__getitem__(0) + + # load the rest of frames asynchronously without blocking the session start + def _load_frames(): + try: + for n in tqdm(range(len(self.images)), desc="frame loading (JPEG)"): + self.__getitem__(n) + except Exception as e: + self.exception = e + + self.thread = Thread(target=_load_frames, daemon=True) + self.thread.start() + + def __getitem__(self, index): + if self.exception is not None: + raise RuntimeError("Failure in frame loading thread") from self.exception + + img = self.images[index] + if img is not None: + return img + + img, video_height, video_width = _load_img_as_tensor( + self.img_paths[index], self.image_size + ) + self.video_height = video_height + self.video_width = video_width + # normalize by mean and std + img -= self.img_mean + img /= self.img_std + if not self.offload_video_to_cpu: + img = img.cuda(non_blocking=True) + self.images[index] = img + return img + + def __len__(self): + return len(self.images) + + +def load_video_frames( + video_path, + image_size, + offload_video_to_cpu, + img_mean=(0.485, 0.456, 0.406), + img_std=(0.229, 0.224, 0.225), + async_loading_frames=False, +): + """ + Load the video frames from a directory of JPEG files (".jpg" format). + + The frames are resized to image_size x image_size and are loaded to GPU if + `offload_video_to_cpu` is `False` and to CPU if `offload_video_to_cpu` is `True`. + + You can load a frame asynchronously by setting `async_loading_frames` to `True`. 
+ """ + if isinstance(video_path, str) and os.path.isdir(video_path): + jpg_folder = video_path + else: + raise NotImplementedError("Only JPEG frames are supported at this moment") + + frame_names = [ + p + for p in os.listdir(jpg_folder) + if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"] + ] + frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) + num_frames = len(frame_names) + if num_frames == 0: + raise RuntimeError(f"no images found in {jpg_folder}") + img_paths = [os.path.join(jpg_folder, frame_name) for frame_name in frame_names] + img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None] + img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None] + + if async_loading_frames: + lazy_images = AsyncVideoFrameLoader( + img_paths, image_size, offload_video_to_cpu, img_mean, img_std + ) + return lazy_images, lazy_images.video_height, lazy_images.video_width + + images = torch.zeros(num_frames, 3, image_size, image_size, dtype=torch.float32) + for n, img_path in enumerate(tqdm(img_paths, desc="frame loading (JPEG)")): + images[n], video_height, video_width = _load_img_as_tensor(img_path, image_size) + if not offload_video_to_cpu: + images = images.cuda() + img_mean = img_mean.cuda() + img_std = img_std.cuda() + # normalize by mean and std + images -= img_mean + images /= img_std + return images, video_height, video_width + + +def fill_holes_in_mask_scores(mask, max_area): + """ + A post processor to fill small holes in mask scores with area under `max_area`. + """ + # Holes are those connected components in background with area <= self.max_area + # (background regions are those with mask scores <= 0) + assert max_area > 0, "max_area must be positive" + labels, areas = get_connected_components(mask <= 0) + is_hole = (labels > 0) & (areas <= max_area) + # We fill holes with a small positive mask score (0.1) to change them to foreground. + mask = torch.where(is_hole, 0.1, mask) + return mask + + +def concat_points(old_point_inputs, new_points, new_labels): + """Add new points and labels to previous point inputs (add at the end).""" + if old_point_inputs is None: + points, labels = new_points, new_labels + else: + points = torch.cat([old_point_inputs["point_coords"], new_points], dim=1) + labels = torch.cat([old_point_inputs["point_labels"], new_labels], dim=1) + + return {"point_coords": points, "point_labels": labels} diff --git a/third_parts/sam2/utils/transforms.py b/third_parts/sam2/utils/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..d877a460bc7b115f4a58e34b31d927b223097b71 --- /dev/null +++ b/third_parts/sam2/utils/transforms.py @@ -0,0 +1,99 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision.transforms import Normalize, Resize, ToTensor + + +class SAM2Transforms(nn.Module): + def __init__( + self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0 + ): + """ + Transforms for SAM2. 
+ """ + super().__init__() + self.resolution = resolution + self.mask_threshold = mask_threshold + self.max_hole_area = max_hole_area + self.max_sprinkle_area = max_sprinkle_area + self.mean = [0.485, 0.456, 0.406] + self.std = [0.229, 0.224, 0.225] + self.to_tensor = ToTensor() + self.transforms = torch.jit.script( + nn.Sequential( + Resize((self.resolution, self.resolution)), + Normalize(self.mean, self.std), + ) + ) + + def __call__(self, x): + x = self.to_tensor(x) + return self.transforms(x) + + def forward_batch(self, img_list): + img_batch = [self.transforms(self.to_tensor(img)) for img in img_list] + img_batch = torch.stack(img_batch, dim=0) + return img_batch + + def transform_coords( + self, coords: torch.Tensor, normalize=False, orig_hw=None + ) -> torch.Tensor: + """ + Expects a torch tensor with length 2 in the last dimension. The coordinates can be in absolute image or normalized coordinates, + If the coords are in absolute image coordinates, normalize should be set to True and original image size is required. + + Returns + Un-normalized coordinates in the range of [0, 1] which is expected by the SAM2 model. + """ + if normalize: + assert orig_hw is not None + h, w = orig_hw + coords = coords.clone() + coords[..., 0] = coords[..., 0] / w + coords[..., 1] = coords[..., 1] / h + + coords = coords * self.resolution # unnormalize coords + return coords + + def transform_boxes( + self, boxes: torch.Tensor, normalize=False, orig_hw=None + ) -> torch.Tensor: + """ + Expects a tensor of shape Bx4. The coordinates can be in absolute image or normalized coordinates, + if the coords are in absolute image coordinates, normalize should be set to True and original image size is required. + """ + boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw) + return boxes + + def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor: + """ + Perform PostProcessing on output masks. + """ + from third_parts.sam2.utils.misc import get_connected_components + + masks = masks.float() + if self.max_hole_area > 0: + # Holes are those connected components in background with area <= self.fill_hole_area + # (background regions are those with mask scores <= self.mask_threshold) + mask_flat = masks.flatten(0, 1).unsqueeze(1) # flatten as 1-channel image + labels, areas = get_connected_components(mask_flat <= self.mask_threshold) + is_hole = (labels > 0) & (areas <= self.max_hole_area) + is_hole = is_hole.reshape_as(masks) + # We fill holes with a small positive mask score (10.0) to change them to foreground. + masks = torch.where(is_hole, self.mask_threshold + 10.0, masks) + + if self.max_sprinkle_area > 0: + labels, areas = get_connected_components(mask_flat > self.mask_threshold) + is_hole = (labels > 0) & (areas <= self.max_sprinkle_area) + is_hole = is_hole.reshape_as(masks) + # We fill holes with negative mask score (-10.0) to change them to background. + masks = torch.where(is_hole, self.mask_threshold - 10.0, masks) + + masks = F.interpolate(masks, orig_hw, mode="bilinear", align_corners=False) + return masks diff --git a/third_parts/video_io.py b/third_parts/video_io.py new file mode 100644 index 0000000000000000000000000000000000000000..36670a59dc26909c6c5f18568e2762e2c394ddaa --- /dev/null +++ b/third_parts/video_io.py @@ -0,0 +1,271 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +from collections import OrderedDict + +import cv2 +from cv2 import (CAP_PROP_FOURCC, CAP_PROP_FPS, CAP_PROP_FRAME_COUNT, + CAP_PROP_FRAME_HEIGHT, CAP_PROP_FRAME_WIDTH, + CAP_PROP_POS_FRAMES) +from mmengine.utils import (check_file_exist, mkdir_or_exist, track_progress) + + +class Cache: + + def __init__(self, capacity): + self._cache = OrderedDict() + self._capacity = int(capacity) + if capacity <= 0: + raise ValueError('capacity must be a positive integer') + + @property + def capacity(self): + return self._capacity + + @property + def size(self): + return len(self._cache) + + def put(self, key, val): + if key in self._cache: + return + if len(self._cache) >= self.capacity: + self._cache.popitem(last=False) + self._cache[key] = val + + def get(self, key, default=None): + val = self._cache[key] if key in self._cache else default + return val + + +class VideoReader: + """Video class with similar usage to a list object. + + This video wrapper class provides convenient apis to access frames. + There exists an issue of OpenCV's VideoCapture class that jumping to a + certain frame may be inaccurate. It is fixed in this class by checking + the position after jumping each time. + Cache is used when decoding videos. So if the same frame is visited for + the second time, there is no need to decode again if it is stored in the + cache. + + Examples: + >>> import mmcv + >>> v = mmcv.VideoReader('sample.mp4') + >>> len(v) # get the total frame number with `len()` + 120 + >>> for img in v: # v is iterable + >>> mmcv.imshow(img) + >>> v[5] # get the 6th frame + """ + + def __init__(self, filename, cache_capacity=10): + # Check whether the video path is a url + if not filename.startswith(('https://', 'http://')): + check_file_exist(filename, 'Video file not found: ' + filename) + self._vcap = cv2.VideoCapture(filename) + assert cache_capacity > 0 + self._cache = Cache(cache_capacity) + self._position = 0 + # get basic info + self._width = int(self._vcap.get(CAP_PROP_FRAME_WIDTH)) + self._height = int(self._vcap.get(CAP_PROP_FRAME_HEIGHT)) + self._fps = self._vcap.get(CAP_PROP_FPS) + self._frame_cnt = int(self._vcap.get(CAP_PROP_FRAME_COUNT)) + self._fourcc = self._vcap.get(CAP_PROP_FOURCC) + + @property + def vcap(self): + """:obj:`cv2.VideoCapture`: The raw VideoCapture object.""" + return self._vcap + + @property + def opened(self): + """bool: Indicate whether the video is opened.""" + return self._vcap.isOpened() + + @property + def width(self): + """int: Width of video frames.""" + return self._width + + @property + def height(self): + """int: Height of video frames.""" + return self._height + + @property + def resolution(self): + """tuple: Video resolution (width, height).""" + return (self._width, self._height) + + @property + def fps(self): + """float: FPS of the video.""" + return self._fps + + @property + def frame_cnt(self): + """int: Total frames of the video.""" + return self._frame_cnt + + @property + def fourcc(self): + """str: "Four character code" of the video.""" + return self._fourcc + + @property + def position(self): + """int: Current cursor position, indicating frame decoded.""" + return self._position + + def _get_real_position(self): + return int(round(self._vcap.get(CAP_PROP_POS_FRAMES))) + + def _set_real_position(self, frame_id): + self._vcap.set(CAP_PROP_POS_FRAMES, frame_id) + pos = self._get_real_position() + for _ in range(frame_id - pos): + self._vcap.read() + self._position = frame_id + + def read(self): + """Read the next frame. 
+ + If the next frame have been decoded before and in the cache, then + return it directly, otherwise decode, cache and return it. + + Returns: + ndarray or None: Return the frame if successful, otherwise None. + """ + # pos = self._position + if self._cache: + img = self._cache.get(self._position) + if img is not None: + ret = True + else: + if self._position != self._get_real_position(): + self._set_real_position(self._position) + ret, img = self._vcap.read() + if ret: + self._cache.put(self._position, img) + else: + ret, img = self._vcap.read() + if ret: + self._position += 1 + return img + + def get_frame(self, frame_id): + """Get frame by index. + + Args: + frame_id (int): Index of the expected frame, 0-based. + + Returns: + ndarray or None: Return the frame if successful, otherwise None. + """ + if frame_id < 0 or frame_id >= self._frame_cnt: + raise IndexError( + f'"frame_id" must be between 0 and {self._frame_cnt - 1}') + if frame_id == self._position: + return self.read() + if self._cache: + img = self._cache.get(frame_id) + if img is not None: + self._position = frame_id + 1 + return img + self._set_real_position(frame_id) + ret, img = self._vcap.read() + if ret: + if self._cache: + self._cache.put(self._position, img) + self._position += 1 + return img + + def current_frame(self): + """Get the current frame (frame that is just visited). + + Returns: + ndarray or None: If the video is fresh, return None, otherwise + return the frame. + """ + if self._position == 0: + return None + return self._cache.get(self._position - 1) + + def cvt2frames(self, + frame_dir, + file_start=0, + filename_tmpl='{:06d}.jpg', + start=0, + max_num=0, + show_progress=True): + """Convert a video to frame images. + + Args: + frame_dir (str): Output directory to store all the frame images. + file_start (int): Filenames will start from the specified number. + filename_tmpl (str): Filename template with the index as the + placeholder. + start (int): The starting frame index. + max_num (int): Maximum number of frames to be written. + show_progress (bool): Whether to show a progress bar. 
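+ + Examples: + >>> # dump the frames of a local video into a folder of JPEG images + >>> v = VideoReader('sample.mp4') + >>> v.cvt2frames('./frames', show_progress=False)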
+ """ + mkdir_or_exist(frame_dir) + if max_num == 0: + task_num = self.frame_cnt - start + else: + task_num = min(self.frame_cnt - start, max_num) + if task_num <= 0: + raise ValueError('start must be less than total frame number') + if start > 0: + self._set_real_position(start) + + def write_frame(file_idx): + img = self.read() + if img is None: + return + filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) + cv2.imwrite(filename, img) + + if show_progress: + track_progress(write_frame, range(file_start, + file_start + task_num)) + else: + for i in range(task_num): + write_frame(file_start + i) + + def __len__(self): + return self.frame_cnt + + def __getitem__(self, index): + if isinstance(index, slice): + return [ + self.get_frame(i) + for i in range(*index.indices(self.frame_cnt)) + ] + # support negative indexing + if index < 0: + index += self.frame_cnt + if index < 0: + raise IndexError('index out of range') + return self.get_frame(index) + + def __iter__(self): + self._set_real_position(0) + return self + + def __next__(self): + img = self.read() + if img is not None: + return img + else: + raise StopIteration + + next = __next__ + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._vcap.release() + diff --git a/tools/dist.sh b/tools/dist.sh new file mode 100644 index 0000000000000000000000000000000000000000..dee38c071b763e72422dce1e5be7cc3486ae3224 --- /dev/null +++ b/tools/dist.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +set -x + +FILE=$1 +CONFIG=$2 +GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-$((28500 + $RANDOM % 2000))} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} +DEEPSPEED=${DEEPSPEED:-deepspeed_zero2} + + +if command -v torchrun &> /dev/null +then + echo "Using torchrun mode." + PYTHONPATH="$(dirname $0)/..":$PYTHONPATH OMP_NUM_THREADS=1 MKL_NUM_THREADS=1 \ + torchrun --nnodes=${NNODES} \ + --nnodes=${NNODES} \ + --node_rank=${NODE_RANK} \ + --master_addr=${MASTER_ADDR} \ + --master_port=${PORT} \ + --nproc_per_node=${GPUS} \ + tools/${FILE}.py ${CONFIG} --launcher pytorch --deepspeed $DEEPSPEED "${@:4}" +else + echo "Using launch mode." 
+ PYTHONPATH="$(dirname $0)/..":$PYTHONPATH OMP_NUM_THREADS=1 MKL_NUM_THREADS=1 \ + python -m torch.distributed.launch \ + --nnodes=${NNODES} \ + --node_rank=${NODE_RANK} \ + --master_addr=${MASTER_ADDR} \ + --master_port=${PORT} \ + --nproc_per_node=${GPUS} \ + tools/${FILE}.py ${CONFIG} --launcher pytorch --deepspeed $DEEPSPEED "${@:4}" +fi diff --git a/tools/slurm.sh b/tools/slurm.sh new file mode 100644 index 0000000000000000000000000000000000000000..003d5bb9248010bb0b0710baf71107fd0ca23ab6 --- /dev/null +++ b/tools/slurm.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -x + +FILE=$1 +CONFIG=$2 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +MASTER_PORT=${MASTER_PORT:-$((28500 + $RANDOM % 2000))} +PARTITION=${PARTITION:-DUMMY} +JOB_NAME=${JOB_NAME:-DUMMY} +QUOTATYPE=${QUOTATYPE:-auto} +SRUN_ARGS=${SRUN_ARGS:-""} +DEEPSPEED=${DEEPSPEED:-deepspeed_zero2} +PY_ARGS=${@:3} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH OMP_NUM_THREADS=1 MKL_NUM_THREADS=1 \ +CUDA_HOME=${CONDA_PREFIX} \ +LD_LIBRARY_PATH=${CONDA_PREFIX}/lib:$(realpath ~/.local/lib) \ +MASTER_PORT=$MASTER_PORT \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=${QUOTATYPE} \ + ${SRUN_ARGS} \ + python -u tools/${FILE}.py ${CONFIG} --launcher="slurm" --deepspeed $DEEPSPEED ${PY_ARGS} diff --git a/tools/test.py b/tools/test.py new file mode 100644 index 0000000000000000000000000000000000000000..1ce0a6707c7a329a05dd125ed12a591058ebe3ef --- /dev/null +++ b/tools/test.py @@ -0,0 +1,125 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import logging +import os +import os.path as osp +from types import FunctionType + +from mmengine import print_log +from mmengine.config import Config, DictAction +from mmengine.registry import RUNNERS +from mmengine.runner import Runner + +from xtuner.configs import cfgs_name_path +from xtuner.model.utils import guess_load_checkpoint +from xtuner.registry import MAP_FUNC +from mmengine.model import is_model_wrapper + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test model') + parser.add_argument('config', help='config file name or path.') + parser.add_argument('--checkpoint', default=None, help='checkpoint file') + parser.add_argument( + '--work-dir', + help='the directory to save the file containing evaluation metrics') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--deepspeed', + default=None, + help='Dummy option' + ) + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', '--local-rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def register_function(cfg_dict): + if isinstance(cfg_dict, dict): + for key, value in dict.items(cfg_dict): + if isinstance(value, FunctionType): + value_str = str(value) + if value_str not in MAP_FUNC: + MAP_FUNC.register_module(module=value, name=value_str) + cfg_dict[key] = value_str + else: + register_function(value) + elif isinstance(cfg_dict, (list, tuple)): + for value in cfg_dict: + register_function(value) + + +def main(): + args = parse_args() + + if args.deepspeed is not None: + print_log("Deepspeed is not adopted during inference, Skipped.", level=logging.WARN) + + # parse config + if not osp.isfile(args.config): + try: + args.config = cfgs_name_path[args.config] + except KeyError: + raise FileNotFoundError(f'Cannot find {args.config}') + + # load config + cfg = Config.fromfile(args.config) + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # register FunctionType object in cfg to `MAP_FUNC` Registry and + # change these FunctionType object to str + register_function(cfg._cfg_dict) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + # build the runner from config + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) + + if args.checkpoint is not None: + state_dict = guess_load_checkpoint(args.checkpoint) + + if is_model_wrapper(runner.model): + runner.model.module.load_state_dict(state_dict, strict=False) + else: + runner.model.load_state_dict(state_dict, strict=False) + runner.logger.info(f'Load checkpoint from {args.checkpoint}') + else: + Warning("No checkpoint !!!") + + # start testing + runner.test() + + +if __name__ == '__main__': + main() diff --git a/tools/train.py b/tools/train.py new file mode 100644 index 0000000000000000000000000000000000000000..22aa630c61384d60c53cf34ef98e0a2226781c7f --- /dev/null +++ b/tools/train.py @@ -0,0 +1,9 @@ +from xtuner.tools.train import main as train +try: + import torch + import torch_npu + from torch_npu.contrib import transfer_to_npu +except: + pass +if __name__ == '__main__': + train() diff --git a/vlm/engine/hooks/dataset_info_hook.py b/vlm/engine/hooks/dataset_info_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..fd909dd3209c77aa34f0f898b7f729cd52ab527b --- /dev/null +++ b/vlm/engine/hooks/dataset_info_hook.py @@ -0,0 +1,47 @@ +from mmengine.hooks import Hook + +from xtuner.registry import BUILDER + + +class SpecialDatasetInfoHook(Hook): + + def __init__(self, tokenizer, is_intern_repo_dataset=False, 
special_tokens=None): + self.tokenizer = BUILDER.build(tokenizer) + if special_tokens is not None: + self.tokenizer.add_tokens(special_tokens, special_tokens=True) + self.is_intern_repo_dataset = is_intern_repo_dataset + + def log(self, runner, dataset, mode='train'): + + def _log(input_ids, log_prefix=''): + if self.is_intern_repo_dataset: + input_ids = [abs(x) for x in input_ids] + + text = self.tokenizer.decode(input_ids) + runner.logger.info(text) + + runner.logger.info(f'Num {mode} samples {len(dataset)}') + runner.logger.info(f'{mode} example:') + if 'chosen_ids' in dataset[0]: + _log(dataset[0]['chosen_ids'], log_prefix='chosen: ') + _log(dataset[0]['rejected_ids'], log_prefix='rejected: ') + else: + _log(dataset[0]['input_ids']) + + def before_train(self, runner) -> None: + do_train = runner.train_loop is not None + do_eval = runner.val_loop is not None + if do_train: + train_dataset = runner.train_dataloader.dataset + self.log(runner, train_dataset, mode='train') + if do_eval: + eval_dataset = runner.val_dataloader.dataset + self.log(runner, eval_dataset, mode='eval') + + def before_val(self, runner) -> None: + eval_dataset = runner.val_dataloader.dataset + self.log(runner, eval_dataset, mode='eval') + + def before_test(self, runner) -> None: + test_dataset = runner.test_dataloader.dataset + self.log(runner, test_dataset, mode='test') diff --git a/vlm/engine/runner/__init__.py b/vlm/engine/runner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a586f7804e7f8e81a47727e19dc039dfc0c2278e --- /dev/null +++ b/vlm/engine/runner/__init__.py @@ -0,0 +1,2 @@ +from .loops import TestLoop +from .video_loops import VideoTestLoop diff --git a/vlm/engine/runner/loops.py b/vlm/engine/runner/loops.py new file mode 100644 index 0000000000000000000000000000000000000000..44eabb2dbf58b41c9d1f76034b9047515b057510 --- /dev/null +++ b/vlm/engine/runner/loops.py @@ -0,0 +1,216 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +from mmengine.runner import ValLoop as MMENGINE_ValLoop +from mmengine.dist import broadcast_object_list, is_main_process, get_world_size, get_rank, barrier, collect_results +import math +import torch +from mmengine.model import is_model_wrapper +from types import MethodType +from xtuner.utils import (DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX, + PROMPT_TEMPLATE) +from xtuner.tools.utils import get_stop_criteria, is_cn_string +from transformers import GenerationConfig + +TORCH_DTYPE_MAP = dict( + fp16=torch.float16, bf16=torch.bfloat16, fp32=torch.float32, auto='auto') + +class TestLoop(MMENGINE_ValLoop): + def __init__(self, runner, dataloader, evaluator=None, torch_dtype='fp16', select_metric='first') -> None: + # must be concatset + super(MMENGINE_ValLoop, self).__init__(runner, dataloader) + self._runner = runner + self.torch_dtype = torch_dtype + if torch_dtype is not None: + self.torch_dtype = TORCH_DTYPE_MAP[torch_dtype] + self.select_metric = select_metric + + def run(self) -> dict: + """Launch Test.""" + self.runner.logger.info('==================== Start test loop ===================') + self.runner.call_hook('before_test') + self.runner.call_hook('before_test_epoch') + + if is_model_wrapper(self.runner.model): + model = self.runner.model.module + else: + model = self.runner.model + + model.gradient_checkpointing_disable() + model.eval() + model.cuda() + + rank = get_rank() + metrics = [] + # Ensure that eta and log are displayed correctly. 
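+ # `current_run_total_ids` counts samples across all datasets on this rank and is passed to the test-iter hooks as the batch index.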
+ current_run_total_ids = 0 + for _, dataset in enumerate(self.dataloader.dataset.datasets): + if not hasattr(model, 'preparing_for_generation'): + model.preparing_for_generation = MethodType(default_preparing_for_generation, model) + print("Warning: the model does not have a preparing_for_generation() method; using the default one.") + model.preparing_for_generation(dataset.metainfo) + + # split per rank + results = [] + n_samples = len(dataset) + per_rank_samples = math.ceil(n_samples / get_world_size()) + per_rank_ids = range(per_rank_samples * rank, + min(n_samples, per_rank_samples * (rank + 1))) + for idx in per_rank_ids: + data_batch = dataset[idx] + self.run_iter(current_run_total_ids, data_batch, results, model) + current_run_total_ids += 1 + + barrier() + self.runner.logger.info('==================== Start collect results ===================') + results = collect_results(results, len(dataset)) + self.runner.logger.info('========= Starting the evaluation of a dataset ===========') + if is_main_process(): + metric = dataset.evaluate(results, self.runner.work_dir) + objects = [metric] + else: + objects = [None] + broadcast_object_list(objects) + metric = objects[0] + metrics.append(metric) + + # select metrics + if self.select_metric == 'first': + metrics = metrics[0] + else: + raise NotImplementedError + + self.runner.logger.info('================ Ending test loop ================') + self.runner.call_hook('after_test_epoch', metrics=metrics) + self.runner.call_hook('after_test') + return metrics + + @torch.no_grad() + def run_iter(self, idx, data_batch, results, model): + assert 'text_prompts' in data_batch and 'pixel_values' in data_batch and 'img_id' in data_batch + prediction = {'img_id': data_batch['img_id']} + + self.runner.call_hook( + 'before_test_iter', batch_idx=idx, data_batch=data_batch) + + outputs = model.predict_forward(**data_batch) + prediction.update(outputs) + results.append(prediction) + + self.runner.call_hook( + 'after_test_iter', + batch_idx=idx, + data_batch=data_batch, + outputs=outputs) + +def default_preparing_for_generation(self, metainfo): + # set stop criteria and generation configs for model + + assert hasattr(self, 'tokenizer'), "The model does not have a tokenizer!"
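+ # NOTE: this default assumes an InternLM2-style chat template and greedy decoding with up to 2048 new tokens; datasets can override it via `generation_kwargs` in their metainfo.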
+ + self.bot_name = 'BOT' + template = PROMPT_TEMPLATE['internlm2_chat'] + self.template = template + stop_words = [] + stop_words += template.get('STOP_WORDS', []) + stop_criteria = get_stop_criteria( + tokenizer=self.tokenizer, stop_words=stop_words) + self.stop_criteria = stop_criteria + + default_generation_kwargs = dict( + max_new_tokens=2048, + do_sample=False, + eos_token_id=self.tokenizer.eos_token_id, + pad_token_id=( + self.tokenizer.pad_token_id + if self.tokenizer.pad_token_id is not None + else self.tokenizer.eos_token_id + ), + ) + default_generation_kwargs.update(metainfo.get('generation_kwargs', {})) + self.gen_config = GenerationConfig(**default_generation_kwargs) + return + + +class AnnoLoop(MMENGINE_ValLoop): + def __init__(self, runner, dataloader, evaluator=None, torch_dtype='fp16', select_metric='first') -> None: + # must be concatset + super(MMENGINE_ValLoop, self).__init__(runner, dataloader) + self._runner = runner + self.torch_dtype = torch_dtype + if torch_dtype is not None: + self.torch_dtype = TORCH_DTYPE_MAP[torch_dtype] + self.select_metric = select_metric + + def run(self) -> dict: + """Launch Test.""" + self.runner.logger.info('==================== Start test loop ===================') + self.runner.call_hook('before_test') + self.runner.call_hook('before_test_epoch') + + if is_model_wrapper(self.runner.model): + model = self.runner.model.module + else: + model = self.runner.model + + model.eval() + + rank = get_rank() + metrics = [] + # Ensure that eta and log are displayed correctly. + current_run_total_ids = 0 + for _, dataset in enumerate(self.dataloader.dataset.datasets): + + # split per rank + results = [] + n_samples = len(dataset) + per_rank_samples = math.ceil(n_samples / get_world_size()) + per_rank_ids = range(per_rank_samples * rank, + min(n_samples, per_rank_samples * (rank + 1))) + for idx in per_rank_ids: + data_batch = dataset[idx] + self.run_iter(current_run_total_ids, data_batch, results, model) + current_run_total_ids += 1 + if hasattr(model, 'save_step'): + model.save_step(last=True) + + barrier() + self.runner.logger.info('==================== Start collect results ===================') + results = collect_results(results, len(dataset)) + self.runner.logger.info('========= Starting the evaluation of a data ===========') + if is_main_process(): + metric = dataset.evaluate(results, self.runner.work_dir) + objects = [metric] + else: + objects = [None] + broadcast_object_list(objects) + metric = objects[0] + metrics.append(metric) + + # select metrics + if self.select_metric == 'first': + metrics = metrics[0] + else: + raise NotImplementedError + + self.runner.logger.info('================ Ending test loop ================') + self.runner.call_hook('after_test_epoch', metrics=metrics) + self.runner.call_hook('after_test') + return metrics + + @torch.no_grad() + def run_iter(self, idx, data_batch, results, model): + prediction = {} + + self.runner.call_hook( + 'before_test_iter', batch_idx=idx, data_batch=data_batch) + + outputs = model.predict_forward(**data_batch) + prediction.update(outputs) + results.append(prediction) + + self.runner.call_hook( + 'after_test_iter', + batch_idx=idx, + data_batch=data_batch, + outputs=outputs) \ No newline at end of file diff --git a/vlm/engine/runner/video_loops.py b/vlm/engine/runner/video_loops.py new file mode 100644 index 0000000000000000000000000000000000000000..fd18b0180e0c2cc3040ac9669dc69671c3a6f9b2 --- /dev/null +++ b/vlm/engine/runner/video_loops.py @@ -0,0 +1,206 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +import os.path + +import cv2 +import mmengine +from mmengine.runner import ValLoop as MMENGINE_ValLoop +from mmengine.dist import broadcast_object_list, is_main_process, get_world_size, get_rank, barrier, collect_results +import math +import torch +from mmengine.model import is_model_wrapper +from types import MethodType +from xtuner.utils import PROMPT_TEMPLATE +from xtuner.tools.utils import get_stop_criteria +from transformers import GenerationConfig +from pycocotools import mask as _mask +from mmengine.visualization.visualizer import Visualizer + +from vlm.utils import VideoReader + +TORCH_DTYPE_MAP = dict(fp16=torch.float16, bf16=torch.bfloat16, fp32=torch.float32, auto='auto') + +VID_INTERVAL = 4 + +def visualize(data_batch, prediction, visualize_path='work_dirs/visualize'): + if 'video_path' in data_batch: + vid_frames = VideoReader(data_batch['video_path'])[::VID_INTERVAL] + vid_id = os.path.basename(data_batch['video_path']).split('.')[0] + text_prompts = data_batch['text_prompts'] + mmengine.mkdir_or_exist(os.path.join(visualize_path, vid_id)) + visualizer = Visualizer() + + mmengine.mkdir_or_exist(os.path.join(visualize_path, vid_id, "vid")) + for id_frame, img in enumerate(vid_frames): + out_path = os.path.join(visualize_path, vid_id, "vid", "{:06d}.jpg".format(id_frame)) + cv2.imwrite(out_path, img) + + for id_text, text in enumerate(text_prompts): + mmengine.mkdir_or_exist(os.path.join(visualize_path, vid_id, "sample_{:06d}".format(id_text))) + mmengine.put_text(text, os.path.join(visualize_path, vid_id, "sample_{:06d}".format(id_text), 'text.txt')) + for id_frame, img in enumerate(vid_frames): + visualizer.set_image(img) + mask = prediction['prediction_masks'][id_text][id_frame] + mask = _mask.decode(mask).astype(bool) + visualizer.draw_binary_masks(mask, colors='g') + visual_result = visualizer.get_image() + out_path = os.path.join(visualize_path, vid_id, "sample_{:06d}".format(id_text), + "{:06d}.jpg".format(id_frame)) + cv2.imwrite(out_path, visual_result) + else: + images_files = data_batch['images'] + vid_id = data_batch['video_id'] + text_prompts = data_batch['text_prompts'] + image_folder = data_batch['image_folder'] + mmengine.mkdir_or_exist(os.path.join(visualize_path, "{:06d}".format(vid_id))) + visualizer = Visualizer() + + mmengine.mkdir_or_exist(os.path.join(visualize_path, "{:06d}".format(vid_id), "vid")) + for id_frame, img_file in enumerate(images_files): + img = cv2.imread(os.path.join(image_folder, img_file)) + out_path = os.path.join(visualize_path, "{:06d}".format(vid_id), "vid", os.path.basename(img_file)) + cv2.imwrite(out_path, img) + + for id_text, text in enumerate(text_prompts): + mmengine.mkdir_or_exist(os.path.join(visualize_path, "{:06d}".format(vid_id), "sample_{:06d}".format(id_text))) + mmengine.put_text(text, os.path.join(visualize_path, "{:06d}".format(vid_id), "sample_{:06d}".format(id_text), + 'text.txt')) + for id_frame, img_file in enumerate(images_files): + img = cv2.imread(os.path.join(image_folder, img_file)) + visualizer.set_image(img) + mask = prediction['prediction_masks'][id_text][id_frame] + mask = _mask.decode(mask).astype(bool) + visualizer.draw_binary_masks(mask, colors='g') + visual_result = visualizer.get_image() + + out_path = os.path.join(visualize_path, "{:06d}".format(vid_id), "sample_{:06d}".format(id_text), + os.path.basename(img_file)) + cv2.imwrite(out_path, visual_result) + + + +class VideoTestLoop(MMENGINE_ValLoop): + def __init__(self, runner, dataloader, torch_dtype='fp16', 
select_metric='first', visualize=None, evaluator=None) -> None: + # the dataloader's dataset must be a ConcatDataset + super(MMENGINE_ValLoop, self).__init__(runner, dataloader) + self._runner = runner + self.torch_dtype = torch_dtype + if torch_dtype is not None: + self.torch_dtype = TORCH_DTYPE_MAP[torch_dtype] + self.select_metric = select_metric + + self.visualize = visualize + self.evaluator = evaluator + + def run(self) -> dict: + """Launch Test.""" + self.runner.logger.info('==================== Start test loop ===================') + self.runner.call_hook('before_test') + self.runner.call_hook('before_test_epoch') + + if is_model_wrapper(self.runner.model): + model = self.runner.model.module + else: + model = self.runner.model + + model.gradient_checkpointing_disable() + model.eval() + model.cuda() + + rank = get_rank() + metrics = [] + # Ensure that eta and log are displayed correctly. + current_run_total_ids = 0 + for _, dataset in enumerate(self.dataloader.dataset.datasets): + if not hasattr(model, 'preparing_for_generation'): + model.preparing_for_generation = MethodType(default_preparing_for_generation, model) + print("Warning: the model does not have a preparing_for_generation() method; using the default one.") + model.preparing_for_generation(dataset.metainfo) + + # split per rank + results = [] + n_samples = len(dataset) + per_rank_samples = math.ceil(n_samples / get_world_size()) + running_tot = per_rank_samples * get_world_size() + assert running_tot >= n_samples + per_rank_ids = range(per_rank_samples * rank, per_rank_samples * (rank + 1)) + for idx in per_rank_ids: + if n_samples <= idx: + data_batch = dataset[n_samples - 1] + else: + data_batch = dataset[idx] + self.run_iter(current_run_total_ids, data_batch, results, model) + current_run_total_ids += 1 + + barrier() + self.runner.logger.info('==================== Start collect results ===================') + results = collect_results(results, n_samples) + self.runner.logger.info('========= Starting the evaluation of a dataset ===========') + if is_main_process(): + metric = dataset.evaluate(results, self.runner.work_dir) + objects = [metric] + else: + objects = [None] + broadcast_object_list(objects) + metric = objects[0] + metrics.append(metric) + + # select metrics + if self.select_metric == 'first': + metrics = metrics[0] + else: + raise NotImplementedError + + self.runner.logger.info('================ Ending test loop ================') + self.runner.call_hook('after_test_epoch', metrics=metrics) + self.runner.call_hook('after_test') + return metrics + + @torch.no_grad() + def run_iter(self, idx, data_batch, results, model): + prediction = {'video_id': data_batch['video_id']} + + self.runner.call_hook( + 'before_test_iter', batch_idx=idx, data_batch=data_batch) + + outputs = model.predict_forward(**data_batch) + prediction.update(outputs) + results.append(prediction) + + if self.visualize: + # if not prediction['is_exists'][0].all(): + # print(prediction['is_exists']) + visualize(data_batch=data_batch, prediction=prediction, visualize_path=self.visualize) + + self.runner.call_hook( + 'after_test_iter', + batch_idx=idx, + data_batch=data_batch, + outputs=outputs) + +def default_preparing_for_generation(self, metainfo): + # set stop criteria and generation configs for model + + assert hasattr(self, 'tokenizer'), "The model does not have a tokenizer!"
+ + self.bot_name = 'BOT' + template = PROMPT_TEMPLATE['internlm2_chat'] + self.template = template + stop_words = [] + stop_words += template.get('STOP_WORDS', []) + stop_criteria = get_stop_criteria( + tokenizer=self.tokenizer, stop_words=stop_words) + self.stop_criteria = stop_criteria + + default_generation_kwargs = dict( + max_new_tokens=2048, + do_sample=False, + eos_token_id=self.tokenizer.eos_token_id, + pad_token_id=( + self.tokenizer.pad_token_id + if self.tokenizer.pad_token_id is not None + else self.tokenizer.eos_token_id + ), + ) + default_generation_kwargs.update(metainfo.get('generation_kwargs', {})) + self.gen_config = GenerationConfig(**default_generation_kwargs) + return diff --git a/vlm/utils/__init__.py b/vlm/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..811ab05efb7cfc9568b0b49e2427e0e632e53865 --- /dev/null +++ b/vlm/utils/__init__.py @@ -0,0 +1,2 @@ +from .load_checkpoint import load_checkpoint_with_prefix, load_state_dict_to_model +from .video_io import VideoReader diff --git a/vlm/utils/load_checkpoint.py b/vlm/utils/load_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..5e424f6a29d59c42c4ed2af038990274b4104c0e --- /dev/null +++ b/vlm/utils/load_checkpoint.py @@ -0,0 +1,59 @@ +import logging + +from mmengine.runner.checkpoint import CheckpointLoader +from mmengine.logging.logger import print_log +from huggingface_hub import hf_hub_download + +HF_HUB_PREFIX = 'hf-hub:' + +def load_checkpoint_with_prefix(filename, prefix=None, map_location='cpu', logger='current'): + """Load partial pretrained model with specific prefix. + + Args: + prefix (str): The prefix of sub-module. + filename (str): Accept local filepath, URL, ``torchvision://xxx``, + ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for + details. + map_location (str | None): Same as :func:`torch.load`. + Defaults to None. + logger: logger + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + if filename.startswith('hf-hub:'): + model_id = filename[len(HF_HUB_PREFIX):] + filename = hf_hub_download(model_id, 'pytorch_model.bin') + + checkpoint = CheckpointLoader.load_checkpoint(filename, map_location=map_location, logger=logger) + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + if not prefix: + return state_dict + if not prefix.endswith('.'): + prefix += '.' + prefix_len = len(prefix) + + state_dict = { + k[prefix_len:]: v + for k, v in state_dict.items() if k.startswith(prefix) + } + + assert state_dict, f'{prefix} is not in the pretrained model' + return state_dict + + +def load_state_dict_to_model(model, state_dict, logger='current'): + missing_keys, unexpected_keys = model.load_state_dict(state_dict) + if missing_keys: + print_log(missing_keys, logger=logger, level=logging.ERROR) + raise RuntimeError() + if unexpected_keys: + print_log(unexpected_keys, logger=logger, level=logging.ERROR) + raise RuntimeError() + print_log("Loaded checkpoint successfully", logger=logger) diff --git a/vlm/utils/modeling_rope_utils.py b/vlm/utils/modeling_rope_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..80728481c05ab419eacdc3eab22df7fa15a30b0a --- /dev/null +++ b/vlm/utils/modeling_rope_utils.py @@ -0,0 +1,573 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import Optional, Tuple + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import is_torch_available, logging + + +logger = logging.get_logger(__name__) + + +if is_torch_available(): + import torch + + +def _compute_default_rope_parameters( + config: Optional[PretrainedConfig] = None, + device: Optional["torch.device"] = None, + seq_len: Optional[int] = None, + **rope_kwargs, +) -> Tuple["torch.Tensor", float]: + """ + Computes the inverse frequencies according to the original RoPE implementation + Args: + config ([`~transformers.PretrainedConfig`]): + The model configuration. + device (`torch.device`): + The device to use for initialization of the inverse frequencies. + seq_len (`int`, *optional*): + The current sequence length. Unused for this type of RoPE. + rope_kwargs (`Dict`, *optional*): + BC compatibility with the previous RoPE class instantiation, will be removed in v4.45. + Returns: + Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the + post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). + """ + if config is not None and len(rope_kwargs) > 0: + raise ValueError( + "Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in " + f"`_compute_default_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}" + ) + if len(rope_kwargs) > 0: + base = rope_kwargs["base"] + dim = rope_kwargs["dim"] + elif config is not None: + base = config.rope_theta + partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 + head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) + dim = int(head_dim * partial_rotary_factor) + + attention_factor = 1.0 # Unused in this type of RoPE + + # Compute the inverse frequencies + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float().to(device) / dim)) + return inv_freq, attention_factor + + +def _compute_linear_scaling_rope_parameters( + config: Optional[PretrainedConfig] = None, + device: Optional["torch.device"] = None, + seq_len: Optional[int] = None, + **rope_kwargs, +) -> Tuple["torch.Tensor", float]: + """ + Computes the inverse frequencies with linear scaling. Credits to the Reddit user /u/kaiokendev + Args: + config ([`~transformers.PretrainedConfig`]): + The model configuration. + device (`torch.device`): + The device to use for initialization of the inverse frequencies. + seq_len (`int`, *optional*): + The current sequence length. Unused for this type of RoPE. + rope_kwargs (`Dict`, *optional*): + BC compatibility with the previous RoPE class instantiation, will be removed in v4.45. + Returns: + Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the + post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). 
+ """ + if config is not None and len(rope_kwargs) > 0: + raise ValueError( + "Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in " + f"`_compute_linear_scaling_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}" + ) + if len(rope_kwargs) > 0: + factor = rope_kwargs["factor"] + elif config is not None: + factor = config.rope_scaling["factor"] + + # Gets the default RoPE parameters + inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs) + + # Then applies linear scaling to the frequencies. + # NOTE: originally, scaling was applied to the position_ids. However, we get `embs = inv_freq @ position_ids`, so + # applying scaling to the inverse frequencies is equivalent. + inv_freq /= factor + return inv_freq, attention_factor + + +def _compute_dynamic_ntk_parameters( + config: Optional[PretrainedConfig] = None, + device: Optional["torch.device"] = None, + seq_len: Optional[int] = None, + **rope_kwargs, +) -> Tuple["torch.Tensor", float]: + """ + Computes the inverse frequencies with NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla + Args: + config ([`~transformers.PretrainedConfig`]): + The model configuration. + device (`torch.device`): + The device to use for initialization of the inverse frequencies. + seq_len (`int`, *optional*): + The current sequence length, used to update the dynamic RoPE at inference time. + rope_kwargs (`Dict`, *optional*): + BC compatibility with the previous RoPE class instantiation, will be removed in v4.45. + Returns: + Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the + post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). + """ + # TODO (joao): use the new `original_max_position_embeddings` from rope_scaling + if config is not None and len(rope_kwargs) > 0: + raise ValueError( + "Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in " + f"`_compute_dynamic_ntk_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}" + ) + if len(rope_kwargs) > 0: + base = rope_kwargs["base"] + dim = rope_kwargs["dim"] + max_position_embeddings = rope_kwargs["max_position_embeddings"] + factor = rope_kwargs["factor"] + elif config is not None: + base = config.rope_theta + partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 + head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) + dim = int(head_dim * partial_rotary_factor) + max_position_embeddings = config.max_position_embeddings + factor = config.rope_scaling["factor"] + + attention_factor = 1.0 # Unused in this type of RoPE + + # seq_len: default to max_position_embeddings, e.g. at init time + seq_len = seq_len if seq_len is not None and seq_len > max_position_embeddings else max_position_embeddings + + # Compute the inverse frequencies + base = base * ((factor * seq_len / max_position_embeddings) - (factor - 1)) ** (dim / (dim - 2)) + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float().to(device) / dim)) + return inv_freq, attention_factor + + +def _compute_yarn_parameters( + config: PretrainedConfig, device: "torch.device", seq_len: Optional[int] = None, **rope_kwargs +) -> Tuple["torch.Tensor", float]: + """ + Computes the inverse frequencies with NTK scaling. 
Please refer to the + [original paper](https://arxiv.org/abs/2309.00071) + Args: + config ([`~transformers.PretrainedConfig`]): + The model configuration. + device (`torch.device`): + The device to use for initialization of the inverse frequencies. + seq_len (`int`, *optional*): + The current sequence length. Unused for this type of RoPE. + rope_kwargs (`Dict`, *optional*): + BC compatibility with the previous RoPE class instantiation, will be removed in v4.45. + Returns: + Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the + post-processing scaling factor applied to the computed cos/sin. + """ + # No need to keep BC with yarn, unreleased when this new pattern was created. + if len(rope_kwargs) > 0: + raise ValueError( + f"Unexpected arguments: `**rope_kwargs` should be unset in `_compute_yarn_parameters`, got {rope_kwargs}" + ) + + base = config.rope_theta + partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 + head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) + dim = int(head_dim * partial_rotary_factor) + max_position_embeddings = config.max_position_embeddings + factor = config.rope_scaling["factor"] + + # Sets the attention factor as suggested in the paper + attention_factor = config.rope_scaling.get("attention_factor") + if attention_factor is None: + attention_factor = 0.1 * math.log(factor) + 1.0 + + # Optional config options + # beta_fast/beta_slow: as suggested in the paper, default to 32/1 (correspondingly) + beta_fast = config.rope_scaling.get("beta_fast") or 32 + beta_slow = config.rope_scaling.get("beta_slow") or 1 + + # Compute the inverse frequencies + def find_correction_dim(num_rotations, dim, base, max_position_embeddings): + """Inverse dimension formula to find the dimension based on the number of rotations""" + return (dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))) / (2 * math.log(base)) + + def find_correction_range(low_rot, high_rot, dim, base, max_position_embeddings): + """Find dimension range bounds based on rotations""" + low = math.floor(find_correction_dim(low_rot, dim, base, max_position_embeddings)) + high = math.ceil(find_correction_dim(high_rot, dim, base, max_position_embeddings)) + return max(low, 0), min(high, dim - 1) + + def linear_ramp_factor(min, max, dim): + if min == max: + max += 0.001 # Prevent singularity + + linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min) + ramp_func = torch.clamp(linear_func, 0, 1) + return ramp_func + + # Note on variable naming: "interpolation" comes from the original technique, where we interpolate the position IDs + # to expand the possible context length. In other words, interpolation = apply scaling factor. 
+ pos_freqs = base ** (torch.arange(0, dim, 2).float().to(device) / dim) + inv_freq_extrapolation = 1.0 / pos_freqs + inv_freq_interpolation = 1.0 / (factor * pos_freqs) + + low, high = find_correction_range(beta_fast, beta_slow, dim, base, max_position_embeddings) + + # Get n-dimensional rotational scaling corrected for extrapolation + inv_freq_extrapolation_factor = 1 - linear_ramp_factor(low, high, dim // 2).float().to(device) + inv_freq = ( + inv_freq_interpolation * (1 - inv_freq_extrapolation_factor) + + inv_freq_extrapolation * inv_freq_extrapolation_factor + ) + + return inv_freq, attention_factor + + +def _compute_longrope_parameters( + config: PretrainedConfig, device: "torch.device", seq_len: Optional[int] = None, **rope_kwargs +) -> Tuple["torch.Tensor", float]: + """ + Computes the inverse frequencies with LongRoPE scaling. Please refer to the + [original implementation](https://github.com/microsoft/LongRoPE) + Args: + config ([`~transformers.PretrainedConfig`]): + The model configuration. + device (`torch.device`): + The device to use for initialization of the inverse frequencies. + seq_len (`int`, *optional*): + The current sequence length. + rope_kwargs (`Dict`, *optional*): + BC compatibility with the previous RoPE class instantiation, will be removed in v4.45. + Returns: + Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the + post-processing scaling factor applied to the computed cos/sin. + """ + # TODO (joao): use the new `original_max_position_embeddings` from rope_scaling + # No need to keep BC with longrope, unreleased when this new pattern was created. + if len(rope_kwargs) > 0: + raise ValueError( + "Unexpected arguments: `**rope_kwargs` should be unset in `_compute_longrope_parameters`, got " + f"{rope_kwargs}" + ) + + base = config.rope_theta + partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 + head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) + dim = int(head_dim * partial_rotary_factor) + long_factor = config.rope_scaling["long_factor"] + short_factor = config.rope_scaling["short_factor"] + factor = config.rope_scaling.get("factor") + attention_factor = config.rope_scaling.get("attention_factor") + + # NOTE: Phi3 (and potentially other models) modify `max_position_embeddings` and have a + # `original_max_position_embeddings` field containing the pretrained value. They use the ratio between these two + # values to compute the default attention scaling factor, instead of using `factor`. 
+ if hasattr(config, "original_max_position_embeddings"): + if seq_len and seq_len < config.original_max_position_embeddings: + expanded_max_position_embeddings = config.original_max_position_embeddings + else: + expanded_max_position_embeddings = config.max_position_embeddings + max_position_embeddings = config.original_max_position_embeddings + factor = expanded_max_position_embeddings / max_position_embeddings + else: + max_position_embeddings = config.max_position_embeddings + expanded_max_position_embeddings = max_position_embeddings * factor + + # Sets the attention factor as suggested in the paper + if attention_factor is None: + if factor <= 1.0: + attention_factor = 1.0 + else: + attention_factor = math.sqrt(1 + math.log(factor) / math.log(max_position_embeddings)) + + # Compute the inverse frequencies -- scaled based on the target sequence length + if expanded_max_position_embeddings > max_position_embeddings: + ext_factors = torch.tensor(long_factor, dtype=torch.float32, device=device) + else: + ext_factors = torch.tensor(short_factor, dtype=torch.float32, device=device) + inv_freq_shape = torch.arange(0, dim, 2, dtype=torch.int64, device=device).float() / dim + inv_freq = 1.0 / (ext_factors * base**inv_freq_shape) + + return inv_freq, attention_factor + + +def _compute_llama3_parameters( + config: PretrainedConfig, device: "torch.device", seq_len: Optional[int] = None, **rope_kwargs +) -> Tuple["torch.Tensor", float]: + """ + Computes the inverse frequencies for llama 3.1. + + Args: + config ([`~transformers.PretrainedConfig`]): + The model configuration. + device (`torch.device`): + The device to use for initialization of the inverse frequencies. + seq_len (`int`, *optional*): + The current sequence length. Unused for this type of RoPE. + rope_kwargs (`Dict`, *optional*): + BC compatibility with the previous RoPE class instantiation, will be removed in v4.45. + Returns: + Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the + post-processing scaling factor applied to the computed cos/sin. 
+ """ + # Gets the default RoPE parameters + inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs) + + factor = config.rope_scaling["factor"] # `8` in the original implementation + low_freq_factor = config.rope_scaling["low_freq_factor"] # `1` in the original implementation + high_freq_factor = config.rope_scaling["high_freq_factor"] # `4` in the original implementation + old_context_len = config.rope_scaling["original_max_position_embeddings"] # `8192` in the original implementation + + low_freq_wavelen = old_context_len / low_freq_factor + high_freq_wavelen = old_context_len / high_freq_factor + + wavelen = 2 * math.pi / inv_freq + # wavelen < high_freq_wavelen: do nothing + # wavelen > low_freq_wavelen: divide by factor + inv_freq_llama = torch.where(wavelen > low_freq_wavelen, inv_freq / factor, inv_freq) + # otherwise: interpolate between the two, using a smooth factor + smooth_factor = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor) + smoothed_inv_freq = (1 - smooth_factor) * inv_freq_llama / factor + smooth_factor * inv_freq_llama + is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen) + inv_freq_llama = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama) + + return inv_freq_llama, attention_factor + + +# This maps the "rope_type" string field in rope config to the corresponding function to compute the RoPE parameters +# from the model config. You can append new {'rope_type': callable} pairs to this dictionary to enable custom RoPE +# parameterizations, as long as the callable has the same signature. +ROPE_INIT_FUNCTIONS = { + "default": _compute_default_rope_parameters, + "linear": _compute_linear_scaling_rope_parameters, + "dynamic": _compute_dynamic_ntk_parameters, + "yarn": _compute_yarn_parameters, + "longrope": _compute_longrope_parameters, + "llama3": _compute_llama3_parameters, +} + + +def _check_received_keys( + rope_type: str, + received_keys: set, + required_keys: set, + optional_keys: Optional[set] = None, + ignore_keys: Optional[set] = None, +): + """Compare the received keys in `config.rope_scaling` against the expected and optional keys""" + # BC: "rope_type" was originally "type" -- let's check for "rope_type" when "type" is present + if "type" in received_keys: + received_keys -= {"type"} + required_keys.add("rope_type") + + # Some models need to store model-specific keys, and we don't want to throw warning at them + if ignore_keys is not None: + received_keys -= ignore_keys + + missing_keys = required_keys - received_keys + if missing_keys: + raise KeyError(f"Missing required keys in `rope_scaling` for 'rope_type'='{rope_type}': {missing_keys}") + + if optional_keys is not None: + unused_keys = received_keys - required_keys - optional_keys + else: + unused_keys = received_keys - required_keys + if unused_keys: + logger.warning(f"Unrecognized keys in `rope_scaling` for 'rope_type'='{rope_type}': {unused_keys}") + + +def _validate_default_rope_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None): + rope_scaling = config.rope_scaling + rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type" + required_keys = {"rope_type"} + received_keys = set(rope_scaling.keys()) + _check_received_keys(rope_type, received_keys, required_keys, ignore_keys=ignore_keys) + + +def _validate_linear_scaling_rope_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None): + 
rope_scaling = config.rope_scaling + rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type" + required_keys = {"rope_type", "factor"} + received_keys = set(rope_scaling.keys()) + _check_received_keys(rope_type, received_keys, required_keys, ignore_keys=ignore_keys) + + factor = rope_scaling["factor"] + if factor is None or not isinstance(factor, float) or factor < 1.0: + logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}") + + +def _validate_dynamic_scaling_rope_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None): + rope_scaling = config.rope_scaling + rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type" + required_keys = {"rope_type", "factor"} + # TODO (joao): update logic for the inclusion of `original_max_position_embeddings` + optional_keys = {"original_max_position_embeddings"} + received_keys = set(rope_scaling.keys()) + _check_received_keys(rope_type, received_keys, required_keys, optional_keys, ignore_keys=ignore_keys) + + factor = rope_scaling["factor"] + if factor is None or not isinstance(factor, float) or factor < 1.0: + logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}") + + +def _validate_yarn_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None): + rope_scaling = config.rope_scaling + rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type" + required_keys = {"rope_type", "factor"} + optional_keys = {"attention_factor", "beta_fast", "beta_slow"} + received_keys = set(rope_scaling.keys()) + _check_received_keys(rope_type, received_keys, required_keys, optional_keys, ignore_keys=ignore_keys) + + factor = rope_scaling["factor"] + if factor is None or not isinstance(factor, float) or factor < 1.0: + logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}") + + attention_factor = rope_scaling.get("attention_factor") + if attention_factor is not None and (not isinstance(attention_factor, float) or attention_factor < 0): + logger.warning( + f"`rope_scaling`'s attention_factor field must be a float greater than 0, got {attention_factor}" + ) + beta_fast = rope_scaling.get("beta_fast") + if beta_fast is not None and not isinstance(beta_fast, float): + logger.warning(f"`rope_scaling`'s beta_fast field must be a float, got {beta_fast}") + beta_slow = rope_scaling.get("beta_slow") + if beta_slow is not None and not isinstance(beta_slow, float): + logger.warning(f"`rope_scaling`'s beta_slow field must be a float, got {beta_slow}") + + if (beta_fast or 32) < (beta_slow or 1): + logger.warning( + f"`rope_scaling`'s beta_fast field must be greater than beta_slow, got beta_fast={beta_fast} " + f"(defaults to 32 if None) and beta_slow={beta_slow} (defaults to 1 if None)" + ) + + +def _validate_longrope_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None): + rope_scaling = config.rope_scaling + rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type" + required_keys = {"rope_type", "short_factor", "long_factor"} + # TODO (joao): update logic for the inclusion of `original_max_position_embeddings` + optional_keys = {"attention_factor", "factor", "original_max_position_embeddings"} + received_keys = set(rope_scaling.keys()) + _check_received_keys(rope_type, received_keys, required_keys, optional_keys, 
ignore_keys=ignore_keys)
+
+    partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0
+    head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
+    dim = int(head_dim * partial_rotary_factor)
+
+    short_factor = rope_scaling.get("short_factor")
+    if not (isinstance(short_factor, list) and all(isinstance(x, (int, float)) for x in short_factor)):
+        logger.warning(f"`rope_scaling`'s short_factor field must be a list of numbers, got {short_factor}")
+    if not len(short_factor) == dim // 2:
+        logger.warning(f"`rope_scaling`'s short_factor field must have length {dim // 2}, got {len(short_factor)}")
+
+    long_factor = rope_scaling.get("long_factor")
+    if not (isinstance(long_factor, list) and all(isinstance(x, (int, float)) for x in long_factor)):
+        logger.warning(f"`rope_scaling`'s long_factor field must be a list of numbers, got {long_factor}")
+    if not len(long_factor) == dim // 2:
+        logger.warning(f"`rope_scaling`'s long_factor field must have length {dim // 2}, got {len(long_factor)}")
+
+    # Handle Phi3 divergence: prefer the use of `attention_factor` and/or `factor` over
+    # `original_max_position_embeddings` to compute internal variables. The latter lives outside `rope_scaling` and is
+    # unique to longrope (= undesirable)
+    if hasattr(config, "original_max_position_embeddings"):
+        logger.warning_once(
+            "This model has set an `original_max_position_embeddings` field, to be used together with "
+            "`max_position_embeddings` to determine a scaling factor. Please set the `factor` field of `rope_scaling`"
+            " with this ratio instead -- we recommend the use of this field over `original_max_position_embeddings`, "
+            "as it is compatible with most model architectures."
+        )
+    else:
+        factor = rope_scaling.get("factor")
+        if factor is None:
+            logger.warning("Missing required keys in `rope_scaling`: 'factor'")
+        elif not isinstance(factor, float) or factor < 1.0:
+            logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
+
+        attention_factor = rope_scaling.get("attention_factor")
+        if attention_factor is not None:
+            if not isinstance(attention_factor, float) or attention_factor < 0.0:
+                logger.warning(
+                    f"`rope_scaling`'s attention_factor field must be a float greater than 0, got {attention_factor}"
+                )
+
+
+def _validate_llama3_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None):
+    rope_scaling = config.rope_scaling
+    rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None))  # BC: "rope_type" was originally "type"
+    required_keys = {"rope_type", "factor", "original_max_position_embeddings", "low_freq_factor", "high_freq_factor"}
+    received_keys = set(rope_scaling.keys())
+    _check_received_keys(rope_type, received_keys, required_keys, ignore_keys=ignore_keys)
+
+    factor = rope_scaling["factor"]
+    if factor is None or not isinstance(factor, float) or factor < 1.0:
+        logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
+
+    low_freq_factor = rope_scaling["low_freq_factor"]
+    high_freq_factor = rope_scaling["high_freq_factor"]
+    if low_freq_factor is None or not isinstance(low_freq_factor, float):
+        logger.warning(f"`rope_scaling`'s low_freq_factor field must be a float, got {low_freq_factor}")
+    if high_freq_factor is None or not isinstance(high_freq_factor, float):
+        logger.warning(f"`rope_scaling`'s high_freq_factor field must be a float, got {high_freq_factor}")
+    if high_freq_factor <= low_freq_factor:
+        logger.warning(
"`rope_scaling`'s high_freq_factor field must be greater than low_freq_factor, got high_freq_factor=" + f"{high_freq_factor} and low_freq_factor={low_freq_factor}" + ) + + original_max_position_embeddings = rope_scaling["original_max_position_embeddings"] + if original_max_position_embeddings is None or not isinstance(original_max_position_embeddings, int): + logger.warning( + "`rope_scaling`'s original_max_position_embeddings field must be an integer, got " + f"{original_max_position_embeddings}" + ) + if original_max_position_embeddings >= config.max_position_embeddings: + logger.warning( + "`rope_scaling`'s original_max_position_embeddings field must be less than max_position_embeddings, got " + f"{original_max_position_embeddings} and max_position_embeddings={config.max_position_embeddings}" + ) + + +# Like `ROPE_INIT_FUNCTIONS`, this validation function mapping can be dynamically updated for custom RoPE types. +ROPE_VALIDATION_FUNCTIONS = { + "default": _validate_default_rope_parameters, + "linear": _validate_linear_scaling_rope_parameters, + "dynamic": _validate_dynamic_scaling_rope_parameters, + "yarn": _validate_yarn_parameters, + "longrope": _validate_longrope_parameters, + "llama3": _validate_llama3_parameters, +} + + +def rope_config_validation(config: PretrainedConfig, ignore_keys: Optional[set] = None): + """ + Validate the RoPE config arguments, given a `PretrainedConfig` object + """ + rope_scaling = getattr(config, "rope_scaling", None) # not a default parameter in `PretrainedConfig` + if rope_scaling is None: + return + + # BC: "rope_type" was originally "type" + rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", "default")) + validation_fn = ROPE_VALIDATION_FUNCTIONS.get(rope_type) + if validation_fn is not None: + validation_fn(config, ignore_keys=ignore_keys) + else: + logger.warning( + f"Missing validation function mapping in `ROPE_VALIDATION_FUNCTIONS` for 'rope_type'='{rope_type}'" + ) \ No newline at end of file diff --git a/vlm/utils/video_io.py b/vlm/utils/video_io.py new file mode 100644 index 0000000000000000000000000000000000000000..36670a59dc26909c6c5f18568e2762e2c394ddaa --- /dev/null +++ b/vlm/utils/video_io.py @@ -0,0 +1,271 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from collections import OrderedDict + +import cv2 +from cv2 import (CAP_PROP_FOURCC, CAP_PROP_FPS, CAP_PROP_FRAME_COUNT, + CAP_PROP_FRAME_HEIGHT, CAP_PROP_FRAME_WIDTH, + CAP_PROP_POS_FRAMES) +from mmengine.utils import (check_file_exist, mkdir_or_exist, track_progress) + + +class Cache: + + def __init__(self, capacity): + self._cache = OrderedDict() + self._capacity = int(capacity) + if capacity <= 0: + raise ValueError('capacity must be a positive integer') + + @property + def capacity(self): + return self._capacity + + @property + def size(self): + return len(self._cache) + + def put(self, key, val): + if key in self._cache: + return + if len(self._cache) >= self.capacity: + self._cache.popitem(last=False) + self._cache[key] = val + + def get(self, key, default=None): + val = self._cache[key] if key in self._cache else default + return val + + +class VideoReader: + """Video class with similar usage to a list object. + + This video wrapper class provides convenient apis to access frames. + There exists an issue of OpenCV's VideoCapture class that jumping to a + certain frame may be inaccurate. It is fixed in this class by checking + the position after jumping each time. + Cache is used when decoding videos. 
So if the same frame is visited for + the second time, there is no need to decode again if it is stored in the + cache. + + Examples: + >>> import mmcv + >>> v = mmcv.VideoReader('sample.mp4') + >>> len(v) # get the total frame number with `len()` + 120 + >>> for img in v: # v is iterable + >>> mmcv.imshow(img) + >>> v[5] # get the 6th frame + """ + + def __init__(self, filename, cache_capacity=10): + # Check whether the video path is a url + if not filename.startswith(('https://', 'http://')): + check_file_exist(filename, 'Video file not found: ' + filename) + self._vcap = cv2.VideoCapture(filename) + assert cache_capacity > 0 + self._cache = Cache(cache_capacity) + self._position = 0 + # get basic info + self._width = int(self._vcap.get(CAP_PROP_FRAME_WIDTH)) + self._height = int(self._vcap.get(CAP_PROP_FRAME_HEIGHT)) + self._fps = self._vcap.get(CAP_PROP_FPS) + self._frame_cnt = int(self._vcap.get(CAP_PROP_FRAME_COUNT)) + self._fourcc = self._vcap.get(CAP_PROP_FOURCC) + + @property + def vcap(self): + """:obj:`cv2.VideoCapture`: The raw VideoCapture object.""" + return self._vcap + + @property + def opened(self): + """bool: Indicate whether the video is opened.""" + return self._vcap.isOpened() + + @property + def width(self): + """int: Width of video frames.""" + return self._width + + @property + def height(self): + """int: Height of video frames.""" + return self._height + + @property + def resolution(self): + """tuple: Video resolution (width, height).""" + return (self._width, self._height) + + @property + def fps(self): + """float: FPS of the video.""" + return self._fps + + @property + def frame_cnt(self): + """int: Total frames of the video.""" + return self._frame_cnt + + @property + def fourcc(self): + """str: "Four character code" of the video.""" + return self._fourcc + + @property + def position(self): + """int: Current cursor position, indicating frame decoded.""" + return self._position + + def _get_real_position(self): + return int(round(self._vcap.get(CAP_PROP_POS_FRAMES))) + + def _set_real_position(self, frame_id): + self._vcap.set(CAP_PROP_POS_FRAMES, frame_id) + pos = self._get_real_position() + for _ in range(frame_id - pos): + self._vcap.read() + self._position = frame_id + + def read(self): + """Read the next frame. + + If the next frame have been decoded before and in the cache, then + return it directly, otherwise decode, cache and return it. + + Returns: + ndarray or None: Return the frame if successful, otherwise None. + """ + # pos = self._position + if self._cache: + img = self._cache.get(self._position) + if img is not None: + ret = True + else: + if self._position != self._get_real_position(): + self._set_real_position(self._position) + ret, img = self._vcap.read() + if ret: + self._cache.put(self._position, img) + else: + ret, img = self._vcap.read() + if ret: + self._position += 1 + return img + + def get_frame(self, frame_id): + """Get frame by index. + + Args: + frame_id (int): Index of the expected frame, 0-based. + + Returns: + ndarray or None: Return the frame if successful, otherwise None. 
+ """ + if frame_id < 0 or frame_id >= self._frame_cnt: + raise IndexError( + f'"frame_id" must be between 0 and {self._frame_cnt - 1}') + if frame_id == self._position: + return self.read() + if self._cache: + img = self._cache.get(frame_id) + if img is not None: + self._position = frame_id + 1 + return img + self._set_real_position(frame_id) + ret, img = self._vcap.read() + if ret: + if self._cache: + self._cache.put(self._position, img) + self._position += 1 + return img + + def current_frame(self): + """Get the current frame (frame that is just visited). + + Returns: + ndarray or None: If the video is fresh, return None, otherwise + return the frame. + """ + if self._position == 0: + return None + return self._cache.get(self._position - 1) + + def cvt2frames(self, + frame_dir, + file_start=0, + filename_tmpl='{:06d}.jpg', + start=0, + max_num=0, + show_progress=True): + """Convert a video to frame images. + + Args: + frame_dir (str): Output directory to store all the frame images. + file_start (int): Filenames will start from the specified number. + filename_tmpl (str): Filename template with the index as the + placeholder. + start (int): The starting frame index. + max_num (int): Maximum number of frames to be written. + show_progress (bool): Whether to show a progress bar. + """ + mkdir_or_exist(frame_dir) + if max_num == 0: + task_num = self.frame_cnt - start + else: + task_num = min(self.frame_cnt - start, max_num) + if task_num <= 0: + raise ValueError('start must be less than total frame number') + if start > 0: + self._set_real_position(start) + + def write_frame(file_idx): + img = self.read() + if img is None: + return + filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) + cv2.imwrite(filename, img) + + if show_progress: + track_progress(write_frame, range(file_start, + file_start + task_num)) + else: + for i in range(task_num): + write_frame(file_start + i) + + def __len__(self): + return self.frame_cnt + + def __getitem__(self, index): + if isinstance(index, slice): + return [ + self.get_frame(i) + for i in range(*index.indices(self.frame_cnt)) + ] + # support negative indexing + if index < 0: + index += self.frame_cnt + if index < 0: + raise IndexError('index out of range') + return self.get_frame(index) + + def __iter__(self): + self._set_real_position(0) + return self + + def __next__(self): + img = self.read() + if img is not None: + return img + else: + raise StopIteration + + next = __next__ + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._vcap.release() +
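The `visualize` helper in the test loop file assumes `prediction['prediction_masks']` holds COCO-style RLE dicts and overlays each decoded mask with mmengine's `Visualizer`. A minimal, self-contained sketch of that decode-and-draw step, using a synthetic frame and mask in place of real model output (the array sizes and the `overlay_example.jpg` output name are arbitrary):

import cv2
import numpy as np
from mmengine.visualization import Visualizer
from pycocotools import mask as mask_utils

# A fake frame and a fake predicted mask, RLE-encoded the way a prediction would be stored.
frame = np.zeros((240, 320, 3), dtype=np.uint8)
binary = np.zeros((240, 320), dtype=np.uint8)
binary[60:180, 80:240] = 1
rle = mask_utils.encode(np.asfortranarray(binary))

visualizer = Visualizer()
visualizer.set_image(frame)
decoded = mask_utils.decode(rle).astype(bool)   # same decode call as in visualize()
visualizer.draw_binary_masks(decoded, colors='g')
cv2.imwrite('overlay_example.jpg', visualizer.get_image())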
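In `VideoTestLoop.run`, each dataset is padded up to a multiple of the world size so every rank evaluates a contiguous slice of the same length, with out-of-range indices falling back to the last sample; `collect_results(results, n_samples)` later trims the padded duplicates. A single-process sketch of that index arithmetic (the sample count and world size are made-up values):

import math

def shard_indices(n_samples, world_size, rank):
    """Reproduce the per-rank index arithmetic used in VideoTestLoop.run."""
    per_rank_samples = math.ceil(n_samples / world_size)
    assert per_rank_samples * world_size >= n_samples
    ids = range(per_rank_samples * rank, per_rank_samples * (rank + 1))
    # Indices past the end fall back to the last sample, exactly as in run().
    return [idx if idx < n_samples else n_samples - 1 for idx in ids]

if __name__ == '__main__':
    n_samples, world_size = 10, 4            # made-up sizes
    for rank in range(world_size):
        print(rank, shard_indices(n_samples, world_size, rank))
    # Rank 3 gets [9, 9, 9]; collect_results(results, n_samples) drops the padding.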
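`default_preparing_for_generation` merges fixed decoding defaults with whatever each dataset declares under `metainfo['generation_kwargs']`. A tokenizer-free sketch of that merge, with placeholder token ids standing in for `tokenizer.eos_token_id` and `pad_token_id`:

from transformers import GenerationConfig

metainfo = {'generation_kwargs': {'max_new_tokens': 256}}   # hypothetical dataset metainfo

default_generation_kwargs = dict(
    max_new_tokens=2048,
    do_sample=False,
    eos_token_id=2,     # placeholder: normally tokenizer.eos_token_id
    pad_token_id=2,     # placeholder: falls back to eos when the tokenizer has no pad token
)
default_generation_kwargs.update(metainfo.get('generation_kwargs', {}))
gen_config = GenerationConfig(**default_generation_kwargs)
print(gen_config.max_new_tokens)    # 256: the per-dataset override wins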
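`load_checkpoint_with_prefix` strips a sub-module prefix from a full checkpoint so the weights can be loaded into a standalone module. A small round-trip sketch, assuming the repository is importable as `vlm`; the two-layer toy model and the `toy_ckpt.pth` path are invented for the demo:

import torch
import torch.nn as nn
from vlm.utils import load_checkpoint_with_prefix, load_state_dict_to_model

# Toy checkpoint with two named sub-modules, saved under a 'state_dict' key.
full = nn.ModuleDict({'backbone': nn.Linear(8, 4), 'head': nn.Linear(4, 2)})
torch.save({'state_dict': full.state_dict()}, 'toy_ckpt.pth')

# Keep only the 'backbone.*' weights, with the prefix stripped off.
backbone_sd = load_checkpoint_with_prefix('toy_ckpt.pth', prefix='backbone')
print(sorted(backbone_sd))                        # ['bias', 'weight']

backbone = nn.Linear(8, 4)
load_state_dict_to_model(backbone, backbone_sd)   # raises if any key is missing or unexpected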
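The `_compute_*_rope_parameters` helpers accept either a `PretrainedConfig` or bare `rope_kwargs`. A quick comparison of the default, linear and dynamic-NTK inverse frequencies through the `rope_kwargs` path (the base, rotary dimension, factor and sequence length are illustrative values only, and the import path assumes the repository layout above):

from vlm.utils.modeling_rope_utils import (
    _compute_default_rope_parameters,
    _compute_dynamic_ntk_parameters,
    _compute_linear_scaling_rope_parameters,
)

kwargs = dict(base=10000.0, dim=128)   # illustrative RoPE base and rotary dimension

inv_default, _ = _compute_default_rope_parameters(**kwargs)
inv_linear, _ = _compute_linear_scaling_rope_parameters(factor=2.0, **kwargs)
inv_dynamic, _ = _compute_dynamic_ntk_parameters(
    seq_len=8192, max_position_embeddings=4096, factor=2.0, **kwargs)

print(inv_default.shape)                # torch.Size([64]): one frequency per pair of dims
print((inv_linear / inv_default)[:3])   # exactly 1/factor everywhere
print((inv_dynamic / inv_default)[:3])  # ~1.0 for the fastest dims, shrinking for slower ones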
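The YaRN and llama3 variants are config-driven. A sketch that feeds them a bare `PretrainedConfig` carrying the relevant fields (the YaRN numbers are invented; the llama3 numbers follow the values quoted in the inline comments of `_compute_llama3_parameters`); this assumes `PretrainedConfig` keeps unknown keyword arguments as attributes:

from transformers import PretrainedConfig
from vlm.utils.modeling_rope_utils import (
    _compute_default_rope_parameters,
    _compute_llama3_parameters,
    _compute_yarn_parameters,
)

# YaRN: invented geometry, scaling a 16k-token model by 4x.
yarn_cfg = PretrainedConfig(
    hidden_size=4096, num_attention_heads=32, rope_theta=10000.0,
    max_position_embeddings=16384,
    rope_scaling={"rope_type": "yarn", "factor": 4.0},
)
inv_yarn, attention_factor = _compute_yarn_parameters(yarn_cfg, "cpu")
print(attention_factor)            # 0.1 * ln(4) + 1, roughly 1.139

# llama3: the factors quoted in the function's comments (8 / 1 / 4 / 8192).
llama3_cfg = PretrainedConfig(
    hidden_size=4096, num_attention_heads=32, rope_theta=500000.0,
    rope_scaling={"rope_type": "llama3", "factor": 8.0,
                  "low_freq_factor": 1.0, "high_freq_factor": 4.0,
                  "original_max_position_embeddings": 8192},
)
inv_llama3, _ = _compute_llama3_parameters(llama3_cfg, "cpu")
inv_base, _ = _compute_default_rope_parameters(llama3_cfg, "cpu")
ratio = inv_llama3 / inv_base
print(ratio[:3])                   # ~1.0: short wavelengths are left untouched
print(ratio[-3:])                  # ~1/8: long wavelengths are divided by `factor`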
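`rope_config_validation` dispatches on `rope_scaling['rope_type']` and only warns (it does not raise) when a field is malformed. A small sketch under the same `PretrainedConfig` assumption as above:

from transformers import PretrainedConfig
from vlm.utils.modeling_rope_utils import rope_config_validation

config = PretrainedConfig(
    hidden_size=4096, num_attention_heads=32, max_position_embeddings=4096,
    rope_theta=10000.0,
    rope_scaling={"rope_type": "linear", "factor": 2.0},
)
rope_config_validation(config)      # valid linear scaling: silent

config.rope_scaling = {"rope_type": "linear", "factor": 0.5}
rope_config_validation(config)      # factor < 1.0: logs a warning, does not raise

config.rope_scaling = {"rope_type": "made_up"}
rope_config_validation(config)      # unknown type: warns about the missing validator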
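`VideoReader` behaves like a list of frames, which is why `visualize` can subsample a clip with `VideoReader(path)[::VID_INTERVAL]`. A short usage sketch; `sample.mp4` and the output locations are placeholders, not files shipped with the repo:

import cv2
from vlm.utils import VideoReader

video = VideoReader('sample.mp4')                  # placeholder path
print(len(video), video.fps, video.resolution)     # frame count, FPS, (width, height)

# Dump every frame with the built-in helper ...
video.cvt2frames('frames_out', filename_tmpl='{:06d}.jpg', show_progress=False)

# ... or index it like a list, e.g. every 4th frame, as visualize() does.
for i, frame in enumerate(video[::4]):
    cv2.imwrite(f'subsampled_{i:06d}.jpg', frame)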